devres.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/device.h>
  3. #include <linux/pci.h>
  4. #include "pci.h"
  5. /*
  6. * On the state of PCI's devres implementation:
  7. *
  8. * The older devres API for PCI has two significant problems:
  9. *
  10. * 1. It is very strongly tied to the statically allocated mapping table in
  11. * struct pcim_iomap_devres below. This is mostly solved in the sense of the
  12. * pcim_ functions in this file providing things like ranged mapping by
  13. * bypassing this table, whereas the functions that were present in the old
  14. * API still enter the mapping addresses into the table for users of the old
  15. * API.
  16. *
  17. * 2. The region-request-functions in pci.c do become managed IF the device has
  18. * been enabled with pcim_enable_device() instead of pci_enable_device().
  19. * This resulted in the API becoming inconsistent: Some functions have an
  20. * obviously managed counter-part (e.g., pci_iomap() <-> pcim_iomap()),
  21. * whereas some don't and are never managed, while others don't and are
  22. * _sometimes_ managed (e.g. pci_request_region()).
  23. *
  24. * Consequently, in the new API, region requests performed by the pcim_
  25. * functions are automatically cleaned up through the devres callback
  26. * pcim_addr_resource_release().
  27. *
  28. * Users of pcim_enable_device() + pci_*region*() are redirected in
  29. * pci.c to the managed functions here in this file. This isn't exactly
  30. * perfect, but the only alternative way would be to port ALL drivers
  31. * using said combination to pcim_ functions.
  32. *
  33. * TODO:
  34. * Remove the legacy table entirely once all calls to pcim_iomap_table() in
  35. * the kernel have been removed.
  36. */
/*
 * Legacy struct storing addresses to whole mapped BARs.
 */
struct pcim_iomap_devres {
	/* Mapping address per BAR index; NULL for BARs that are not mapped. */
	void __iomem *table[PCI_NUM_RESOURCES];
};
/* Used to restore the old INTx state on driver detach. */
struct pcim_intx_devres {
	/* Non-zero if INTx was enabled before the driver first changed it. */
	int orig_intx;
};
/* Kinds of address-space resources tracked by a struct pcim_addr_devres. */
enum pcim_addr_devres_type {
	/* Default initializer; no resource acquired. */
	PCIM_ADDR_DEVRES_TYPE_INVALID,

	/* A requested region spanning an entire BAR. */
	PCIM_ADDR_DEVRES_TYPE_REGION,

	/*
	 * A requested region spanning an entire BAR, and a mapping for
	 * the entire BAR.
	 */
	PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING,

	/*
	 * A mapping within a BAR, either spanning the whole BAR or just a
	 * range. Without a requested region.
	 */
	PCIM_ADDR_DEVRES_TYPE_MAPPING,
};
/*
 * This struct envelops IO or MEM addresses, i.e., mappings and region
 * requests, because those are very frequently requested and released
 * together.
 */
struct pcim_addr_devres {
	enum pcim_addr_devres_type type;
	void __iomem *baseaddr;	/* Mapping base; set for the *_MAPPING types. */
	unsigned long offset;	/* Range start within the BAR (ranged mappings). */
	unsigned long len;	/* Range length in bytes. */
	int bar;		/* BAR index; -1 means "no BAR associated". */
};
  75. static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
  76. {
  77. memset(res, 0, sizeof(*res));
  78. res->bar = -1;
  79. }
  80. /*
  81. * The following functions, __pcim_*_region*, exist as counterparts to the
  82. * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
  83. * sometimes managed, sometimes not.
  84. *
  85. * To separate the APIs cleanly, we define our own, simplified versions here.
  86. */
  87. /**
  88. * __pcim_request_region_range - Request a ranged region
  89. * @pdev: PCI device the region belongs to
  90. * @bar: BAR the range is within
  91. * @offset: offset from the BAR's start address
  92. * @maxlen: length in bytes, beginning at @offset
  93. * @name: name associated with the request
  94. * @req_flags: flags for the request, e.g., for kernel-exclusive requests
  95. *
  96. * Returns: 0 on success, a negative error code on failure.
  97. *
  98. * Request a range within a device's PCI BAR. Sanity check the input.
  99. */
  100. static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
  101. unsigned long offset,
  102. unsigned long maxlen,
  103. const char *name, int req_flags)
  104. {
  105. resource_size_t start = pci_resource_start(pdev, bar);
  106. resource_size_t len = pci_resource_len(pdev, bar);
  107. unsigned long dev_flags = pci_resource_flags(pdev, bar);
  108. if (start == 0 || len == 0) /* Unused BAR. */
  109. return 0;
  110. if (len <= offset)
  111. return -EINVAL;
  112. start += offset;
  113. len -= offset;
  114. if (len > maxlen && maxlen != 0)
  115. len = maxlen;
  116. if (dev_flags & IORESOURCE_IO) {
  117. if (!request_region(start, len, name))
  118. return -EBUSY;
  119. } else if (dev_flags & IORESOURCE_MEM) {
  120. if (!__request_mem_region(start, len, name, req_flags))
  121. return -EBUSY;
  122. } else {
  123. /* That's not a device we can request anything on. */
  124. return -ENODEV;
  125. }
  126. return 0;
  127. }
  128. static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
  129. unsigned long offset,
  130. unsigned long maxlen)
  131. {
  132. resource_size_t start = pci_resource_start(pdev, bar);
  133. resource_size_t len = pci_resource_len(pdev, bar);
  134. unsigned long flags = pci_resource_flags(pdev, bar);
  135. if (len <= offset || start == 0)
  136. return;
  137. if (len == 0 || maxlen == 0) /* This an unused BAR. Do nothing. */
  138. return;
  139. start += offset;
  140. len -= offset;
  141. if (len > maxlen)
  142. len = maxlen;
  143. if (flags & IORESOURCE_IO)
  144. release_region(start, len);
  145. else if (flags & IORESOURCE_MEM)
  146. release_mem_region(start, len);
  147. }
  148. static int __pcim_request_region(struct pci_dev *pdev, int bar,
  149. const char *name, int flags)
  150. {
  151. unsigned long offset = 0;
  152. unsigned long len = pci_resource_len(pdev, bar);
  153. return __pcim_request_region_range(pdev, bar, offset, len, name, flags);
  154. }
  155. static void __pcim_release_region(struct pci_dev *pdev, int bar)
  156. {
  157. unsigned long offset = 0;
  158. unsigned long len = pci_resource_len(pdev, bar);
  159. __pcim_release_region_range(pdev, bar, offset, len);
  160. }
  161. static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
  162. {
  163. struct pci_dev *pdev = to_pci_dev(dev);
  164. struct pcim_addr_devres *res = resource_raw;
  165. switch (res->type) {
  166. case PCIM_ADDR_DEVRES_TYPE_REGION:
  167. __pcim_release_region(pdev, res->bar);
  168. break;
  169. case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
  170. pci_iounmap(pdev, res->baseaddr);
  171. __pcim_release_region(pdev, res->bar);
  172. break;
  173. case PCIM_ADDR_DEVRES_TYPE_MAPPING:
  174. pci_iounmap(pdev, res->baseaddr);
  175. break;
  176. default:
  177. break;
  178. }
  179. }
  180. static struct pcim_addr_devres *pcim_addr_devres_alloc(struct pci_dev *pdev)
  181. {
  182. struct pcim_addr_devres *res;
  183. res = devres_alloc_node(pcim_addr_resource_release, sizeof(*res),
  184. GFP_KERNEL, dev_to_node(&pdev->dev));
  185. if (res)
  186. pcim_addr_devres_clear(res);
  187. return res;
  188. }
  189. /* Just for consistency and readability. */
  190. static inline void pcim_addr_devres_free(struct pcim_addr_devres *res)
  191. {
  192. devres_free(res);
  193. }
  194. /*
  195. * Used by devres to identify a pcim_addr_devres.
  196. */
  197. static int pcim_addr_resources_match(struct device *dev,
  198. void *a_raw, void *b_raw)
  199. {
  200. struct pcim_addr_devres *a, *b;
  201. a = a_raw;
  202. b = b_raw;
  203. if (a->type != b->type)
  204. return 0;
  205. switch (a->type) {
  206. case PCIM_ADDR_DEVRES_TYPE_REGION:
  207. case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
  208. return a->bar == b->bar;
  209. case PCIM_ADDR_DEVRES_TYPE_MAPPING:
  210. return a->baseaddr == b->baseaddr;
  211. default:
  212. return 0;
  213. }
  214. }
  215. static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
  216. {
  217. struct resource **res = ptr;
  218. pci_unmap_iospace(*res);
  219. }
  220. /**
  221. * devm_pci_remap_iospace - Managed pci_remap_iospace()
  222. * @dev: Generic device to remap IO address for
  223. * @res: Resource describing the I/O space
  224. * @phys_addr: physical address of range to be mapped
  225. *
  226. * Managed pci_remap_iospace(). Map is automatically unmapped on driver
  227. * detach.
  228. */
  229. int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
  230. phys_addr_t phys_addr)
  231. {
  232. const struct resource **ptr;
  233. int error;
  234. ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
  235. if (!ptr)
  236. return -ENOMEM;
  237. error = pci_remap_iospace(res, phys_addr);
  238. if (error) {
  239. devres_free(ptr);
  240. } else {
  241. *ptr = res;
  242. devres_add(dev, ptr);
  243. }
  244. return error;
  245. }
  246. EXPORT_SYMBOL(devm_pci_remap_iospace);
  247. /**
  248. * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
  249. * @dev: Generic device to remap IO address for
  250. * @offset: Resource address to map
  251. * @size: Size of map
  252. *
  253. * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
  254. * detach.
  255. */
  256. void __iomem *devm_pci_remap_cfgspace(struct device *dev,
  257. resource_size_t offset,
  258. resource_size_t size)
  259. {
  260. void __iomem **ptr, *addr;
  261. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  262. if (!ptr)
  263. return NULL;
  264. addr = pci_remap_cfgspace(offset, size);
  265. if (addr) {
  266. *ptr = addr;
  267. devres_add(dev, ptr);
  268. } else
  269. devres_free(ptr);
  270. return addr;
  271. }
  272. EXPORT_SYMBOL(devm_pci_remap_cfgspace);
  273. /**
  274. * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
  275. * @dev: generic device to handle the resource for
  276. * @res: configuration space resource to be handled
  277. *
  278. * Checks that a resource is a valid memory region, requests the memory
  279. * region and ioremaps with pci_remap_cfgspace() API that ensures the
  280. * proper PCI configuration space memory attributes are guaranteed.
  281. *
  282. * All operations are managed and will be undone on driver detach.
  283. *
  284. * Returns a pointer to the remapped memory or an IOMEM_ERR_PTR() encoded error
  285. * code on failure. Usage example::
  286. *
  287. * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  288. * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
  289. * if (IS_ERR(base))
  290. * return PTR_ERR(base);
  291. */
  292. void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
  293. struct resource *res)
  294. {
  295. resource_size_t size;
  296. const char *name;
  297. void __iomem *dest_ptr;
  298. BUG_ON(!dev);
  299. if (!res || resource_type(res) != IORESOURCE_MEM) {
  300. dev_err(dev, "invalid resource\n");
  301. return IOMEM_ERR_PTR(-EINVAL);
  302. }
  303. size = resource_size(res);
  304. if (res->name)
  305. name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
  306. res->name);
  307. else
  308. name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
  309. if (!name)
  310. return IOMEM_ERR_PTR(-ENOMEM);
  311. if (!devm_request_mem_region(dev, res->start, size, name)) {
  312. dev_err(dev, "can't request region for resource %pR\n", res);
  313. return IOMEM_ERR_PTR(-EBUSY);
  314. }
  315. dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
  316. if (!dest_ptr) {
  317. dev_err(dev, "ioremap failed for resource %pR\n", res);
  318. devm_release_mem_region(dev, res->start, size);
  319. dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
  320. }
  321. return dest_ptr;
  322. }
  323. EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
  324. static void __pcim_clear_mwi(void *pdev_raw)
  325. {
  326. struct pci_dev *pdev = pdev_raw;
  327. pci_clear_mwi(pdev);
  328. }
  329. /**
  330. * pcim_set_mwi - a device-managed pci_set_mwi()
  331. * @pdev: the PCI device for which MWI is enabled
  332. *
  333. * Managed pci_set_mwi().
  334. *
  335. * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  336. */
  337. int pcim_set_mwi(struct pci_dev *pdev)
  338. {
  339. int ret;
  340. ret = devm_add_action(&pdev->dev, __pcim_clear_mwi, pdev);
  341. if (ret != 0)
  342. return ret;
  343. ret = pci_set_mwi(pdev);
  344. if (ret != 0)
  345. devm_remove_action(&pdev->dev, __pcim_clear_mwi, pdev);
  346. return ret;
  347. }
  348. EXPORT_SYMBOL(pcim_set_mwi);
  349. static inline bool mask_contains_bar(int mask, int bar)
  350. {
  351. return mask & BIT(bar);
  352. }
  353. static void pcim_intx_restore(struct device *dev, void *data)
  354. {
  355. struct pci_dev *pdev = to_pci_dev(dev);
  356. struct pcim_intx_devres *res = data;
  357. pci_intx(pdev, res->orig_intx);
  358. }
  359. static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
  360. {
  361. u16 pci_command;
  362. pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
  363. res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
  364. }
  365. /**
  366. * pcim_intx - managed pci_intx()
  367. * @pdev: the PCI device to operate on
  368. * @enable: boolean: whether to enable or disable PCI INTx
  369. *
  370. * Returns: 0 on success, -ENOMEM on error.
  371. *
  372. * Enable/disable PCI INTx for device @pdev.
  373. * Restore the original state on driver detach.
  374. */
  375. int pcim_intx(struct pci_dev *pdev, int enable)
  376. {
  377. struct pcim_intx_devres *res;
  378. struct device *dev = &pdev->dev;
  379. /*
  380. * pcim_intx() must only restore the INTx value that existed before the
  381. * driver was loaded, i.e., before it called pcim_intx() for the
  382. * first time.
  383. */
  384. res = devres_find(dev, pcim_intx_restore, NULL, NULL);
  385. if (!res) {
  386. res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
  387. if (!res)
  388. return -ENOMEM;
  389. save_orig_intx(pdev, res);
  390. devres_add(dev, res);
  391. }
  392. pci_intx(pdev, enable);
  393. return 0;
  394. }
  395. EXPORT_SYMBOL_GPL(pcim_intx);
/*
 * devm action callback registered by pcim_enable_device(). Disables the
 * device on driver detach unless it has been pinned via pcim_pin_device().
 */
static void pcim_disable_device(void *pdev_raw)
{
	struct pci_dev *pdev = pdev_raw;

	/* Pinned devices stay enabled past driver detach. */
	if (!pdev->pinned)
		pci_disable_device(pdev);

	/* The managed lifetime ends here either way. */
	pdev->is_managed = false;
}
  403. /**
  404. * pcim_enable_device - Managed pci_enable_device()
  405. * @pdev: PCI device to be initialized
  406. *
  407. * Returns: 0 on success, negative error code on failure.
  408. *
  409. * Managed pci_enable_device(). Device will automatically be disabled on
  410. * driver detach.
  411. */
  412. int pcim_enable_device(struct pci_dev *pdev)
  413. {
  414. int ret;
  415. ret = devm_add_action(&pdev->dev, pcim_disable_device, pdev);
  416. if (ret != 0)
  417. return ret;
  418. /*
  419. * We prefer removing the action in case of an error over
  420. * devm_add_action_or_reset() because the latter could theoretically be
  421. * disturbed by users having pinned the device too soon.
  422. */
  423. ret = pci_enable_device(pdev);
  424. if (ret != 0) {
  425. devm_remove_action(&pdev->dev, pcim_disable_device, pdev);
  426. return ret;
  427. }
  428. pdev->is_managed = true;
  429. return ret;
  430. }
  431. EXPORT_SYMBOL(pcim_enable_device);
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on driver
 * detach. @pdev must have been enabled with pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	/* Checked by pcim_disable_device() when the driver detaches. */
	pdev->pinned = true;
}
EXPORT_SYMBOL(pcim_pin_device);
static void pcim_iomap_release(struct device *gendev, void *res)
{
	/*
	 * Do nothing. This is legacy code.
	 *
	 * Cleanup of the mappings is now done directly through the callbacks
	 * registered when creating them.
	 *
	 * This empty function must still exist: its address serves as the
	 * key by which devres_find()/devres_get() locate the legacy iomap
	 * table in pcim_iomap_table().
	 */
}
  453. /**
  454. * pcim_iomap_table - access iomap allocation table (DEPRECATED)
  455. * @pdev: PCI device to access iomap table for
  456. *
  457. * Returns:
  458. * Const pointer to array of __iomem pointers on success, NULL on failure.
  459. *
  460. * Access iomap allocation table for @dev. If iomap table doesn't
  461. * exist and @pdev is managed, it will be allocated. All iomaps
  462. * recorded in the iomap table are automatically unmapped on driver
  463. * detach.
  464. *
  465. * This function might sleep when the table is first allocated but can
  466. * be safely called without context and guaranteed to succeed once
  467. * allocated.
  468. *
  469. * This function is DEPRECATED. Do not use it in new code. Instead, obtain a
  470. * mapping's address directly from one of the pcim_* mapping functions. For
  471. * example:
  472. * void __iomem \*mappy = pcim_iomap(pdev, bar, length);
  473. */
  474. void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
  475. {
  476. struct pcim_iomap_devres *dr, *new_dr;
  477. dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
  478. if (dr)
  479. return dr->table;
  480. new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
  481. dev_to_node(&pdev->dev));
  482. if (!new_dr)
  483. return NULL;
  484. dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
  485. return dr->table;
  486. }
  487. EXPORT_SYMBOL(pcim_iomap_table);
  488. /*
  489. * Fill the legacy mapping-table, so that drivers using the old API can
  490. * still get a BAR's mapping address through pcim_iomap_table().
  491. */
  492. static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
  493. void __iomem *mapping, int bar)
  494. {
  495. void __iomem **legacy_iomap_table;
  496. if (!pci_bar_index_is_valid(bar))
  497. return -EINVAL;
  498. legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
  499. if (!legacy_iomap_table)
  500. return -ENOMEM;
  501. /* The legacy mechanism doesn't allow for duplicate mappings. */
  502. WARN_ON(legacy_iomap_table[bar]);
  503. legacy_iomap_table[bar] = mapping;
  504. return 0;
  505. }
  506. /*
  507. * Remove a mapping. The table only contains whole-BAR mappings, so this will
  508. * never interfere with ranged mappings.
  509. */
  510. static void pcim_remove_mapping_from_legacy_table(struct pci_dev *pdev,
  511. void __iomem *addr)
  512. {
  513. int bar;
  514. void __iomem **legacy_iomap_table;
  515. legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
  516. if (!legacy_iomap_table)
  517. return;
  518. for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
  519. if (legacy_iomap_table[bar] == addr) {
  520. legacy_iomap_table[bar] = NULL;
  521. return;
  522. }
  523. }
  524. }
  525. /*
  526. * The same as pcim_remove_mapping_from_legacy_table(), but identifies the
  527. * mapping by its BAR index.
  528. */
  529. static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
  530. {
  531. void __iomem **legacy_iomap_table;
  532. if (!pci_bar_index_is_valid(bar))
  533. return;
  534. legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
  535. if (!legacy_iomap_table)
  536. return;
  537. legacy_iomap_table[bar] = NULL;
  538. }
  539. /**
  540. * pcim_iomap - Managed pcim_iomap()
  541. * @pdev: PCI device to iomap for
  542. * @bar: BAR to iomap
  543. * @maxlen: Maximum length of iomap
  544. *
  545. * Returns: __iomem pointer on success, NULL on failure.
  546. *
  547. * Managed pci_iomap(). Map is automatically unmapped on driver detach. If
  548. * desired, unmap manually only with pcim_iounmap().
  549. *
  550. * This SHOULD only be used once per BAR.
  551. *
  552. * NOTE:
  553. * Contrary to the other pcim_* functions, this function does not return an
  554. * IOMEM_ERR_PTR() on failure, but a simple NULL. This is done for backwards
  555. * compatibility.
  556. */
  557. void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
  558. {
  559. void __iomem *mapping;
  560. struct pcim_addr_devres *res;
  561. if (!pci_bar_index_is_valid(bar))
  562. return NULL;
  563. res = pcim_addr_devres_alloc(pdev);
  564. if (!res)
  565. return NULL;
  566. res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
  567. mapping = pci_iomap(pdev, bar, maxlen);
  568. if (!mapping)
  569. goto err_iomap;
  570. res->baseaddr = mapping;
  571. if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
  572. goto err_table;
  573. devres_add(&pdev->dev, res);
  574. return mapping;
  575. err_table:
  576. pci_iounmap(pdev, mapping);
  577. err_iomap:
  578. pcim_addr_devres_free(res);
  579. return NULL;
  580. }
  581. EXPORT_SYMBOL(pcim_iomap);
  582. /**
  583. * pcim_iounmap - Managed pci_iounmap()
  584. * @pdev: PCI device to iounmap for
  585. * @addr: Address to unmap
  586. *
  587. * Managed pci_iounmap(). @addr must have been mapped using a pcim_* mapping
  588. * function.
  589. */
  590. void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
  591. {
  592. struct pcim_addr_devres res_searched;
  593. pcim_addr_devres_clear(&res_searched);
  594. res_searched.type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
  595. res_searched.baseaddr = addr;
  596. if (devres_release(&pdev->dev, pcim_addr_resource_release,
  597. pcim_addr_resources_match, &res_searched) != 0) {
  598. /* Doesn't exist. User passed nonsense. */
  599. return;
  600. }
  601. pcim_remove_mapping_from_legacy_table(pdev, addr);
  602. }
  603. EXPORT_SYMBOL(pcim_iounmap);
/**
 * pcim_iomap_region - Request and iomap a PCI BAR
 * @pdev: PCI device to map IO resources for
 * @bar: Index of a BAR to map
 * @name: Name associated with the request
 *
 * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
 *
 * Mapping and region will get automatically released on driver detach. If
 * desired, release manually only with pcim_iounmap_region().
 */
void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
				const char *name)
{
	int ret;
	struct pcim_addr_devres *res;

	if (!pci_bar_index_is_valid(bar))
		return IOMEM_ERR_PTR(-EINVAL);

	res = pcim_addr_devres_alloc(pdev);
	if (!res)
		return IOMEM_ERR_PTR(-ENOMEM);

	/* Combined entry: the release callback unmaps, then drops the region. */
	res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
	res->bar = bar;

	ret = __pcim_request_region(pdev, bar, name, 0);
	if (ret != 0)
		goto err_region;

	/* maxlen 0: map the entire BAR. */
	res->baseaddr = pci_iomap(pdev, bar, 0);
	if (!res->baseaddr) {
		ret = -EINVAL;
		goto err_iomap;
	}

	devres_add(&pdev->dev, res);
	return res->baseaddr;

err_iomap:
	__pcim_release_region(pdev, bar);
err_region:
	pcim_addr_devres_free(res);
	return IOMEM_ERR_PTR(ret);
}
EXPORT_SYMBOL(pcim_iomap_region);
  644. /**
  645. * pcim_iounmap_region - Unmap and release a PCI BAR
  646. * @pdev: PCI device to operate on
  647. * @bar: Index of BAR to unmap and release
  648. *
  649. * Unmap a BAR and release its region manually. Only pass BARs that were
  650. * previously mapped by pcim_iomap_region().
  651. */
  652. static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
  653. {
  654. struct pcim_addr_devres res_searched;
  655. pcim_addr_devres_clear(&res_searched);
  656. res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
  657. res_searched.bar = bar;
  658. devres_release(&pdev->dev, pcim_addr_resource_release,
  659. pcim_addr_resources_match, &res_searched);
  660. }
  661. /**
  662. * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
  663. * @pdev: PCI device to map IO resources for
  664. * @mask: Mask of BARs to request and iomap
  665. * @name: Name associated with the requests
  666. *
  667. * Returns: 0 on success, negative error code on failure.
  668. *
  669. * Request and iomap regions specified by @mask.
  670. *
  671. * This function is DEPRECATED. Do not use it in new code.
  672. * Use pcim_iomap_region() instead.
  673. */
  674. int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
  675. {
  676. int ret;
  677. int bar;
  678. void __iomem *mapping;
  679. for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
  680. if (!mask_contains_bar(mask, bar))
  681. continue;
  682. mapping = pcim_iomap_region(pdev, bar, name);
  683. if (IS_ERR(mapping)) {
  684. ret = PTR_ERR(mapping);
  685. goto err;
  686. }
  687. ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
  688. if (ret != 0)
  689. goto err;
  690. }
  691. return 0;
  692. err:
  693. while (--bar >= 0) {
  694. pcim_iounmap_region(pdev, bar);
  695. pcim_remove_bar_from_legacy_table(pdev, bar);
  696. }
  697. return ret;
  698. }
  699. EXPORT_SYMBOL(pcim_iomap_regions);
  700. static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
  701. int request_flags)
  702. {
  703. int ret;
  704. struct pcim_addr_devres *res;
  705. if (!pci_bar_index_is_valid(bar))
  706. return -EINVAL;
  707. res = pcim_addr_devres_alloc(pdev);
  708. if (!res)
  709. return -ENOMEM;
  710. res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
  711. res->bar = bar;
  712. ret = __pcim_request_region(pdev, bar, name, request_flags);
  713. if (ret != 0) {
  714. pcim_addr_devres_free(res);
  715. return ret;
  716. }
  717. devres_add(&pdev->dev, res);
  718. return 0;
  719. }
  720. /**
  721. * pcim_request_region - Request a PCI BAR
  722. * @pdev: PCI device to requestion region for
  723. * @bar: Index of BAR to request
  724. * @name: Name associated with the request
  725. *
  726. * Returns: 0 on success, a negative error code on failure.
  727. *
  728. * Request region specified by @bar.
  729. *
  730. * The region will automatically be released on driver detach. If desired,
  731. * release manually only with pcim_release_region().
  732. */
  733. int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
  734. {
  735. return _pcim_request_region(pdev, bar, name, 0);
  736. }
  737. EXPORT_SYMBOL(pcim_request_region);
  738. /**
  739. * pcim_request_region_exclusive - Request a PCI BAR exclusively
  740. * @pdev: PCI device to requestion region for
  741. * @bar: Index of BAR to request
  742. * @name: Name associated with the request
  743. *
  744. * Returns: 0 on success, a negative error code on failure.
  745. *
  746. * Request region specified by @bar exclusively.
  747. *
  748. * The region will automatically be released on driver detach. If desired,
  749. * release manually only with pcim_release_region().
  750. */
  751. int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
  752. {
  753. return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
  754. }
  755. /**
  756. * pcim_release_region - Release a PCI BAR
  757. * @pdev: PCI device to operate on
  758. * @bar: Index of BAR to release
  759. *
  760. * Release a region manually that was previously requested by
  761. * pcim_request_region().
  762. */
  763. void pcim_release_region(struct pci_dev *pdev, int bar)
  764. {
  765. struct pcim_addr_devres res_searched;
  766. pcim_addr_devres_clear(&res_searched);
  767. res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION;
  768. res_searched.bar = bar;
  769. devres_release(&pdev->dev, pcim_addr_resource_release,
  770. pcim_addr_resources_match, &res_searched);
  771. }
  772. /**
  773. * pcim_release_all_regions - Release all regions of a PCI-device
  774. * @pdev: the PCI device
  775. *
  776. * Release all regions previously requested through pcim_request_region()
  777. * or pcim_request_all_regions().
  778. *
  779. * Can be called from any context, i.e., not necessarily as a counterpart to
  780. * pcim_request_all_regions().
  781. */
  782. static void pcim_release_all_regions(struct pci_dev *pdev)
  783. {
  784. int bar;
  785. for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
  786. pcim_release_region(pdev, bar);
  787. }
  788. /**
  789. * pcim_request_all_regions - Request all regions
  790. * @pdev: PCI device to map IO resources for
  791. * @name: name associated with the request
  792. *
  793. * Returns: 0 on success, negative error code on failure.
  794. *
  795. * Requested regions will automatically be released at driver detach. If
  796. * desired, release individual regions with pcim_release_region() or all of
  797. * them at once with pcim_release_all_regions().
  798. */
  799. int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
  800. {
  801. int ret;
  802. int bar;
  803. for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
  804. ret = pcim_request_region(pdev, bar, name);
  805. if (ret != 0)
  806. goto err;
  807. }
  808. return 0;
  809. err:
  810. pcim_release_all_regions(pdev);
  811. return ret;
  812. }
  813. EXPORT_SYMBOL(pcim_request_all_regions);
/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * (DEPRECATED)
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name associated with the requests
 *
 * Returns: 0 on success, negative error code on failure.
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 *
 * To release these resources manually, call pcim_release_region() for the
 * regions and pcim_iounmap() for the mappings.
 *
 * This function is DEPRECATED. Don't use it in new code. Instead, use one
 * of the pcim_* region request functions in combination with a pcim_*
 * mapping function.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int bar;
	int ret;
	void __iomem **legacy_iomap_table;

	/* First claim every standard BAR; bail out before mapping anything. */
	ret = pcim_request_all_regions(pdev, name);
	if (ret != 0)
		return ret;

	/* Map only the BARs selected by @mask; a NULL return means failure. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!mask_contains_bar(mask, bar))
			continue;
		if (!pcim_iomap(pdev, bar, 0))
			goto err;
	}

	return 0;

err:
	/*
	 * If bar is larger than 0, then pcim_iomap() above has most likely
	 * failed because of -EINVAL. If it is equal 0, most likely the table
	 * couldn't be created, indicating -ENOMEM.
	 *
	 * NOTE(review): pcim_iomap() only reports success/failure via NULL,
	 * so the exact error is reconstructed heuristically here.
	 */
	ret = bar > 0 ? -EINVAL : -ENOMEM;

	/*
	 * Tear down in reverse: unmap every BAR mapped so far via the legacy
	 * iomap table, then drop all requested regions.
	 */
	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
	while (--bar >= 0)
		pcim_iounmap(pdev, legacy_iomap_table[bar]);

	pcim_release_all_regions(pdev);

	return ret;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  862. /**
  863. * pcim_iounmap_regions - Unmap and release PCI BARs
  864. * @pdev: PCI device to map IO resources for
  865. * @mask: Mask of BARs to unmap and release
  866. *
  867. * Unmap and release regions specified by @mask.
  868. */
  869. void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
  870. {
  871. int i;
  872. for (i = 0; i < PCI_STD_NUM_BARS; i++) {
  873. if (!mask_contains_bar(mask, i))
  874. continue;
  875. pcim_iounmap_region(pdev, i);
  876. pcim_remove_bar_from_legacy_table(pdev, i);
  877. }
  878. }
  879. EXPORT_SYMBOL(pcim_iounmap_regions);
  880. /**
  881. * pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR
  882. * @pdev: PCI device to map IO resources for
  883. * @bar: Index of the BAR
  884. * @offset: Offset from the begin of the BAR
  885. * @len: Length in bytes for the mapping
  886. *
  887. * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
  888. *
  889. * Creates a new IO-Mapping within the specified @bar, ranging from @offset to
  890. * @offset + @len.
  891. *
  892. * The mapping will automatically get unmapped on driver detach. If desired,
  893. * release manually only with pcim_iounmap().
  894. */
  895. void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
  896. unsigned long offset, unsigned long len)
  897. {
  898. void __iomem *mapping;
  899. struct pcim_addr_devres *res;
  900. if (!pci_bar_index_is_valid(bar))
  901. return IOMEM_ERR_PTR(-EINVAL);
  902. res = pcim_addr_devres_alloc(pdev);
  903. if (!res)
  904. return IOMEM_ERR_PTR(-ENOMEM);
  905. mapping = pci_iomap_range(pdev, bar, offset, len);
  906. if (!mapping) {
  907. pcim_addr_devres_free(res);
  908. return IOMEM_ERR_PTR(-EINVAL);
  909. }
  910. res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
  911. res->baseaddr = mapping;
  912. /*
  913. * Ranged mappings don't get added to the legacy-table, since the table
  914. * only ever keeps track of whole BARs.
  915. */
  916. devres_add(&pdev->dev, res);
  917. return mapping;
  918. }
  919. EXPORT_SYMBOL(pcim_iomap_range);