access.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions.  They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_bus->ops.
 */
#define PCI_byte_BAD	0
#define PCI_word_BAD	(pos & 1)
#define PCI_dword_BAD	(pos & 3)

#ifdef CONFIG_PCI_LOCKLESS_CONFIG
# define pci_lock_config(f)	do { (void)(f); } while (0)
# define pci_unlock_config(f)	do { (void)(f); } while (0)
#else
# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
#endif

#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	pci_unlock_config(flags);					\
	return res;							\
}

#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	pci_lock_config(flags);						\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	pci_unlock_config(flags);					\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
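/*
 * Illustrative sketch (not part of this file): the bus-level accessors above
 * are what enumeration code can use before a struct pci_dev exists.  A caller
 * that already has a bus and devfn might probe for a device roughly like
 * this; the surrounding context and error handling are assumptions made for
 * the example only:
 *
 *	u32 id;
 *
 *	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id))
 *		return NULL;			// config access failed
 *	if (id == 0xffffffff || id == 0x00000000)
 *		return NULL;			// nothing present at this devfn
 */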
int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);
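/*
 * Illustrative sketch (not part of this file): a host controller driver with
 * memory-mapped, ECAM-style config space typically supplies only ->map_bus()
 * and reuses the generic accessors above.  The ops struct and the map_bus
 * callback below are hypothetical names for the example:
 *
 *	static struct pci_ops example_ecam_ops = {
 *		.map_bus	= example_ecam_map_bus,	// driver-provided
 *		.read		= pci_generic_config_read,
 *		.write		= pci_generic_config_write,
 *	};
 */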
int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * In general, hardware that supports only 32-bit writes on PCI is
	 * not spec-compliant.  For example, software may perform a 16-bit
	 * write.  If the hardware only supports 32-bit accesses, we must
	 * do a 32-bit read, merge in the 16 bits we intend to write,
	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
	 * write happen to have any RW1C (write-one-to-clear) bits set, we
	 * just inadvertently cleared something we shouldn't have.
	 */
	dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
			     size, pci_domain_nr(bus), bus->number,
			     PCI_SLOT(devfn), PCI_FUNC(devfn), where);

	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
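/*
 * Worked example (illustrative only): for a 16-bit write to offset 0x06,
 * size == 2 and (where & 0x3) == 2, so the merge above computes
 *
 *	mask = ~(0xffff << 16) = 0x0000ffff
 *	tmp  = readl(addr) & 0x0000ffff;	// keep the low 16 bits just read
 *	tmp |= val << 16;			// merge in the new high 16 bits
 *	writel(tmp, addr);
 *
 * The low 16 bits are written back with whatever value was read.  If any of
 * them are RW1C status bits that happened to be set, writing that 1 back
 * clears them, which is exactly the hazard the warning above describes.
 */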
/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus: pci bus struct
 * @ops: new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
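/*
 * Illustrative sketch (not part of this file): pci_bus_set_ops() lets a
 * caller temporarily interpose on config accesses for a bus and restore the
 * original ops afterwards.  The intercepting ops struct below is a
 * hypothetical name used only for the example:
 *
 *	struct pci_ops *orig;
 *
 *	orig = pci_bus_set_ops(bus, &example_intercept_ops);
 *	// ... accesses on this bus now go through example_intercept_ops ...
 *	pci_bus_set_ops(bus, orig);		// restore the previous ops
 */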
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
	__must_hold(&pci_lock)
{
	do {
		raw_spin_unlock_irq(&pci_lock);
		wait_event(pci_cfg_wait, !dev->block_cfg_access);
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type)	\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
				  pos, sizeof(type), &data);		\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type)	\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
				   pos, sizeof(type), val);		\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev: pci device struct
 *
 * Same as pci_cfg_access_lock(), but will return false if access is
 * already locked, true otherwise.  This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev: pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
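/*
 * Illustrative sketch (not part of this file): callers that must keep user
 * config accesses away from a device, e.g. while it runs BIST or changes
 * power state, bracket the critical region with the lock/unlock pair above.
 * The reset helper in the middle is a hypothetical placeholder:
 *
 *	pci_cfg_access_lock(dev);	// may sleep; user accesses now block
 *	example_do_device_reset(dev);	// device must not be poked meanwhile
 *	pci_cfg_access_unlock(dev);	// wakes waiters sleeping in pci_wait_cfg()
 */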
static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails; it may
		 * have been written as 0xFFFF if a hardware error happened
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
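/*
 * Illustrative sketch (not part of this file): a typical use of
 * pcie_capability_read_word() is reading the Link Status register.  Note
 * that the offset is relative to the PCI Express Capability, not an absolute
 * config-space offset:
 *
 *	u16 lnksta;
 *
 *	if (!pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta)) {
 *		u16 speed = lnksta & PCI_EXP_LNKSTA_CLS;  // current link speed
 *		// ... use "speed" ...
 *	}
 */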
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails; it may
		 * have been written as 0xFFFFFFFF if a hardware error happened
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);
int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
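/*
 * Illustrative sketch (not part of this file): the clear-and-set helpers do
 * a read-modify-write of one PCIe capability register in a single call.
 * For example, clearing Enable Relaxed Ordering and setting Enable No Snoop
 * in Device Control (the field choice is only for illustration):
 *
 *	int ret;
 *
 *	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
 *						 PCI_EXP_DEVCTL_RELAX_EN,
 *						 PCI_EXP_DEVCTL_NOSNOOP_EN);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */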
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_byte);

int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_word);

int pci_read_config_dword(const struct pci_dev *dev, int where,
			  u32 *val)
{
	if (pci_dev_is_disconnected(dev)) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}
	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_read_config_dword);

int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_byte);

int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_word);

int pci_write_config_dword(const struct pci_dev *dev, int where,
			   u32 val)
{
	if (pci_dev_is_disconnected(dev))
		return PCIBIOS_DEVICE_NOT_FOUND;
	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);
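/*
 * Illustrative sketch (not part of this file): the pci_{read,write}_config_*
 * accessors above are the usual driver-facing interface.  A read-modify-write
 * of the Command register looks like this (drivers would normally call
 * pci_set_master() for this particular bit; the sequence is only an example
 * of the API):
 *
 *	u16 cmd;
 *
 *	if (pci_read_config_word(dev, PCI_COMMAND, &cmd))
 *		return -ENODEV;
 *	cmd |= PCI_COMMAND_MASTER;
 *	pci_write_config_word(dev, PCI_COMMAND, cmd);
 */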