/* pciehp_hpc.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * PCI Express PCI Hot Plug Driver
  4. *
  5. * Copyright (C) 1995,2001 Compaq Computer Corporation
  6. * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
  7. * Copyright (C) 2001 IBM Corp.
  8. * Copyright (C) 2003-2004 Intel Corporation
  9. *
  10. * All rights reserved.
  11. *
  12. * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/types.h>
  17. #include <linux/signal.h>
  18. #include <linux/jiffies.h>
  19. #include <linux/kthread.h>
  20. #include <linux/pci.h>
  21. #include <linux/pm_runtime.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/time.h>
  24. #include <linux/slab.h>
  25. #include "../pci.h"
  26. #include "pciehp.h"
/* Return the PCIe port device that hosts this hotplug controller. */
static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
	return ctrl->pcie->port;
}
  31. static irqreturn_t pciehp_isr(int irq, void *dev_id);
  32. static irqreturn_t pciehp_ist(int irq, void *dev_id);
  33. static int pciehp_poll(void *data);
  34. static inline int pciehp_request_irq(struct controller *ctrl)
  35. {
  36. int retval, irq = ctrl->pcie->irq;
  37. if (pciehp_poll_mode) {
  38. ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
  39. "pciehp_poll-%s",
  40. slot_name(ctrl->slot));
  41. return PTR_ERR_OR_ZERO(ctrl->poll_thread);
  42. }
  43. /* Installs the interrupt handler */
  44. retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
  45. IRQF_SHARED, MY_NAME, ctrl);
  46. if (retval)
  47. ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
  48. irq);
  49. return retval;
  50. }
  51. static inline void pciehp_free_irq(struct controller *ctrl)
  52. {
  53. if (pciehp_poll_mode)
  54. kthread_stop(ctrl->poll_thread);
  55. else
  56. free_irq(ctrl->pcie->irq, ctrl);
  57. }
/*
 * Poll Slot Status until the controller reports Command Completed.
 *
 * @timeout: time budget in milliseconds, consumed in 10 ms steps
 *
 * Returns 1 if Command Completed was observed (the RW1C bit is cleared
 * before returning), 0 on timeout or when the device stops responding.
 */
static int pcie_poll_cmd(struct controller *ctrl, int timeout)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;

	while (true) {
		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
		/* All-ones read: device was removed or is inaccessible */
		if (slot_status == (u16) ~0) {
			ctrl_info(ctrl, "%s: no response from device\n",
				  __func__);
			return 0;
		}

		if (slot_status & PCI_EXP_SLTSTA_CC) {
			/* Acknowledge completion by clearing the RW1C bit */
			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
						   PCI_EXP_SLTSTA_CC);
			return 1;
		}
		if (timeout < 0)
			break;
		msleep(10);
		timeout -= 10;
	}
	return 0;	/* timeout */
}
/*
 * Wait for the most recently issued Slot Control command to complete.
 *
 * Waits at most 1 s (2.5 s in poll mode) measured from the time the
 * command was started; uses the Command Completed interrupt when both
 * HPIE and CCIE are enabled, otherwise falls back to polling.
 */
static void pcie_wait_cmd(struct controller *ctrl)
{
	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
	unsigned long duration = msecs_to_jiffies(msecs);
	unsigned long cmd_timeout = ctrl->cmd_started + duration;
	unsigned long now, timeout;
	int rc;

	/*
	 * If the controller does not generate notifications for command
	 * completions, we never need to wait between writes.
	 */
	if (NO_CMD_CMPL(ctrl))
		return;

	/* Nothing outstanding */
	if (!ctrl->cmd_busy)
		return;

	/*
	 * Even if the command has already timed out, we want to call
	 * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
	 */
	now = jiffies;
	if (time_before_eq(cmd_timeout, now))
		timeout = 1;	/* deadline passed; make one minimal attempt */
	else
		timeout = cmd_timeout - now;

	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	else
		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));

	if (!rc)
		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
			  ctrl->slot_ctrl,
			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
/* Slot Control bits whose change does trigger Command Completed on
 * errata-afflicted controllers (see comment inside pcie_do_write_cmd()).
 */
#define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC |	\
			 PCI_EXP_SLTCTL_PIC |	\
			 PCI_EXP_SLTCTL_AIC |	\
			 PCI_EXP_SLTCTL_EIC)

/*
 * Read-modify-write the Slot Control register under ctrl->ctrl_lock.
 *
 * @cmd:  new values for the bits selected by @mask
 * @mask: Slot Control bits to modify
 * @wait: if true, wait for the command to complete before returning
 */
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
			      u16 mask, bool wait)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl_orig, slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	/*
	 * Always wait for any previous command that might still be in progress
	 */
	pcie_wait_cmd(ctrl);

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	/* All-ones read: device is gone, nothing to write */
	if (slot_ctrl == (u16) ~0) {
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		goto out;
	}

	slot_ctrl_orig = slot_ctrl;
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	smp_mb();	/* order cmd_busy store before the register write */
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
	ctrl->cmd_started = jiffies;
	ctrl->slot_ctrl = slot_ctrl;

	/*
	 * Controllers with the Intel CF118 and similar errata advertise
	 * Command Completed support, but they only set Command Completed
	 * if we change the "Control" bits for power, power indicator,
	 * attention indicator, or interlock. If we only change the
	 * "Enable" bits, they never set the Command Completed bit.
	 */
	if (pdev->broken_cmd_compl &&
	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
		ctrl->cmd_busy = 0;

	/*
	 * Optionally wait for the hardware to be ready for a new command,
	 * indicating completion of the above issued command.
	 */
	if (wait)
		pcie_wait_cmd(ctrl);

out:
	mutex_unlock(&ctrl->ctrl_lock);
}
/**
 * pcie_write_cmd - Issue controller command
 * @ctrl: controller to which the command is issued
 * @cmd: command value written to slot control register
 * @mask: bitmask of slot control register to be modified
 *
 * Blocks until the hardware reports the command complete (or times out).
 */
static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, true);
}
/* Same as above without waiting for the hardware to latch; the next
 * command write will wait for this one to complete first.
 */
static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, false);
}
  176. bool pciehp_check_link_active(struct controller *ctrl)
  177. {
  178. struct pci_dev *pdev = ctrl_dev(ctrl);
  179. u16 lnk_status;
  180. bool ret;
  181. pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
  182. ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
  183. if (ret)
  184. ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
  185. return ret;
  186. }
/* Block until the port reports its data link layer as active. */
static void pcie_wait_link_active(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);

	pcie_wait_for_link(pdev, true);
}
  192. static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
  193. {
  194. u32 l;
  195. int count = 0;
  196. int delay = 1000, step = 20;
  197. bool found = false;
  198. do {
  199. found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
  200. count++;
  201. if (found)
  202. break;
  203. msleep(step);
  204. delay -= step;
  205. } while (delay > 0);
  206. if (count > 1 && pciehp_debug)
  207. printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
  208. pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
  209. PCI_FUNC(devfn), count, step, l);
  210. return found;
  211. }
/*
 * Wait for the link to come up after slot power-on and verify that a
 * device answers behind it.  Returns 0 on success, -1 on link training
 * failure or when no device responds.
 */
int pciehp_check_link_status(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	bool found;
	u16 lnk_status;

	/*
	 * Data Link Layer Link Active Reporting must be capable for
	 * hot-plug capable downstream port. But old controller might
	 * not implement it. In this case, we wait for 1000 ms.
	 */
	if (ctrl->link_active_reporting)
		pcie_wait_link_active(ctrl);
	else
		msleep(1000);

	/* wait 100ms before read pci conf, and try in 1s */
	msleep(100);
	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
				  PCI_DEVFN(0, 0));

	/* ignore link or presence changes up to this point */
	if (found)
		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
			   &ctrl->pending_events);

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	/* Link must have finished training and negotiated a nonzero width */
	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
		ctrl_err(ctrl, "link training error: status %#06x\n",
			 lnk_status);
		return -1;
	}

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);

	if (!found)
		return -1;

	return 0;
}
  247. static int __pciehp_link_set(struct controller *ctrl, bool enable)
  248. {
  249. struct pci_dev *pdev = ctrl_dev(ctrl);
  250. u16 lnk_ctrl;
  251. pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
  252. if (enable)
  253. lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
  254. else
  255. lnk_ctrl |= PCI_EXP_LNKCTL_LD;
  256. pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
  257. ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
  258. return 0;
  259. }
/* Bring the link up by clearing the Link Disable bit. */
static int pciehp_link_enable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, true);
}
/*
 * Report the raw Attention + Power Indicator Control field.
 *
 * The combined AIC/PIC field is shifted down by 6 (the bit position of
 * the Attention Indicator Control field in Slot Control) into *status.
 * Always returns 0.
 */
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 *status)
{
	struct slot *slot = hotplug_slot->private;
	struct pci_dev *pdev = ctrl_dev(slot->ctrl);
	u16 slot_ctrl;

	/* Hold the device accessible while touching config space */
	pci_config_pm_runtime_get(pdev);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	pci_config_pm_runtime_put(pdev);
	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
	return 0;
}
  276. void pciehp_get_attention_status(struct slot *slot, u8 *status)
  277. {
  278. struct controller *ctrl = slot->ctrl;
  279. struct pci_dev *pdev = ctrl_dev(ctrl);
  280. u16 slot_ctrl;
  281. pci_config_pm_runtime_get(pdev);
  282. pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
  283. pci_config_pm_runtime_put(pdev);
  284. ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
  285. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
  286. switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
  287. case PCI_EXP_SLTCTL_ATTN_IND_ON:
  288. *status = 1; /* On */
  289. break;
  290. case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
  291. *status = 2; /* Blink */
  292. break;
  293. case PCI_EXP_SLTCTL_ATTN_IND_OFF:
  294. *status = 0; /* Off */
  295. break;
  296. default:
  297. *status = 0xFF;
  298. break;
  299. }
  300. }
  301. void pciehp_get_power_status(struct slot *slot, u8 *status)
  302. {
  303. struct controller *ctrl = slot->ctrl;
  304. struct pci_dev *pdev = ctrl_dev(ctrl);
  305. u16 slot_ctrl;
  306. pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
  307. ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
  308. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
  309. switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
  310. case PCI_EXP_SLTCTL_PWR_ON:
  311. *status = 1; /* On */
  312. break;
  313. case PCI_EXP_SLTCTL_PWR_OFF:
  314. *status = 0; /* Off */
  315. break;
  316. default:
  317. *status = 0xFF;
  318. break;
  319. }
  320. }
  321. void pciehp_get_latch_status(struct slot *slot, u8 *status)
  322. {
  323. struct pci_dev *pdev = ctrl_dev(slot->ctrl);
  324. u16 slot_status;
  325. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  326. *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
  327. }
  328. void pciehp_get_adapter_status(struct slot *slot, u8 *status)
  329. {
  330. struct pci_dev *pdev = ctrl_dev(slot->ctrl);
  331. u16 slot_status;
  332. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  333. *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
  334. }
  335. int pciehp_query_power_fault(struct slot *slot)
  336. {
  337. struct pci_dev *pdev = ctrl_dev(slot->ctrl);
  338. u16 slot_status;
  339. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
  340. return !!(slot_status & PCI_EXP_SLTSTA_PFD);
  341. }
/*
 * Write a raw indicator value into the combined AIC/PIC field.
 *
 * @status is shifted up by 6 to land on the Attention Indicator Control
 * bit position (mirror of pciehp_get_raw_indicator_status()).
 * Always returns 0.
 */
int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 status)
{
	struct slot *slot = hotplug_slot->private;
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);

	/* Hold the device accessible while touching config space */
	pci_config_pm_runtime_get(pdev);
	pcie_write_cmd_nowait(ctrl, status << 6,
			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
	pci_config_pm_runtime_put(pdev);
	return 0;
}
  354. void pciehp_set_attention_status(struct slot *slot, u8 value)
  355. {
  356. struct controller *ctrl = slot->ctrl;
  357. u16 slot_cmd;
  358. if (!ATTN_LED(ctrl))
  359. return;
  360. switch (value) {
  361. case 0: /* turn off */
  362. slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
  363. break;
  364. case 1: /* turn on */
  365. slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
  366. break;
  367. case 2: /* turn blink */
  368. slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
  369. break;
  370. default:
  371. return;
  372. }
  373. pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
  374. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  375. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
  376. }
  377. void pciehp_green_led_on(struct slot *slot)
  378. {
  379. struct controller *ctrl = slot->ctrl;
  380. if (!PWR_LED(ctrl))
  381. return;
  382. pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
  383. PCI_EXP_SLTCTL_PIC);
  384. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  385. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  386. PCI_EXP_SLTCTL_PWR_IND_ON);
  387. }
  388. void pciehp_green_led_off(struct slot *slot)
  389. {
  390. struct controller *ctrl = slot->ctrl;
  391. if (!PWR_LED(ctrl))
  392. return;
  393. pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
  394. PCI_EXP_SLTCTL_PIC);
  395. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  396. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  397. PCI_EXP_SLTCTL_PWR_IND_OFF);
  398. }
  399. void pciehp_green_led_blink(struct slot *slot)
  400. {
  401. struct controller *ctrl = slot->ctrl;
  402. if (!PWR_LED(ctrl))
  403. return;
  404. pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
  405. PCI_EXP_SLTCTL_PIC);
  406. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  407. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  408. PCI_EXP_SLTCTL_PWR_IND_BLINK);
  409. }
/*
 * Power on the slot: clear any stale power-fault status first, assert
 * slot power, then re-enable the link.  Returns 0 on success or the
 * error from pciehp_link_enable().
 */
int pciehp_power_on_slot(struct slot *slot)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;
	int retval;

	/* Clear power-fault bit from previous power failures */
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	if (slot_status & PCI_EXP_SLTSTA_PFD)
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
					   PCI_EXP_SLTSTA_PFD);
	ctrl->power_fault_detected = 0;

	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_ON);

	retval = pciehp_link_enable(ctrl);
	if (retval)
		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
	return retval;
}
  431. void pciehp_power_off_slot(struct slot *slot)
  432. {
  433. struct controller *ctrl = slot->ctrl;
  434. pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
  435. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  436. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
  437. PCI_EXP_SLTCTL_PWR_OFF);
  438. }
/*
 * Hardirq half of the hotplug interrupt: collect and acknowledge Slot
 * Status events, service Command Completed inline (pcie_wait_cmd() may
 * be blocked on it), and defer everything else to pciehp_ist().
 */
static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct device *parent = pdev->dev.parent;
	u16 status, events = 0;

	/*
	 * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
	 */
	if (pdev->current_state == PCI_D3cold)
		return IRQ_NONE;

	/*
	 * Keep the port accessible by holding a runtime PM ref on its parent.
	 * Defer resume of the parent to the IRQ thread if it's suspended.
	 * Mask the interrupt until then.
	 */
	if (parent) {
		pm_runtime_get_noresume(parent);
		if (!pm_runtime_active(parent)) {
			pm_runtime_put(parent);
			disable_irq_nosync(irq);
			atomic_or(RERUN_ISR, &ctrl->pending_events);
			return IRQ_WAKE_THREAD;
		}
	}

read_status:
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
	/* All-ones read: device was removed or is inaccessible */
	if (status == (u16) ~0) {
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	/*
	 * Slot Status contains plain status bits as well as event
	 * notification bits; right now we only want the event bits.
	 */
	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
		  PCI_EXP_SLTSTA_DLLSC;

	/*
	 * If we've already reported a power fault, don't report it again
	 * until we've done something to handle it.
	 */
	if (ctrl->power_fault_detected)
		status &= ~PCI_EXP_SLTSTA_PFD;

	events |= status;
	if (!events) {
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	if (status) {
		/* Acknowledge (RW1C) everything collected so far */
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);

		/*
		 * In MSI mode, all event bits must be zero before the port
		 * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
		 * So re-read the Slot Status register in case a bit was set
		 * between read and write.
		 */
		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
			goto read_status;
	}

	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
	if (parent)
		pm_runtime_put(parent);

	/*
	 * Command Completed notifications are not deferred to the
	 * IRQ thread because it may be waiting for their arrival.
	 */
	if (events & PCI_EXP_SLTSTA_CC) {
		ctrl->cmd_busy = 0;
		smp_mb();	/* pairs with the barrier in pcie_do_write_cmd() */
		wake_up(&ctrl->queue);
		if (events == PCI_EXP_SLTSTA_CC)
			return IRQ_HANDLED;
		events &= ~PCI_EXP_SLTSTA_CC;
	}

	if (pdev->ignore_hotplug) {
		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
		return IRQ_HANDLED;
	}

	/* Save pending events for consumption by IRQ thread. */
	atomic_or(events, &ctrl->pending_events);
	return IRQ_WAKE_THREAD;
}
/*
 * IRQ thread: consume the events accumulated by pciehp_isr() and run
 * the corresponding slot state-machine handlers.
 */
static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct slot *slot = ctrl->slot;
	irqreturn_t ret;
	u32 events;

	ctrl->ist_running = true;
	pci_config_pm_runtime_get(pdev);

	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
		ret = pciehp_isr(irq, dev_id);
		enable_irq(irq);	/* was masked by pciehp_isr() */
		if (ret != IRQ_WAKE_THREAD)
			goto out;
	}

	/* Ensure no hardirq instance is still updating pending_events */
	synchronize_hardirq(irq);
	events = atomic_xchg(&ctrl->pending_events, 0);
	if (!events) {
		ret = IRQ_NONE;
		goto out;
	}

	/* Check Attention Button Pressed */
	if (events & PCI_EXP_SLTSTA_ABP) {
		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
			  slot_name(slot));
		pciehp_handle_button_press(slot);
	}

	/* Check Power Fault Detected */
	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
		ctrl->power_fault_detected = 1;
		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
		pciehp_set_attention_status(slot, 1);
		pciehp_green_led_off(slot);
	}

	/*
	 * Disable requests have higher priority than Presence Detect Changed
	 * or Data Link Layer State Changed events.
	 */
	down_read(&ctrl->reset_lock);
	if (events & DISABLE_SLOT)
		pciehp_handle_disable_request(slot);
	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
		pciehp_handle_presence_or_link_change(slot, events);
	up_read(&ctrl->reset_lock);

	ret = IRQ_HANDLED;
out:
	pci_config_pm_runtime_put(pdev);
	ctrl->ist_running = false;
	wake_up(&ctrl->requester);
	return ret;
}
/*
 * Kernel thread used instead of an interrupt when pciehp_poll_mode is
 * set: periodically runs the ISR/IST pair to pick up slot events.
 */
static int pciehp_poll(void *data)
{
	struct controller *ctrl = data;

	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */

	while (!kthread_should_stop()) {
		/* poll for interrupt events or user requests */
		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
		       atomic_read(&ctrl->pending_events))
			pciehp_ist(IRQ_NOTCONNECTED, ctrl);

		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
			pciehp_poll_time = 2; /* clamp to sane value */

		schedule_timeout_idle(pciehp_poll_time * HZ);
	}

	return 0;
}
/* Enable the hotplug event notifications this driver handles. */
static void pcie_enable_notification(struct controller *ctrl)
{
	u16 cmd, mask;

	/*
	 * TBD: Power fault detected software notification support.
	 *
	 * Power fault detected software notification is not enabled
	 * now, because it caused power fault detected interrupt storm
	 * on some machines. On those machines, power fault detected
	 * bit in the slot status register was set again immediately
	 * when it is cleared in the interrupt service routine, and
	 * next power fault detected interrupt was notified again.
	 */

	/*
	 * Always enable link events: thus link-up and link-down shall
	 * always be treated as hotplug and unplug respectively. Enable
	 * presence detect only if Attention Button is not present.
	 */
	cmd = PCI_EXP_SLTCTL_DLLSCE;
	if (ATTN_BUTTN(ctrl))
		cmd |= PCI_EXP_SLTCTL_ABPE;
	else
		cmd |= PCI_EXP_SLTCTL_PDCE;
	/* Interrupt delivery is pointless (and noisy) in poll mode */
	if (!pciehp_poll_mode)
		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;

	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		PCI_EXP_SLTCTL_DLLSCE);

	pcie_write_cmd_nowait(ctrl, cmd, mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
  625. static void pcie_disable_notification(struct controller *ctrl)
  626. {
  627. u16 mask;
  628. mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
  629. PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
  630. PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
  631. PCI_EXP_SLTCTL_DLLSCE);
  632. pcie_write_cmd(ctrl, 0, mask);
  633. ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
  634. pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
  635. }
/* Clear latched presence-detect and link-state change events (RW1C). */
void pcie_clear_hotplug_events(struct controller *ctrl)
{
	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
/*
 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
 * bus reset of the bridge, but at the same time we want to ensure that it is
 * not seen as a hot-unplug, followed by the hot-plug of the device. Thus,
 * disable link state notification and presence detection change notification
 * momentarily, if we see that they could interfere. Also, clear any spurious
 * events after.
 */
int pciehp_reset_slot(struct slot *slot, int probe)
{
	struct controller *ctrl = slot->ctrl;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 stat_mask = 0, ctrl_mask = 0;
	int rc;

	/* probe phase: report capability only, do not reset */
	if (probe)
		return 0;

	/* Exclude the IRQ thread while notifications are suppressed */
	down_write(&ctrl->reset_lock);

	if (!ATTN_BUTTN(ctrl)) {
		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
		stat_mask |= PCI_EXP_SLTSTA_PDC;
	}
	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
	stat_mask |= PCI_EXP_SLTSTA_DLLSC;

	pcie_write_cmd(ctrl, 0, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);

	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);

	/* Clear events the reset itself latched, then re-enable them */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);

	up_write(&ctrl->reset_lock);
	return rc;
}
  675. int pcie_init_notification(struct controller *ctrl)
  676. {
  677. if (pciehp_request_irq(ctrl))
  678. return -1;
  679. pcie_enable_notification(ctrl);
  680. ctrl->notification_enabled = 1;
  681. return 0;
  682. }
  683. void pcie_shutdown_notification(struct controller *ctrl)
  684. {
  685. if (ctrl->notification_enabled) {
  686. pcie_disable_notification(ctrl);
  687. pciehp_free_irq(ctrl);
  688. ctrl->notification_enabled = 0;
  689. }
  690. }
  691. static int pcie_init_slot(struct controller *ctrl)
  692. {
  693. struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
  694. struct slot *slot;
  695. slot = kzalloc(sizeof(*slot), GFP_KERNEL);
  696. if (!slot)
  697. return -ENOMEM;
  698. down_read(&pci_bus_sem);
  699. slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
  700. up_read(&pci_bus_sem);
  701. slot->ctrl = ctrl;
  702. mutex_init(&slot->lock);
  703. INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
  704. ctrl->slot = slot;
  705. return 0;
  706. }
  707. static void pcie_cleanup_slot(struct controller *ctrl)
  708. {
  709. struct slot *slot = ctrl->slot;
  710. cancel_delayed_work_sync(&slot->work);
  711. kfree(slot);
  712. }
  713. static inline void dbg_ctrl(struct controller *ctrl)
  714. {
  715. struct pci_dev *pdev = ctrl->pcie->port;
  716. u16 reg16;
  717. if (!pciehp_debug)
  718. return;
  719. ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
  720. pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
  721. ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
  722. pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
  723. ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
  724. }
/* Render a capability bit as '+' (present) or '-' (absent) for logging. */
#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')

/*
 * Allocate and initialize the hotplug controller for port service @dev:
 * read and sanitize Slot Capabilities, set up locks and wait queues,
 * clear stale events, and power off an empty-but-powered slot.
 * Returns the controller, or NULL on failure.
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, link_cap;
	u8 occupied, poweron;
	struct pci_dev *pdev = dev->port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto abort;

	ctrl->pcie = dev;
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);

	/* Platform wants user-controlled indicators: hide AIP/PIP */
	if (pdev->hotplug_user_indicators)
		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);

	/*
	 * We assume no Thunderbolt controllers support Command Complete events,
	 * but some controllers falsely claim they do.
	 */
	if (pdev->is_thunderbolt)
		slot_cap |= PCI_EXP_SLTCAP_NCCS;

	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	init_rwsem(&ctrl->reset_lock);
	init_waitqueue_head(&ctrl->requester);
	init_waitqueue_head(&ctrl->queue);
	dbg_ctrl(ctrl);

	/* Check if Data Link Layer Link Active Reporting is implemented */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
	if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
		ctrl->link_active_reporting = 1;

	/* Clear all remaining event bits in Slot Status register. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
		PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
		PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);

	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
		FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
		FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
		FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
		FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
		pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");

	if (pcie_init_slot(ctrl))
		goto abort_ctrl;

	/*
	 * If empty slot's power status is on, turn power off. The IRQ isn't
	 * requested yet, so avoid triggering a notification with this command.
	 */
	if (POWER_CTRL(ctrl)) {
		pciehp_get_adapter_status(ctrl->slot, &occupied);
		pciehp_get_power_status(ctrl->slot, &poweron);
		if (!occupied && poweron) {
			pcie_disable_notification(ctrl);
			pciehp_power_off_slot(ctrl->slot);
		}
	}

	return ctrl;

abort_ctrl:
	kfree(ctrl);
abort:
	return NULL;
}
/* Free the slot and controller allocated by pcie_init(). */
void pciehp_release_ctrl(struct controller *ctrl)
{
	pcie_cleanup_slot(ctrl);
	kfree(ctrl);
}
  798. static void quirk_cmd_compl(struct pci_dev *pdev)
  799. {
  800. u32 slot_cap;
  801. if (pci_is_pcie(pdev)) {
  802. pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
  803. if (slot_cap & PCI_EXP_SLTCAP_HPC &&
  804. !(slot_cap & PCI_EXP_SLTCAP_NCCS))
  805. pdev->broken_cmd_compl = 1;
  806. }
  807. }
  808. DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
  809. PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
  810. DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
  811. PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
  812. DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
  813. PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);