/* eeh_pseries.c */
/*
 * The file intends to implement the platform dependent EEH operations on pseries.
 * Actually, the pseries platform is built based on RTAS heavily. That means the
 * pseries platform dependent EEH operations will be built on RTAS calls. The functions
 * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
 * been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
  27. #include <linux/atomic.h>
  28. #include <linux/delay.h>
  29. #include <linux/export.h>
  30. #include <linux/init.h>
  31. #include <linux/list.h>
  32. #include <linux/of.h>
  33. #include <linux/pci.h>
  34. #include <linux/proc_fs.h>
  35. #include <linux/rbtree.h>
  36. #include <linux/sched.h>
  37. #include <linux/seq_file.h>
  38. #include <linux/spinlock.h>
  39. #include <asm/eeh.h>
  40. #include <asm/eeh_event.h>
  41. #include <asm/io.h>
  42. #include <asm/machdep.h>
  43. #include <asm/ppc-pci.h>
  44. #include <asm/rtas.h>
/*
 * RTAS tokens for the EEH-related firmware calls. Resolved once in
 * pseries_eeh_init(); a token equal to RTAS_UNKNOWN_SERVICE means the
 * call is not implemented by this firmware.
 */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
#ifdef CONFIG_PCI_IOV
/**
 * pseries_pcibios_bus_add_device - SR-IOV VF fixups at bus-add time
 * @pdev: newly added PCI device
 *
 * Only acts on virtual functions: copies the device/vendor/class IDs
 * into the pci_dn, inherits the PE number from the owning physical
 * function's pe_num_map, and re-homes the EEH device into the VF PE.
 * Installed as ppc_md.pcibios_bus_add_device by pseries_eeh_init().
 */
void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pci_dn *physfn_pdn;
	struct eeh_dev *edev;

	/* Physical functions need none of this */
	if (!pdev->is_virtfn)
		return;

	pdn->device_id = pdev->device;
	pdn->vendor_id = pdev->vendor;
	pdn->class_code = pdev->class;
	/*
	 * Last allow unfreeze return code used for retrieval
	 * by user space in eeh-sysfs to show the last command
	 * completion from platform.
	 */
	pdn->last_allow_rc = 0;
	/* The VF's PE number was mapped when the PF enabled SR-IOV */
	physfn_pdn = pci_get_pdn(pdev->physfn);
	pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index];
	edev = pdn_to_eeh_dev(pdn);
	/*
	 * The following operations will fail if VF's sysfs files
	 * aren't created or its resources aren't finalized.
	 */
	eeh_add_device_early(pdn);
	eeh_add_device_late(pdev);
	edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
	eeh_rmv_from_parent_pe(edev); /* Remove as it is adding to bus pe */
	eeh_add_to_parent_pe(edev);   /* Add as VF PE type */
	eeh_sysfs_add_device(pdev);
}
#endif
/*
 * Buffer for reporting slot-error-detail rtas calls. Its here
 * in BSS, and not dynamically alloced, so that it ends up in
 * RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);	/* serializes access to slot_errbuf */
static int eeh_error_buf_size;			/* usable log size, set at init */
  94. /**
  95. * pseries_eeh_init - EEH platform dependent initialization
  96. *
  97. * EEH platform dependent initialization on pseries.
  98. */
  99. static int pseries_eeh_init(void)
  100. {
  101. /* figure out EEH RTAS function call tokens */
  102. ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
  103. ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
  104. ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
  105. ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
  106. ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
  107. ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
  108. ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
  109. ibm_configure_pe = rtas_token("ibm,configure-pe");
  110. /*
  111. * ibm,configure-pe and ibm,configure-bridge have the same semantics,
  112. * however ibm,configure-pe can be faster. If we can't find
  113. * ibm,configure-pe then fall back to using ibm,configure-bridge.
  114. */
  115. if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
  116. ibm_configure_pe = rtas_token("ibm,configure-bridge");
  117. /*
  118. * Necessary sanity check. We needn't check "get-config-addr-info"
  119. * and its variant since the old firmware probably support address
  120. * of domain/bus/slot/function for EEH RTAS operations.
  121. */
  122. if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
  123. ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
  124. (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
  125. ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
  126. ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
  127. ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
  128. pr_info("EEH functionality not supported\n");
  129. return -EINVAL;
  130. }
  131. /* Initialize error log lock and size */
  132. spin_lock_init(&slot_errbuf_lock);
  133. eeh_error_buf_size = rtas_token("rtas-error-log-max");
  134. if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
  135. pr_info("%s: unknown EEH error log size\n",
  136. __func__);
  137. eeh_error_buf_size = 1024;
  138. } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
  139. pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
  140. __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
  141. eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
  142. }
  143. /* Set EEH probe mode */
  144. eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);
  145. #ifdef CONFIG_PCI_IOV
  146. /* Set EEH machine dependent code */
  147. ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;
  148. #endif
  149. return 0;
  150. }
  151. static int pseries_eeh_cap_start(struct pci_dn *pdn)
  152. {
  153. u32 status;
  154. if (!pdn)
  155. return 0;
  156. rtas_read_config(pdn, PCI_STATUS, 2, &status);
  157. if (!(status & PCI_STATUS_CAP_LIST))
  158. return 0;
  159. return PCI_CAPABILITY_LIST;
  160. }
/*
 * Walk the standard PCI capability list via RTAS config reads and
 * return the config-space offset of capability @cap (PCI_CAP_ID_*),
 * or 0 if it is absent.
 */
static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		/* Follow the "next capability" pointer */
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)		/* below the capability region: end of list */
			break;
		pos &= ~3;		/* capability offsets are dword aligned */
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)		/* invalid ID terminates the walk */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}
/*
 * Walk the PCIe extended capability list (config space 0x100..0xfff)
 * via RTAS config reads and return the offset of extended capability
 * @cap (PCI_EXT_CAP_ID_*), or 0 when the device is not PCIe or the
 * capability is absent.
 */
static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;			/* first extended capability offset */
	int ttl = (4096 - 256) / 8;	/* bounds the walk: minimum 8 bytes per capability */

	/* Extended capabilities exist only on PCIe devices */
	if (!edev || !edev->pcie_cap)
		return 0;

	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)		/* next pointer below 0x100 ends the list */
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
/**
 * pseries_eeh_probe - EEH probe on the given device
 * @pdn: PCI device node
 * @data: Unused
 *
 * When EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if it supports EEH. The function
 * is introduced for the purpose. It builds a temporary ("fake") PE
 * on the stack so that the enable and get-state RTAS calls can be
 * issued before the device has a real PE, then attaches the eeh_dev
 * to its parent PE on success.
 */
static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	/* Retrieve OF node and eeh device; skip devices already in a PE */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || edev->pe)
		return NULL;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return NULL;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/*
	 * Update class code and mode of eeh device. We need
	 * correctly reflects that current device is root port
	 * or PCIe switch downstream port.
	 */
	edev->class_code = pdn->class_code;
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;	/* clear the device-type mode bits */
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Initialize the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = pdn->phb;
	pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	/* Enable EEH on the device */
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (!ret) {
		/* Retrieve PE address */
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/* Some older systems (Power4) allow the ibm,set-eeh-option
		 * call to succeed even on nodes where EEH is not supported.
		 * Verify support explicitly.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_add_flag(EEH_ENABLED);
			eeh_add_to_parent_pe(edev);

			pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
				 __func__, pdn->busno, PCI_SLOT(pdn->devfn),
				 PCI_FUNC(pdn->devfn), pe.phb->global_number,
				 pe.addr);
		} else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
			   (pdn_to_eeh_dev(pdn->parent))->pe) {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported.
			 */
			edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}
  290. /**
  291. * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
  292. * @pe: EEH PE
  293. * @option: operation to be issued
  294. *
  295. * The function is used to control the EEH functionality globally.
  296. * Currently, following options are support according to PAPR:
  297. * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
  298. */
  299. static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
  300. {
  301. int ret = 0;
  302. int config_addr;
  303. /*
  304. * When we're enabling or disabling EEH functioality on
  305. * the particular PE, the PE config address is possibly
  306. * unavailable. Therefore, we have to figure it out from
  307. * the FDT node.
  308. */
  309. switch (option) {
  310. case EEH_OPT_DISABLE:
  311. case EEH_OPT_ENABLE:
  312. case EEH_OPT_THAW_MMIO:
  313. case EEH_OPT_THAW_DMA:
  314. config_addr = pe->config_addr;
  315. if (pe->addr)
  316. config_addr = pe->addr;
  317. break;
  318. case EEH_OPT_FREEZE_PE:
  319. /* Not support */
  320. return 0;
  321. default:
  322. pr_err("%s: Invalid option %d\n",
  323. __func__, option);
  324. return -EINVAL;
  325. }
  326. ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
  327. config_addr, BUID_HI(pe->phb->buid),
  328. BUID_LO(pe->phb->buid), option);
  329. return ret;
  330. }
/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. Actually, there're 2 RTAS
 * function calls dedicated for the purpose. We need implement
 * it through the new function (ibm,get-config-addr-info2) and then
 * the old one (ibm,get-config-addr-info). Besides, you should make
 * sure the config address is figured out from FDT node before
 * calling the function.
 *
 * It's notable that zero'ed return value means invalid PE config
 * address.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there has one PE
		 * associated with the device. Otherwise, PE address is
		 * meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	/* Fall back to the older RTAS call */
	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}
/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On RTAS compliant
 * pseries platform, there already has one dedicated RTAS function
 * for the purpose. It's notable that the associated PE config address
 * might be ready when calling the function. Therefore, endeavour to
 * use the PE config address if possible. Further more, there're 2
 * RTAS calls for the purpose, we need to try the new one and back
 * to the old one if the new one couldn't work properly.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info: the old call has no rets[2] */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out. rets[1] zero means EEH not supported */
	if (!rets[1])
		return EEH_STATE_NOT_SUPPORT;

	/* Map the firmware state in rets[0] to EEH_STATE_* flags */
	switch(rets[0]) {
	case 0:
		result = EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 1:
		result = EEH_STATE_RESET_ACTIVE |
			 EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 2:
		/* Frozen: neither MMIO nor DMA active */
		result = 0;
		break;
	case 4:
		result = EEH_STATE_MMIO_ENABLED;
		break;
	case 5:
		/* Temporarily unavailable; rets[2] carries the wait hint */
		if (rets[2]) {
			if (state) *state = rets[2];
			result = EEH_STATE_UNAVAILABLE;
		} else {
			result = EEH_STATE_NOT_SUPPORT;
		}
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}
  453. /**
  454. * pseries_eeh_reset - Reset the specified PE
  455. * @pe: EEH PE
  456. * @option: reset option
  457. *
  458. * Reset the specified PE
  459. */
  460. static int pseries_eeh_reset(struct eeh_pe *pe, int option)
  461. {
  462. int config_addr;
  463. int ret;
  464. /* Figure out PE address */
  465. config_addr = pe->config_addr;
  466. if (pe->addr)
  467. config_addr = pe->addr;
  468. /* Reset PE through RTAS call */
  469. ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
  470. config_addr, BUID_HI(pe->phb->buid),
  471. BUID_LO(pe->phb->buid), option);
  472. /* If fundamental-reset not supported, try hot-reset */
  473. if (option == EEH_RESET_FUNDAMENTAL &&
  474. ret == -8) {
  475. option = EEH_RESET_HOT;
  476. ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
  477. config_addr, BUID_HI(pe->phb->buid),
  478. BUID_LO(pe->phb->buid), option);
  479. }
  480. /* We need reset hold or settlement delay */
  481. if (option == EEH_RESET_FUNDAMENTAL ||
  482. option == EEH_RESET_HOT)
  483. msleep(EEH_PE_RST_HOLD_TIME);
  484. else
  485. msleep(EEH_PE_RST_SETTLE_TIME);
  486. return ret;
  487. }
  488. /**
  489. * pseries_eeh_wait_state - Wait for PE state
  490. * @pe: EEH PE
  491. * @max_wait: maximal period in millisecond
  492. *
  493. * Wait for the state of associated PE. It might take some time
  494. * to retrieve the PE's state.
  495. */
  496. static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
  497. {
  498. int ret;
  499. int mwait;
  500. /*
  501. * According to PAPR, the state of PE might be temporarily
  502. * unavailable. Under the circumstance, we have to wait
  503. * for indicated time determined by firmware. The maximal
  504. * wait time is 5 minutes, which is acquired from the original
  505. * EEH implementation. Also, the original implementation
  506. * also defined the minimal wait time as 1 second.
  507. */
  508. #define EEH_STATE_MIN_WAIT_TIME (1000)
  509. #define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
  510. while (1) {
  511. ret = pseries_eeh_get_state(pe, &mwait);
  512. /*
  513. * If the PE's state is temporarily unavailable,
  514. * we have to wait for the specified time. Otherwise,
  515. * the PE's state will be returned immediately.
  516. */
  517. if (ret != EEH_STATE_UNAVAILABLE)
  518. return ret;
  519. if (max_wait <= 0) {
  520. pr_warn("%s: Timeout when getting PE's state (%d)\n",
  521. __func__, max_wait);
  522. return EEH_STATE_NOT_SUPPORT;
  523. }
  524. if (mwait <= 0) {
  525. pr_warn("%s: Firmware returned bad wait value %d\n",
  526. __func__, mwait);
  527. mwait = EEH_STATE_MIN_WAIT_TIME;
  528. } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
  529. pr_warn("%s: Firmware returned too long wait value %d\n",
  530. __func__, mwait);
  531. mwait = EEH_STATE_MAX_WAIT_TIME;
  532. }
  533. max_wait -= mwait;
  534. msleep(mwait);
  535. }
  536. return EEH_STATE_NOT_SUPPORT;
  537. }
  538. /**
  539. * pseries_eeh_get_log - Retrieve error log
  540. * @pe: EEH PE
  541. * @severity: temporary or permanent error log
  542. * @drv_log: driver log to be combined with retrieved error log
  543. * @len: length of driver log
  544. *
  545. * Retrieve the temporary or permanent error from the PE.
  546. * Actually, the error will be retrieved through the dedicated
  547. * RTAS call.
  548. */
  549. static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
  550. {
  551. int config_addr;
  552. unsigned long flags;
  553. int ret;
  554. spin_lock_irqsave(&slot_errbuf_lock, flags);
  555. memset(slot_errbuf, 0, eeh_error_buf_size);
  556. /* Figure out the PE address */
  557. config_addr = pe->config_addr;
  558. if (pe->addr)
  559. config_addr = pe->addr;
  560. ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
  561. BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
  562. virt_to_phys(drv_log), len,
  563. virt_to_phys(slot_errbuf), eeh_error_buf_size,
  564. severity);
  565. if (!ret)
  566. log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
  567. spin_unlock_irqrestore(&slot_errbuf_lock, flags);
  568. return ret;
  569. }
/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE would be recovered
 * again. Retries on RTAS busy/extended-delay statuses for at most
 * 200ms before giving up.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;
	/* Waiting 0.2s maximum before skipping configuration */
	int max_wait = 200;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	while (max_wait > 0) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
		if (!ret)
			return ret;

		/*
		 * If RTAS returns a delay value that's above 100ms, cut it
		 * down to 100ms in case firmware made a mistake.  For more
		 * on how these delay values work see rtas_busy_delay_time
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN+2;

		max_wait -= rtas_busy_delay_time(ret);
		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
		__func__, pe->phb->global_number, pe->addr, ret);
	return ret;
}
/**
 * pseries_eeh_read_config - Read PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device; a thin wrapper
 * around the RTAS config access helper.
 */
static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
	return rtas_read_config(pdn, where, size, val);
}
/**
 * pseries_eeh_write_config - Write PCI config space
 * @pdn: PCI device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device; a thin wrapper
 * around the RTAS config access helper.
 */
static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
	return rtas_write_config(pdn, where, size, val);
}
/*
 * Restore device config space after a reset. Only SR-IOV virtual
 * functions are actively restored (via eeh_restore_vf_config());
 * other devices are left to firmware/generic code. Returns 0 on
 * success, -EEXIST when no EEH device is attached, -EIO when the VF
 * restore fails.
 */
static int pseries_eeh_restore_config(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	s64 ret = 0;

	if (!edev)
		return -EEXIST;

	/*
	 * FIXME: The MPS, error routing rules, timeout setting are worthy
	 * to be exported by firmware in extendible way.
	 */
	if (edev->physfn)
		ret = eeh_restore_vf_config(pdn);

	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->pe_config_addr, ret);
		return -EIO;
	}

	return ret;
}
#ifdef CONFIG_PCI_IOV
/*
 * Issue the "ibm,open-sriov-allow-unfreeze" RTAS call for the PE
 * numbers staged in @vf_pe_array (big-endian u16 entries, @cur_vfs of
 * them valid). The arguments are copied into the global rtas_data_buf,
 * which lives where RTAS can access it, under rtas_data_buf_lock.
 * Returns the RTAS status (0 on success).
 */
int pseries_send_allow_unfreeze(struct pci_dn *pdn,
				u16 *vf_pe_array, int cur_vfs)
{
	int rc;
	int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
	unsigned long buid, addr;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;

	spin_lock(&rtas_data_buf_lock);
	/* Stage the whole buffer; callers allocate RTAS_DATA_BUF_SIZE */
	memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
		       addr,
		       BUID_HI(buid),
		       BUID_LO(buid),
		       rtas_data_buf, cur_vfs * sizeof(u16));
	spin_unlock(&rtas_data_buf_lock);

	if (rc)
		pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
			__func__,
			pdn->phb->global_number, addr, rc);

	return rc;
}
/*
 * Propagate an allow-unfreeze request for @edev. For a physical
 * function, collect the PE numbers of all its VFs and issue a single
 * call covering them, recording the result in each VF's pci_dn; for a
 * virtual function, issue the call for just its own PE number through
 * the owning PF. Returns 0 on success, -ENOMEM when the staging
 * buffer cannot be allocated, otherwise the RTAS status.
 */
static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
	struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
	int cur_vfs = 0, rc = 0, vf_index, bus, devfn;
	u16 *vf_pe_array;

	vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!vf_pe_array)
		return -ENOMEM;

	if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
		if (edev->pdev->is_physfn) {
			cur_vfs = pci_num_vf(edev->pdev);
			pdn = eeh_dev_to_pdn(edev);
			parent = pdn->parent;

			/* Stage every VF's PE number (big-endian for RTAS) */
			for (vf_index = 0; vf_index < cur_vfs; vf_index++)
				vf_pe_array[vf_index] =
					cpu_to_be16(pdn->pe_num_map[vf_index]);

			rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
							 cur_vfs);
			pdn->last_allow_rc = rc;

			/*
			 * Mirror the result into each VF's pci_dn. Note:
			 * "pdn" is reused as the list iterator below, so it
			 * no longer points at the PF's node afterwards.
			 */
			for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
				list_for_each_entry_safe(pdn, tmp,
							 &parent->child_list,
							 list) {
					bus = pci_iov_virtfn_bus(edev->pdev,
								 vf_index);
					devfn = pci_iov_virtfn_devfn(edev->pdev,
								     vf_index);
					if (pdn->busno != bus ||
					    pdn->devfn != devfn)
						continue;
					pdn->last_allow_rc = rc;
				}
			}
		} else {
			/* VF: ask through the owning physical function */
			pdn = pci_get_pdn(edev->pdev);
			vf_pe_array[0] = cpu_to_be16(pdn->pe_number);
			physfn_pdn = pci_get_pdn(edev->physfn);
			rc = pseries_send_allow_unfreeze(physfn_pdn,
							 vf_pe_array, 1);
			pdn->last_allow_rc = rc;
		}
	}

	kfree(vf_pe_array);
	return rc;
}
  724. static int pseries_notify_resume(struct pci_dn *pdn)
  725. {
  726. struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
  727. if (!edev)
  728. return -EEXIST;
  729. if (rtas_token("ibm,open-sriov-allow-unfreeze")
  730. == RTAS_UNKNOWN_SERVICE)
  731. return -EINVAL;
  732. if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
  733. return pseries_call_allow_unfreeze(edev);
  734. return 0;
  735. }
  736. #endif
/* Platform dependent EEH operations for pseries, registered at boot */
static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.wait_state		= pseries_eeh_wait_state,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge       = pseries_eeh_configure_bridge,
	.err_inject		= NULL,	/* not supported on pseries */
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,	/* not supported on pseries */
	.restore_config		= pseries_eeh_restore_config,
#ifdef CONFIG_PCI_IOV
	.notify_resume		= pseries_notify_resume
#endif
};
  757. /**
  758. * eeh_pseries_init - Register platform dependent EEH operations
  759. *
  760. * EEH initialization on pseries platform. This function should be
  761. * called before any EEH related functions.
  762. */
  763. static int __init eeh_pseries_init(void)
  764. {
  765. int ret;
  766. ret = eeh_ops_register(&pseries_eeh_ops);
  767. if (!ret)
  768. pr_info("EEH: pSeries platform initialized\n");
  769. else
  770. pr_info("EEH: pSeries platform initialization failure (%d)\n",
  771. ret);
  772. return ret;
  773. }
  774. machine_early_initcall(pseries, eeh_pseries_init);