pci.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129
  1. /*
  2. * Support PCI/PCIe on PowerNV platforms
  3. *
  4. * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/pci.h>
  13. #include <linux/delay.h>
  14. #include <linux/string.h>
  15. #include <linux/init.h>
  16. #include <linux/irq.h>
  17. #include <linux/io.h>
  18. #include <linux/msi.h>
  19. #include <linux/iommu.h>
  20. #include <linux/sched/mm.h>
  21. #include <asm/sections.h>
  22. #include <asm/io.h>
  23. #include <asm/prom.h>
  24. #include <asm/pci-bridge.h>
  25. #include <asm/machdep.h>
  26. #include <asm/msi_bitmap.h>
  27. #include <asm/ppc-pci.h>
  28. #include <asm/pnv-pci.h>
  29. #include <asm/opal.h>
  30. #include <asm/iommu.h>
  31. #include <asm/tce.h>
  32. #include <asm/firmware.h>
  33. #include <asm/eeh_event.h>
  34. #include <asm/eeh.h>
  35. #include "powernv.h"
  36. #include "pci.h"
/* Serialises updates to the P2P initiator/target reference counts below. */
static DEFINE_MUTEX(p2p_mutex);
/* NOTE(review): presumably guards tunnel BAR setup (user not visible in this
 * chunk) — confirm against pnv_pci_set_tunnel_bar(). */
static DEFINE_MUTEX(tunnel_mutex);
/*
 * Resolve the OPAL slot id for device node @np.
 *
 * The slot id combines the PHB id (from the ancestor PHB node's
 * "ibm,opal-phbid" property) with the device's bus/devfn, extracted
 * from its own "reg" property.  We walk up the device tree until we
 * either leave the PCI hierarchy (no pci_dn attached) or find an
 * "ibm,ioda2-phb" compatible ancestor.
 *
 * Returns 0 on success, -ENXIO if a required property is missing,
 * -ENODEV if no IODA2 PHB ancestor exists.
 */
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	/* "reg" is a phys.hi cell: bus/dev/fn live in bits 8-23 */
	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		/* Not the PHB itself yet — keep walking up.
		 * NOTE(review): @parent is put before the next
		 * of_get_parent(parent) call; this relies on another
		 * reference keeping the node alive — confirm. */
		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		/* NOTE(review): the reference taken on @parent is not
		 * dropped on this success path — looks like a leaked
		 * of_node reference; verify intent. */
		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
  69. int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
  70. {
  71. int64_t rc;
  72. if (!opal_check_token(OPAL_GET_DEVICE_TREE))
  73. return -ENXIO;
  74. rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
  75. if (rc < OPAL_SUCCESS)
  76. return -EIO;
  77. return rc;
  78. }
  79. EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
  80. int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
  81. {
  82. int64_t rc;
  83. if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
  84. return -ENXIO;
  85. rc = opal_pci_get_presence_state(id, (uint64_t)state);
  86. if (rc != OPAL_SUCCESS)
  87. return -EIO;
  88. return 0;
  89. }
  90. EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
  91. int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
  92. {
  93. int64_t rc;
  94. if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
  95. return -ENXIO;
  96. rc = opal_pci_get_power_state(id, (uint64_t)state);
  97. if (rc != OPAL_SUCCESS)
  98. return -EIO;
  99. return 0;
  100. }
  101. EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
  102. int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
  103. {
  104. struct opal_msg m;
  105. int token, ret;
  106. int64_t rc;
  107. if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
  108. return -ENXIO;
  109. token = opal_async_get_token_interruptible();
  110. if (unlikely(token < 0))
  111. return token;
  112. rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
  113. if (rc == OPAL_SUCCESS) {
  114. ret = 0;
  115. goto exit;
  116. } else if (rc != OPAL_ASYNC_COMPLETION) {
  117. ret = -EIO;
  118. goto exit;
  119. }
  120. ret = opal_async_wait_response(token, &m);
  121. if (ret < 0)
  122. goto exit;
  123. if (msg) {
  124. ret = 1;
  125. memcpy(msg, &m, sizeof(m));
  126. }
  127. exit:
  128. opal_async_release_token(token);
  129. return ret;
  130. }
  131. EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
  132. #ifdef CONFIG_PCI_MSI
/*
 * Allocate and wire up MSIs for @pdev.
 *
 * For each MSI descriptor on the device: reserve a hardware IRQ from
 * the PHB's MSI bitmap, map it to a Linux virq, then let the PHB
 * backend (phb->msi_setup) program the address/data pair.  On any
 * failure the resources taken for the *current* entry are released in
 * reverse order; entries completed in earlier iterations are left for
 * the generic teardown path to undo.
 *
 * Returns 0 on success or a negative errno.
 */
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	/* No PHB or no MSI support probed for it */
	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	/* Device insists on 32-bit MSI but the PHB can't do that */
	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}

		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}

		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}

		/* Backend programs the MSI address/data into @msg */
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}

		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}

	return 0;
}
/*
 * Undo pnv_setup_msi_irqs(): for every MSI entry that actually got an
 * IRQ, detach the descriptor, dispose of the virq mapping, and return
 * the hardware IRQ to the PHB's MSI bitmap.  The hwirq must be read
 * back via virq_to_hw() before the mapping is disposed.
 */
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		/* Entry never got an IRQ (e.g. setup failed part-way) */
		if (!entry->irq)
			continue;

		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
  195. #endif /* CONFIG_PCI_MSI */
  196. /* Nicely print the contents of the PE State Tables (PEST). */
  197. static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
  198. {
  199. __be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
  200. bool dup = false;
  201. int i;
  202. for (i = 0; i < pest_size; i++) {
  203. __be64 peA = be64_to_cpu(pestA[i]);
  204. __be64 peB = be64_to_cpu(pestB[i]);
  205. if (peA != prevA || peB != prevB) {
  206. if (dup) {
  207. pr_info("PE[..%03x] A/B: as above\n", i-1);
  208. dup = false;
  209. }
  210. prevA = peA;
  211. prevB = peB;
  212. if (peA & PNV_IODA_STOPPED_STATE ||
  213. peB & PNV_IODA_STOPPED_STATE)
  214. pr_info("PE[%03x] A/B: %016llx %016llx\n",
  215. i, peA, peB);
  216. } else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
  217. peB & PNV_IODA_STOPPED_STATE)) {
  218. dup = true;
  219. }
  220. }
  221. }
/*
 * Dump the P7IOC-format PHB diagnostic data returned by OPAL.  Each
 * register group is printed only when at least one of its fields is
 * non-zero, to keep the log readable.
 */
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finally the per-PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}
/*
 * Dump the PHB3-format diagnostic data returned by OPAL.  Mirrors the
 * P7IOC variant above but adds the nest FIR group and uses the PHB3
 * register names.  Groups with all-zero fields are skipped.
 */
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData*)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finally the per-PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}
  385. static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
  386. struct OpalIoPhbErrorCommon *common)
  387. {
  388. struct OpalIoPhb4ErrorData *data;
  389. data = (struct OpalIoPhb4ErrorData*)common;
  390. pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
  391. hose->global_number, be32_to_cpu(common->version));
  392. if (data->brdgCtl)
  393. pr_info("brdgCtl: %08x\n",
  394. be32_to_cpu(data->brdgCtl));
  395. if (data->deviceStatus || data->slotStatus ||
  396. data->linkStatus || data->devCmdStatus ||
  397. data->devSecStatus)
  398. pr_info("RootSts: %08x %08x %08x %08x %08x\n",
  399. be32_to_cpu(data->deviceStatus),
  400. be32_to_cpu(data->slotStatus),
  401. be32_to_cpu(data->linkStatus),
  402. be32_to_cpu(data->devCmdStatus),
  403. be32_to_cpu(data->devSecStatus));
  404. if (data->rootErrorStatus || data->uncorrErrorStatus ||
  405. data->corrErrorStatus)
  406. pr_info("RootErrSts: %08x %08x %08x\n",
  407. be32_to_cpu(data->rootErrorStatus),
  408. be32_to_cpu(data->uncorrErrorStatus),
  409. be32_to_cpu(data->corrErrorStatus));
  410. if (data->tlpHdr1 || data->tlpHdr2 ||
  411. data->tlpHdr3 || data->tlpHdr4)
  412. pr_info("RootErrLog: %08x %08x %08x %08x\n",
  413. be32_to_cpu(data->tlpHdr1),
  414. be32_to_cpu(data->tlpHdr2),
  415. be32_to_cpu(data->tlpHdr3),
  416. be32_to_cpu(data->tlpHdr4));
  417. if (data->sourceId)
  418. pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
  419. if (data->nFir)
  420. pr_info("nFir: %016llx %016llx %016llx\n",
  421. be64_to_cpu(data->nFir),
  422. be64_to_cpu(data->nFirMask),
  423. be64_to_cpu(data->nFirWOF));
  424. if (data->phbPlssr || data->phbCsr)
  425. pr_info("PhbSts: %016llx %016llx\n",
  426. be64_to_cpu(data->phbPlssr),
  427. be64_to_cpu(data->phbCsr));
  428. if (data->lemFir)
  429. pr_info("Lem: %016llx %016llx %016llx\n",
  430. be64_to_cpu(data->lemFir),
  431. be64_to_cpu(data->lemErrorMask),
  432. be64_to_cpu(data->lemWOF));
  433. if (data->phbErrorStatus)
  434. pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
  435. be64_to_cpu(data->phbErrorStatus),
  436. be64_to_cpu(data->phbFirstErrorStatus),
  437. be64_to_cpu(data->phbErrorLog0),
  438. be64_to_cpu(data->phbErrorLog1));
  439. if (data->phbTxeErrorStatus)
  440. pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
  441. be64_to_cpu(data->phbTxeErrorStatus),
  442. be64_to_cpu(data->phbTxeFirstErrorStatus),
  443. be64_to_cpu(data->phbTxeErrorLog0),
  444. be64_to_cpu(data->phbTxeErrorLog1));
  445. if (data->phbRxeArbErrorStatus)
  446. pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
  447. be64_to_cpu(data->phbRxeArbErrorStatus),
  448. be64_to_cpu(data->phbRxeArbFirstErrorStatus),
  449. be64_to_cpu(data->phbRxeArbErrorLog0),
  450. be64_to_cpu(data->phbRxeArbErrorLog1));
  451. if (data->phbRxeMrgErrorStatus)
  452. pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
  453. be64_to_cpu(data->phbRxeMrgErrorStatus),
  454. be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
  455. be64_to_cpu(data->phbRxeMrgErrorLog0),
  456. be64_to_cpu(data->phbRxeMrgErrorLog1));
  457. if (data->phbRxeTceErrorStatus)
  458. pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
  459. be64_to_cpu(data->phbRxeTceErrorStatus),
  460. be64_to_cpu(data->phbRxeTceFirstErrorStatus),
  461. be64_to_cpu(data->phbRxeTceErrorLog0),
  462. be64_to_cpu(data->phbRxeTceErrorLog1));
  463. if (data->phbPblErrorStatus)
  464. pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
  465. be64_to_cpu(data->phbPblErrorStatus),
  466. be64_to_cpu(data->phbPblFirstErrorStatus),
  467. be64_to_cpu(data->phbPblErrorLog0),
  468. be64_to_cpu(data->phbPblErrorLog1));
  469. if (data->phbPcieDlpErrorStatus)
  470. pr_info("PcieDlp: %016llx %016llx %016llx\n",
  471. be64_to_cpu(data->phbPcieDlpErrorLog1),
  472. be64_to_cpu(data->phbPcieDlpErrorLog2),
  473. be64_to_cpu(data->phbPcieDlpErrorStatus));
  474. if (data->phbRegbErrorStatus)
  475. pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
  476. be64_to_cpu(data->phbRegbErrorStatus),
  477. be64_to_cpu(data->phbRegbFirstErrorStatus),
  478. be64_to_cpu(data->phbRegbErrorLog0),
  479. be64_to_cpu(data->phbRegbErrorLog1));
  480. pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
  481. }
  482. void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
  483. unsigned char *log_buff)
  484. {
  485. struct OpalIoPhbErrorCommon *common;
  486. if (!hose || !log_buff)
  487. return;
  488. common = (struct OpalIoPhbErrorCommon *)log_buff;
  489. switch (be32_to_cpu(common->ioType)) {
  490. case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
  491. pnv_pci_dump_p7ioc_diag_data(hose, common);
  492. break;
  493. case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
  494. pnv_pci_dump_phb3_diag_data(hose, common);
  495. break;
  496. case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
  497. pnv_pci_dump_phb4_diag_data(hose, common);
  498. break;
  499. default:
  500. pr_warn("%s: Unrecognized ioType %d\n",
  501. __func__, be32_to_cpu(common->ioType));
  502. }
  503. }
/*
 * Handle an EEH freeze detected on @pe_no: grab the PHB diag data,
 * clear the frozen state (via the compound-PE hook when the PHB has
 * one, otherwise directly through OPAL), and dump the diag buffer only
 * if the clear failed.  Runs under phb->lock with IRQs off since it is
 * reachable from config-access paths.
 */
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	/* NOTE(review): rc is unsigned long but holds OPAL return codes
	 * and is printed with %ld below — works, but an s64 would match
	 * the OPAL API more honestly; confirm before changing. */
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If PHB supports compound PE, to handle it */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}
/*
 * After a config-space access, check whether the device's PE has been
 * frozen by EEH and, if so, kick off freeze handling.  Called from the
 * read/write config paths when full EEH support is not active on the
 * PHB.
 */
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8	fstate = 0;
	__be16	pcierr = 0;
	unsigned int pe_no;
	s64	rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not
	 * setup that yet. So all ER errors should be mapped to
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch frozen state. If the PHB support compound PE,
	 * we need handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If PHB supports compound PE, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
  589. int pnv_pci_cfg_read(struct pci_dn *pdn,
  590. int where, int size, u32 *val)
  591. {
  592. struct pnv_phb *phb = pdn->phb->private_data;
  593. u32 bdfn = (pdn->busno << 8) | pdn->devfn;
  594. s64 rc;
  595. switch (size) {
  596. case 1: {
  597. u8 v8;
  598. rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
  599. *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
  600. break;
  601. }
  602. case 2: {
  603. __be16 v16;
  604. rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
  605. &v16);
  606. *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
  607. break;
  608. }
  609. case 4: {
  610. __be32 v32;
  611. rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
  612. *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
  613. break;
  614. }
  615. default:
  616. return PCIBIOS_FUNC_NOT_SUPPORTED;
  617. }
  618. pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
  619. __func__, pdn->busno, pdn->devfn, where, size, *val);
  620. return PCIBIOS_SUCCESSFUL;
  621. }
  622. int pnv_pci_cfg_write(struct pci_dn *pdn,
  623. int where, int size, u32 val)
  624. {
  625. struct pnv_phb *phb = pdn->phb->private_data;
  626. u32 bdfn = (pdn->busno << 8) | pdn->devfn;
  627. pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
  628. __func__, pdn->busno, pdn->devfn, where, size, val);
  629. switch (size) {
  630. case 1:
  631. opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
  632. break;
  633. case 2:
  634. opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
  635. break;
  636. case 4:
  637. opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
  638. break;
  639. default:
  640. return PCIBIOS_FUNC_NOT_SUPPORTED;
  641. }
  642. return PCIBIOS_SUCCESSFUL;
  643. }
  644. #if CONFIG_EEH
  645. static bool pnv_pci_cfg_check(struct pci_dn *pdn)
  646. {
  647. struct eeh_dev *edev = NULL;
  648. struct pnv_phb *phb = pdn->phb->private_data;
  649. /* EEH not enabled ? */
  650. if (!(phb->flags & PNV_PHB_FLAG_EEH))
  651. return true;
  652. /* PE reset or device removed ? */
  653. edev = pdn->edev;
  654. if (edev) {
  655. if (edev->pe &&
  656. (edev->pe->state & EEH_PE_CFG_BLOCKED))
  657. return false;
  658. if (edev->mode & EEH_DEV_REMOVED)
  659. return false;
  660. }
  661. return true;
  662. }
  663. #else
  664. static inline pnv_pci_cfg_check(struct pci_dn *pdn)
  665. {
  666. return true;
  667. }
  668. #endif /* CONFIG_EEH */
  669. static int pnv_pci_read_config(struct pci_bus *bus,
  670. unsigned int devfn,
  671. int where, int size, u32 *val)
  672. {
  673. struct pci_dn *pdn;
  674. struct pnv_phb *phb;
  675. int ret;
  676. *val = 0xFFFFFFFF;
  677. pdn = pci_get_pdn_by_devfn(bus, devfn);
  678. if (!pdn)
  679. return PCIBIOS_DEVICE_NOT_FOUND;
  680. if (!pnv_pci_cfg_check(pdn))
  681. return PCIBIOS_DEVICE_NOT_FOUND;
  682. ret = pnv_pci_cfg_read(pdn, where, size, val);
  683. phb = pdn->phb->private_data;
  684. if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
  685. if (*val == EEH_IO_ERROR_VALUE(size) &&
  686. eeh_dev_check_failure(pdn->edev))
  687. return PCIBIOS_DEVICE_NOT_FOUND;
  688. } else {
  689. pnv_pci_config_check_eeh(pdn);
  690. }
  691. return ret;
  692. }
  693. static int pnv_pci_write_config(struct pci_bus *bus,
  694. unsigned int devfn,
  695. int where, int size, u32 val)
  696. {
  697. struct pci_dn *pdn;
  698. struct pnv_phb *phb;
  699. int ret;
  700. pdn = pci_get_pdn_by_devfn(bus, devfn);
  701. if (!pdn)
  702. return PCIBIOS_DEVICE_NOT_FOUND;
  703. if (!pnv_pci_cfg_check(pdn))
  704. return PCIBIOS_DEVICE_NOT_FOUND;
  705. ret = pnv_pci_cfg_write(pdn, where, size, val);
  706. phb = pdn->phb->private_data;
  707. if (!(phb->flags & PNV_PHB_FLAG_EEH))
  708. pnv_pci_config_check_eeh(pdn);
  709. return ret;
  710. }
/* Config-space accessors installed on every PowerNV PCI bus. */
struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
  715. struct iommu_table *pnv_pci_table_alloc(int nid)
  716. {
  717. struct iommu_table *tbl;
  718. tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
  719. if (!tbl)
  720. return NULL;
  721. INIT_LIST_HEAD_RCU(&tbl->it_group_list);
  722. kref_init(&tbl->it_kref);
  723. return tbl;
  724. }
  725. void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
  726. {
  727. struct pci_controller *hose = pci_bus_to_host(pdev->bus);
  728. struct pnv_phb *phb = hose->private_data;
  729. if (phb && phb->dma_dev_setup)
  730. phb->dma_dev_setup(phb, pdev);
  731. }
  732. void pnv_pci_dma_bus_setup(struct pci_bus *bus)
  733. {
  734. struct pci_controller *hose = bus->sysdata;
  735. struct pnv_phb *phb = hose->private_data;
  736. struct pnv_ioda_pe *pe;
  737. list_for_each_entry(pe, &phb->ioda.pe_list, list) {
  738. if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
  739. continue;
  740. if (!pe->pbus)
  741. continue;
  742. if (bus->number == ((pe->rid >> 8) & 0xFF)) {
  743. pe->pbus = bus;
  744. break;
  745. }
  746. }
  747. }
/*
 * Enable or disable (per @desc's OPAL_PCI_P2P_ENABLE bit) peer-to-peer
 * DMA between @initiator and @target.
 *
 * Returns 0 on success, -ENXIO when firmware lacks the call, -ENODEV
 * when the initiator has no PE, -EINVAL on unbalanced disable, -EIO on
 * OPAL failure.
 */
int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
{
	struct pci_controller *hose;
	struct pnv_phb *phb_init, *phb_target;
	struct pnv_ioda_pe *pe_init;
	int rc;

	if (!opal_check_token(OPAL_PCI_SET_P2P))
		return -ENXIO;

	hose = pci_bus_to_host(initiator->bus);
	phb_init = hose->private_data;

	hose = pci_bus_to_host(target->bus);
	phb_target = hose->private_data;

	pe_init = pnv_ioda_get_pe(initiator);
	if (!pe_init)
		return -ENODEV;

	/*
	 * Configuring the initiator's PHB requires to adjust its
	 * TVE#1 setting. Since the same device can be an initiator
	 * several times for different target devices, we need to keep
	 * a reference count to know when we can restore the default
	 * bypass setting on its TVE#1 when disabling. Opal is not
	 * tracking PE states, so we add a reference count on the PE
	 * in linux.
	 *
	 * For the target, the configuration is per PHB, so we keep a
	 * target reference count on the PHB.
	 */
	mutex_lock(&p2p_mutex);

	if (desc & OPAL_PCI_P2P_ENABLE) {
		/* always go to opal to validate the configuration */
		rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
				      desc, pe_init->pe_number);

		if (rc != OPAL_SUCCESS) {
			rc = -EIO;
			goto out;
		}

		pe_init->p2p_initiator_count++;
		phb_target->p2p_target_count++;
	} else {
		/* Disabling a configuration that was never enabled */
		if (!pe_init->p2p_initiator_count ||
			!phb_target->p2p_target_count) {
			rc = -EINVAL;
			goto out;
		}

		if (--pe_init->p2p_initiator_count == 0)
			pnv_pci_ioda2_set_bypass(pe_init, true);

		/* Last target reference on this PHB: tell OPAL to
		 * tear the configuration down.
		 * NOTE(review): on OPAL failure here the counts stay
		 * decremented — confirm that is the intended
		 * best-effort semantics. */
		if (--phb_target->p2p_target_count == 0) {
			rc = opal_pci_set_p2p(phb_init->opal_id,
					      phb_target->opal_id, desc,
					      pe_init->pe_number);
			if (rc != OPAL_SUCCESS) {
				rc = -EIO;
				goto out;
			}
		}
	}
	rc = 0;
out:
	mutex_unlock(&p2p_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);
  810. struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
  811. {
  812. struct pci_controller *hose = pci_bus_to_host(dev->bus);
  813. return of_node_get(hose->dn);
  814. }
  815. EXPORT_SYMBOL(pnv_pci_get_phb_node);
  816. int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind)
  817. {
  818. struct device_node *np;
  819. const __be32 *prop;
  820. struct pnv_ioda_pe *pe;
  821. uint16_t window_id;
  822. int rc;
  823. if (!radix_enabled())
  824. return -ENXIO;
  825. if (!(np = pnv_pci_get_phb_node(dev)))
  826. return -ENXIO;
  827. prop = of_get_property(np, "ibm,phb-indications", NULL);
  828. of_node_put(np);
  829. if (!prop || !prop[1])
  830. return -ENXIO;
  831. *asnind = (u64)be32_to_cpu(prop[1]);
  832. pe = pnv_ioda_get_pe(dev);
  833. if (!pe)
  834. return -ENODEV;
  835. /* Increase real window size to accept as_notify messages. */
  836. window_id = (pe->pe_number << 1 ) + 1;
  837. rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number,
  838. window_id, pe->tce_bypass_base,
  839. (uint64_t)1 << 48);
  840. return opal_error_code(rc);
  841. }
  842. EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel);
  843. int pnv_pci_disable_tunnel(struct pci_dev *dev)
  844. {
  845. struct pnv_ioda_pe *pe;
  846. pe = pnv_ioda_get_pe(dev);
  847. if (!pe)
  848. return -ENODEV;
  849. /* Restore default real window size. */
  850. pnv_pci_ioda2_set_bypass(pe, true);
  851. return 0;
  852. }
  853. EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel);
/*
 * Set or clear the PBCQ tunnel BAR of @dev's PHB, which routes tunneled
 * (e.g. atomic) operations to @addr.
 *
 * @enable != 0 claims the BAR for @addr; @enable == 0 releases it.
 * Ownership is exclusive per PHB: claiming succeeds only if the BAR is
 * currently unset or already set to @addr, and releasing succeeds only
 * if @addr matches the current owner.
 *
 * Returns 0 on success, -ENXIO if firmware lacks the get/set tokens,
 * -EIO on OPAL read failure, -EBUSY if another address owns the BAR,
 * -EPERM on a mismatched release, or an OPAL error translated by
 * opal_error_code().
 */
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	__be64 val;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	u64 tunnel_bar;
	int rc;

	/* Both the read and the write OPAL calls must be available. */
	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	hose = pci_bus_to_host(dev->bus);
	phb = hose->private_data;

	mutex_lock(&tunnel_mutex);
	/* Read the current owner before deciding whether to change it. */
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		* Only one device per PHB can use atomics.
		* Our policy is first-come, first-served.
		*/
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		* The device that owns atomics and wants to release
		* them must pass the same address with enable == 0.
		*/
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		/* Clearing the BAR releases ownership. */
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
  904. #ifdef CONFIG_PPC64 /* for thread.tidr */
  905. int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid,
  906. u32 *tid)
  907. {
  908. struct mm_struct *mm = NULL;
  909. if (task == NULL)
  910. return -EINVAL;
  911. mm = get_task_mm(task);
  912. if (mm == NULL)
  913. return -EINVAL;
  914. *pid = mm->context.id;
  915. mmput(mm);
  916. *tid = task->thread.tidr;
  917. *lpid = mfspr(SPRN_LPID);
  918. return 0;
  919. }
  920. EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info);
  921. #endif
  922. void pnv_pci_shutdown(void)
  923. {
  924. struct pci_controller *hose;
  925. list_for_each_entry(hose, &hose_list, list_node)
  926. if (hose->controller_ops.shutdown)
  927. hose->controller_ops.shutdown(hose);
  928. }
/*
 * Fixup wrong class code in p7ioc and p8 root complex: the hardware
 * (IBM device 0x3b9) does not report itself as a PCI-to-PCI bridge,
 * so force the class so the PCI core treats it as one.
 */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
  935. void __init pnv_pci_init(void)
  936. {
  937. struct device_node *np;
  938. pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
  939. /* If we don't have OPAL, eg. in sim, just skip PCI probe */
  940. if (!firmware_has_feature(FW_FEATURE_OPAL))
  941. return;
  942. #ifdef CONFIG_PCIEPORTBUS
  943. /*
  944. * On PowerNV PCIe devices are (currently) managed in cooperation
  945. * with firmware. This isn't *strictly* required, but there's enough
  946. * assumptions baked into both firmware and the platform code that
  947. * it's unwise to allow the portbus services to be used.
  948. *
  949. * We need to fix this eventually, but for now set this flag to disable
  950. * the portbus driver. The AER service isn't required since that AER
  951. * events are handled via EEH. The pciehp hotplug driver can't work
  952. * without kernel changes (and portbus binding breaks pnv_php). The
  953. * other services also require some thinking about how we're going
  954. * to integrate them.
  955. */
  956. pcie_ports_disabled = true;
  957. #endif
  958. /* Look for IODA IO-Hubs. */
  959. for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
  960. pnv_pci_init_ioda_hub(np);
  961. }
  962. /* Look for ioda2 built-in PHB3's */
  963. for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
  964. pnv_pci_init_ioda2_phb(np);
  965. /* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
  966. for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
  967. pnv_pci_init_ioda2_phb(np);
  968. /* Look for NPU PHBs */
  969. for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
  970. pnv_pci_init_npu_phb(np);
  971. /*
  972. * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
  973. * the exception of TCE kill which requires an OPAL call.
  974. */
  975. for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
  976. pnv_pci_init_npu_phb(np);
  977. /* Look for NPU2 OpenCAPI PHBs */
  978. for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
  979. pnv_pci_init_npu2_opencapi_phb(np);
  980. /* Configure IOMMU DMA hooks */
  981. set_pci_dma_ops(&dma_iommu_ops);
  982. }
  983. machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);