/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: aiutils.c 701122 2017-05-23 19:32:45Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include "siutils_priv.h"
#include <bcmdevs.h>

#define BCM53573_DMP() (0)
#define BCM4707_DMP() (0)
#define PMU_DMP() (0)
#define GCI_DMP() (0)

#if defined(BCM_BACKPLANE_TIMEOUT)
static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreuinit);
#endif /* BCM_BACKPLANE_TIMEOUT */

#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai);
#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */

/* EROM parsing */
static uint32
get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
{
    uint32 ent;
    uint inv = 0, nom = 0;
    uint32 size = 0;

    while (TRUE) {
        ent = R_REG(si_osh(sih), *eromptr);
        (*eromptr)++;

        if (mask == 0)
            break;

        if ((ent & ER_VALID) == 0) {
            inv++;
            continue;
        }

        if (ent == (ER_END | ER_VALID))
            break;

        if ((ent & mask) == match)
            break;

        /* escape condition: bound the walk by the maximum EROM size
         * in case the table contains invalid values
         */
        size += sizeof(*eromptr);
        if (size >= ER_SZ_MAX) {
            SI_ERROR(("Failed to find end of EROM marker\n"));
            break;
        }

        nom++;
    }

    SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
    if (inv + nom) {
        SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
    }
    return ent;
}
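
/* Hedged usage sketch (not compiled into the driver): how the parser above
 * is typically driven to step from one component identifier (CIA) entry to
 * the next. erom_find_next_cia() is a hypothetical helper added here purely
 * for illustration; ai_scan() below performs the same call inline.
 */
#if 0
static uint32
erom_find_next_cia(si_t *sih, uint32 **eromptr)
{
    /* Return the next valid entry tagged as a component identifier,
     * or (ER_END | ER_VALID) when the table is exhausted.
     */
    return get_erom_ent(sih, eromptr, ER_TAG, ER_CI);
}
#endif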
static uint32
get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
        uint32 *sizel, uint32 *sizeh)
{
    uint32 asd, sz, szd;

    BCM_REFERENCE(ad);

    asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
    if (((asd & ER_TAG1) != ER_ADD) ||
        (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
        ((asd & AD_ST_MASK) != st)) {
        /* This is not what we want, "push" it back */
        (*eromptr)--;
        return 0;
    }
    *addrl = asd & AD_ADDR_MASK;
    if (asd & AD_AG32)
        *addrh = get_erom_ent(sih, eromptr, 0, 0);
    else
        *addrh = 0;
    *sizeh = 0;
    sz = asd & AD_SZ_MASK;
    if (sz == AD_SZ_SZD) {
        szd = get_erom_ent(sih, eromptr, 0, 0);
        *sizel = szd & SD_SZ_MASK;
        if (szd & SD_SG32)
            *sizeh = get_erom_ent(sih, eromptr, 0, 0);
    } else
        *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

    SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
             sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
    return asd;
}
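
/* Hedged sketch (not compiled): the usual pattern for consuming get_asd().
 * A zero return means "no matching descriptor here" and the EROM pointer has
 * been pushed back, so the caller may retry with a different descriptor type
 * (e.g. AD_ST_BRIDGE), exactly as ai_scan() does below. The helper name is
 * hypothetical.
 */
#if 0
static bool
erom_first_slave_addr(si_t *sih, uint32 **eromptr, uint32 *base, uint32 *size)
{
    uint32 addrl, addrh, sizel, sizeh;

    if (get_asd(sih, eromptr, 0, 0, AD_ST_SLAVE,
                &addrl, &addrh, &sizel, &sizeh) == 0)
        return FALSE;
    *base = addrl;  /* 32-bit base; addrh is non-zero only with AD_AG32 */
    *size = sizel;
    return TRUE;
}
#endif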
/* Parse the enumeration rom to identify all cores
 * Erom content format can be found in:
 * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf
 */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    chipcregs_t *cc = (chipcregs_t *)regs;
    uint32 erombase, *eromptr, *eromlim;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;

    BCM_REFERENCE(devid);

    erombase = R_REG(sii->osh, &cc->eromptr);

    switch (BUSTYPE(sih->bustype)) {
    case SI_BUS:
        eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
        break;

    case PCI_BUS:
        /* Set wrappers address */
        sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

        /* Now point the window at the erom */
        OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
        eromptr = regs;
        break;

#ifdef BCMSDIO
    case SPI_BUS:
    case SDIO_BUS:
        eromptr = (uint32 *)(uintptr)erombase;
        break;
#endif /* BCMSDIO */

    case PCMCIA_BUS:
    default:
        SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
        ASSERT(0);
        return;
    }

    eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
    sii->axi_num_wrappers = 0;

    SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
             OSL_OBFUSCATE_BUF(regs), erombase,
             OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));

    while (eromptr < eromlim) {
        uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
        uint32 mpd, asd, addrl, addrh, sizel, sizeh;
        uint i, j, idx;
        bool br;

        br = FALSE;

        /* Grok a component */
        cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
        if (cia == (ER_END | ER_VALID)) {
            SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
            return;
        }

        cib = get_erom_ent(sih, &eromptr, 0, 0);

        if ((cib & ER_TAG) != ER_CI) {
            SI_ERROR(("CIA not followed by CIB\n"));
            goto error;
        }

        cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
        mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
        crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
        nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
        nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
        nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
        nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

#ifdef BCMDBG_SI
        SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
                 "nsw = %d, nmp = %d & nsp = %d\n",
                 mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
#else
        BCM_REFERENCE(crev);
#endif // endif

        if (BCM4347_CHIP(sih->chip)) {
            /* The 4347 has more entries for the ARM core.
             * This should apply to all chips, but it crashes on router
             * platforms; treat it as a temporary fix pending further analysis.
             */
            if (nsp == 0)
                continue;
        } else {
            /* Include default slave wrapper for timeout monitoring */
            if ((nsp == 0) ||
#if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
                ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
#else
                ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
                 (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
#endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
                FALSE) {
                continue;
            }
        }

        if ((nmw + nsw) == 0) {
            /* A component which is not a core */
            if (cid == OOB_ROUTER_CORE_ID) {
                asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
                              &addrl, &addrh, &sizel, &sizeh);
                if (asd != 0) {
                    if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
                        sii->oob_router1 = addrl;
                    } else {
                        sii->oob_router = addrl;
                    }
                }
            }
            if (cid != NS_CCB_CORE_ID &&
                cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID &&
                cid != HUB_CORE_ID && cid != HND_OOBR_CORE_ID)
                continue;
        }

        idx = sii->numcores;

        cores_info->cia[idx] = cia;
        cores_info->cib[idx] = cib;
        cores_info->coreid[idx] = cid;

        for (i = 0; i < nmp; i++) {
            mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
            if ((mpd & ER_TAG) != ER_MP) {
                SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
                goto error;
            }
            SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
                     (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
                     (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
        }

        /* First Slave Address Descriptor should be port 0:
         * the main register space for the core
         */
        asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
        if (asd == 0) {
            do {
                /* Try again to see if it is a bridge */
                asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
                              &sizel, &sizeh);
                if (asd != 0)
                    br = TRUE;
                else {
                    if (br == TRUE) {
                        break;
                    } else if ((addrh != 0) || (sizeh != 0) ||
                               (sizel != SI_CORE_SIZE)) {
                        SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t sizel = 0x%x\n",
                                  addrh, sizeh, sizel));
                        SI_ERROR(("First Slave ASD for core 0x%04x malformed "
                                  "(0x%08x)\n", cid, asd));
                        goto error;
                    }
                }
            } while (1);
        }
        cores_info->coresba[idx] = addrl;
        cores_info->coresba_size[idx] = sizel;

        /* Get any more ASDs in first port */
        j = 1;
        do {
            asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
                          &sizel, &sizeh);
            if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
                cores_info->coresba2[idx] = addrl;
                cores_info->coresba2_size[idx] = sizel;
            }
            j++;
        } while (asd != 0);

        /* Go through the ASDs for other slave ports */
        for (i = 1; i < nsp; i++) {
            j = 0;
            do {
                asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
                              &sizel, &sizeh);
                /* To get the first base address of second slave port */
                if ((asd != 0) && (i == 1) && (j == 0)) {
                    cores_info->csp2ba[idx] = addrl;
                    cores_info->csp2ba_size[idx] = sizel;
                }
                if (asd == 0)
                    break;
                j++;
            } while (1);

            if (j == 0) {
                SI_ERROR((" SP %d has no address descriptors\n", i));
                goto error;
            }
        }

        /* Now get master wrappers */
        for (i = 0; i < nmw; i++) {
            asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
                          &sizel, &sizeh);
            if (asd == 0) {
                SI_ERROR(("Missing descriptor for MW %d\n", i));
                goto error;
            }
            if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
                SI_ERROR(("Master wrapper %d is not 4KB\n", i));
                goto error;
            }
            if (i == 0) {
                cores_info->wrapba[idx] = addrl;
            } else if (i == 1) {
                cores_info->wrapba2[idx] = addrl;
            } else if (i == 2) {
                cores_info->wrapba3[idx] = addrl;
            }

            if (axi_wrapper &&
                (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
                axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
                axi_wrapper[sii->axi_num_wrappers].cid = cid;
                axi_wrapper[sii->axi_num_wrappers].rev = crev;
                axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
                axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
                sii->axi_num_wrappers++;
                SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x, "
                         "rev:%x, addr:%x, size:%x\n",
                         sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
            }
        }

        /* And finally slave wrappers */
        for (i = 0; i < nsw; i++) {
            uint fwp = (nsp == 1) ? 0 : 1;
            asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
                          &sizel, &sizeh);

            /* cache APB bridge wrapper address for set/clear timeout */
            if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
                ASSERT(sii->num_br < SI_MAXBR);
                sii->br_wrapba[sii->num_br++] = addrl;
            }
            if (asd == 0) {
                SI_ERROR(("Missing descriptor for SW %d\n", i));
                goto error;
            }
            if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
                SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
                goto error;
            }
            if ((nmw == 0) && (i == 0)) {
                cores_info->wrapba[idx] = addrl;
            } else if ((nmw == 0) && (i == 1)) {
                cores_info->wrapba2[idx] = addrl;
            } else if ((nmw == 0) && (i == 2)) {
                cores_info->wrapba3[idx] = addrl;
            }

            /* Include all slave wrappers in the list to
             * enable and monitor watchdog timeouts
             */
            if (axi_wrapper &&
                (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
                axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
                axi_wrapper[sii->axi_num_wrappers].cid = cid;
                axi_wrapper[sii->axi_num_wrappers].rev = crev;
                axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;

                /* Software WAR as discussed with the hardware team, to ensure the
                 * proper slave wrapper base address is set for the 4364 chip ID.
                 * The current address is 0x1810c000; corrected to 0x1810e000.
                 * This ensures the AXI default slave wrapper is registered along
                 * with the other slave wrapper cores and is useful when generating
                 * trap info for writes to an invalid core / wrapper register.
                 */
                if ((CHIPID(sih->chip) == BCM4364_CHIP_ID) &&
                    (cid == DEF_AI_COMP)) {
                    axi_wrapper[sii->axi_num_wrappers].wrapper_addr =
                        0x1810e000;
                } else {
                    axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
                }
                sii->axi_num_wrappers++;
                SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x, "
                         "rev:%x, addr:%x, size:%x\n",
                         sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
            }
        }

#ifndef BCM_BACKPLANE_TIMEOUT
        /* Don't record bridges */
        if (br)
            continue;
#endif // endif

        /* Done with core */
        sii->numcores++;
    }

    SI_ERROR(("Reached end of erom without finding END\n"));

error:
    sii->numcores = 0;
    return;
}
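
/* Hedged sketch (not compiled): after ai_scan() succeeds, sii->numcores and
 * the cores_info arrays describe every discovered core. A caller inside this
 * file could walk them like this; dump_cores() is a hypothetical helper.
 */
#if 0
static void
dump_cores(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint i;

    for (i = 0; i < sii->numcores; i++) {
        SI_PRINT(("core %u: id 0x%x, regs at 0x%08x, wrapper at 0x%08x\n",
                  i, cores_info->coreid[i],
                  cores_info->coresba[i], cores_info->wrapba[i]));
    }
}
#endif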
#define AI_SETCOREIDX_MAPSIZE(coreid) \
    (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static volatile void *
_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrapn)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 addr, wrap, wrap2, wrap3;
    volatile void *regs;

    if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
        return (NULL);

    addr = cores_info->coresba[coreidx];
    wrap = cores_info->wrapba[coreidx];
    wrap2 = cores_info->wrapba2[coreidx];
    wrap3 = cores_info->wrapba3[coreidx];

#ifdef BCM_BACKPLANE_TIMEOUT
    /* No need to disable interrupts while entering/exiting APB bridge core */
    if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
        (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
#endif /* BCM_BACKPLANE_TIMEOUT */
    {
        /*
         * If the user has provided an interrupt mask enabled function,
         * then assert interrupts are disabled before switching the core.
         */
        ASSERT((sii->intrsenabled_fn == NULL) ||
               !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
    }

    switch (BUSTYPE(sih->bustype)) {
    case SI_BUS:
        /* map new one */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(addr,
                AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        sii->curmap = regs = cores_info->regs[coreidx];
        if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
            cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
        }
        if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
            cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
        }
        if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
            cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
        }

        if (use_wrapn == 2) {
            sii->curwrap = cores_info->wrappers3[coreidx];
        } else if (use_wrapn == 1) {
            sii->curwrap = cores_info->wrappers2[coreidx];
        } else {
            sii->curwrap = cores_info->wrappers[coreidx];
        }
        break;

    case PCI_BUS:
#ifdef BCM_BACKPLANE_TIMEOUT
        /* No need to set the BAR0 window if the core is the APB bridge.
         * This saves 2 PCI writes while checking for errlog
         */
        if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
#endif /* BCM_BACKPLANE_TIMEOUT */
        {
            /* point bar0 window */
            OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
        }

        regs = sii->curmap;

        /* point bar0 2nd 4KB window to the primary wrapper */
        if (use_wrapn)
            wrap = wrap2;
        if (PCIE_GEN2(sii))
            OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
        else
            OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
        break;

#ifdef BCMSDIO
    case SPI_BUS:
    case SDIO_BUS:
        sii->curmap = regs = (void *)((uintptr)addr);
        if (use_wrapn)
            sii->curwrap = (void *)((uintptr)wrap2);
        else
            sii->curwrap = (void *)((uintptr)wrap);
        break;
#endif /* BCMSDIO */

    case PCMCIA_BUS:
    default:
        ASSERT(0);
        regs = NULL;
        break;
    }

    sii->curmap = regs;
    sii->curidx = coreidx;

    return regs;
}

volatile void *
ai_setcoreidx(si_t *sih, uint coreidx)
{
    return _ai_setcoreidx(sih, coreidx, 0);
}

volatile void *
ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
{
    return _ai_setcoreidx(sih, coreidx, 1);
}

volatile void *
ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx)
{
    return _ai_setcoreidx(sih, coreidx, 2);
}
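
/* Hedged sketch (not compiled): callers that temporarily switch the focused
 * core are expected to save and restore the index around the access, since
 * sii->curmap/curwrap always track the current core. peek_other_core() is a
 * hypothetical helper showing the pattern.
 */
#if 0
static void
peek_other_core(si_t *sih, uint coreidx)
{
    uint origidx = si_coreidx(sih);
    volatile void *regs = ai_setcoreidx(sih, coreidx);

    if (regs != NULL) {
        /* ... access the core's registers through 'regs' ... */
    }
    ai_setcoreidx(sih, origidx); /* restore the original focus */
}
#endif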
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    chipcregs_t *cc = NULL;
    uint32 erombase, *eromptr, *eromlim;
    uint i, j, cidx;
    uint32 cia, cib, nmp, nsp;
    uint32 asd, addrl, addrh, sizel, sizeh;

    for (i = 0; i < sii->numcores; i++) {
        if (cores_info->coreid[i] == CC_CORE_ID) {
            cc = (chipcregs_t *)cores_info->regs[i];
            break;
        }
    }
    if (cc == NULL)
        goto error;

    erombase = R_REG(sii->osh, &cc->eromptr);
    eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
    eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

    cidx = sii->curidx;
    cia = cores_info->cia[cidx];
    cib = cores_info->cib[cidx];

    nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
    nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

    /* scan for cores */
    while (eromptr < eromlim) {
        if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
            (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
            break;
        }
    }

    /* skip master ports */
    for (i = 0; i < nmp; i++)
        get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

    /* Skip ASDs in port 0 */
    asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
    if (asd == 0) {
        /* Try again to see if it is a bridge */
        asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
                      &sizel, &sizeh);
    }

    j = 1;
    do {
        asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
                      &sizel, &sizeh);
        j++;
    } while (asd != 0);

    /* Go through the ASDs for other slave ports */
    for (i = 1; i < nsp; i++) {
        j = 0;
        do {
            asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
                          &sizel, &sizeh);
            if (asd == 0)
                break;

            if (!asidx--) {
                *addr = addrl;
                *size = sizel;
                return;
            }
            j++;
        } while (1);

        if (j == 0) {
            SI_ERROR((" SP %d has no address descriptors\n", i));
            break;
        }
    }

error:
    *size = 0;
    return;
}
/* Return the number of address spaces in current core */
int
ai_numaddrspaces(si_t *sih)
{
    BCM_REFERENCE(sih);
    return 2;
}

/* Return the address of the nth address space in the current core
 * Arguments:
 * sih : Pointer to struct si_t
 * spidx : slave port index
 * baidx : base address index
 */
uint32
ai_addrspace(si_t *sih, uint spidx, uint baidx)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint cidx;

    cidx = sii->curidx;

    if (spidx == CORE_SLAVE_PORT_0) {
        if (baidx == CORE_BASE_ADDR_0)
            return cores_info->coresba[cidx];
        else if (baidx == CORE_BASE_ADDR_1)
            return cores_info->coresba2[cidx];
    } else if (spidx == CORE_SLAVE_PORT_1) {
        if (baidx == CORE_BASE_ADDR_0)
            return cores_info->csp2ba[cidx];
    }

    SI_ERROR(("%s: Need to parse the erom again to find base addr %d on slave port %d\n",
              __FUNCTION__, baidx, spidx));
    return 0;
}

/* Return the size of the nth address space in the current core
 * Arguments:
 * sih : Pointer to struct si_t
 * spidx : slave port index
 * baidx : base address index
 */
uint32
ai_addrspacesize(si_t *sih, uint spidx, uint baidx)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint cidx;

    cidx = sii->curidx;

    if (spidx == CORE_SLAVE_PORT_0) {
        if (baidx == CORE_BASE_ADDR_0)
            return cores_info->coresba_size[cidx];
        else if (baidx == CORE_BASE_ADDR_1)
            return cores_info->coresba2_size[cidx];
    } else if (spidx == CORE_SLAVE_PORT_1) {
        if (baidx == CORE_BASE_ADDR_0)
            return cores_info->csp2ba_size[cidx];
    }

    SI_ERROR(("%s: Need to parse the erom again to find base addr %d on slave port %d\n",
              __FUNCTION__, baidx, spidx));
    return 0;
}
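
/* Hedged sketch (not compiled): querying the main register space of the
 * current core through the two accessors above, using the slave-port and
 * base-address indices that ai_scan() recorded.
 */
#if 0
static void
show_main_addrspace(si_t *sih)
{
    uint32 base = ai_addrspace(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
    uint32 size = ai_addrspacesize(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);

    SI_PRINT(("current core: 0x%08x..0x%08x\n", base, base + size - 1));
}
#endif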
uint
ai_flag(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;

    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
                  __FUNCTION__));
        return sii->curidx;
    }
    if (BCM53573_DMP()) {
        SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
        return sii->curidx;
    }
    if (PMU_DMP()) {
        uint idx, flag;
        idx = sii->curidx;
        ai_setcoreidx(sih, SI_CC_IDX);
        flag = ai_flag_alt(sih);
        ai_setcoreidx(sih, idx);
        return flag;
    }

    ai = sii->curwrap;
    ASSERT(ai != NULL);

    return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}

uint
ai_flag_alt(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;

    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
                  __FUNCTION__));
        return sii->curidx;
    }

    ai = sii->curwrap;

    return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
}

void
ai_setint(si_t *sih, int siflag)
{
    BCM_REFERENCE(sih);
    BCM_REFERENCE(siflag);
}

uint
ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    uint32 *addr = (uint32 *)((uchar *)(sii->curwrap) + offset);

    if (mask || val) {
        uint32 w = R_REG(sii->osh, addr);
        w &= ~mask;
        w |= val;
        W_REG(sii->osh, addr, w);
    }
    return (R_REG(sii->osh, addr));
}
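
/* Hedged sketch (not compiled): ai_wrap_reg() follows the driver-wide
 * mask/val convention -- mask == 0 and val == 0 is a pure read, otherwise
 * the register is read-modify-written before the readback. OFFSETOF is
 * assumed to be the usual Broadcom offset macro; the register choice is
 * illustrative only.
 */
#if 0
static void
wrap_reg_examples(si_t *sih)
{
    /* pure read of the wrapper's ioctrl register */
    uint32 v = ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), 0, 0);

    /* set SICF_CLOCK_EN without disturbing other bits */
    (void)ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl),
                      SICF_CLOCK_EN, SICF_CLOCK_EN);
    BCM_REFERENCE(v);
}
#endif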
uint
ai_corevendor(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 cia;

    cia = cores_info->cia[sii->curidx];
    return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
}

uint
ai_corerev(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 cib;

    cib = cores_info->cib[sii->curidx];
    return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}

uint
ai_corerev_minor(si_t *sih)
{
    return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
           SISF_MINORREV_D11_MASK;
}

bool
ai_iscoreup(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;

    ai = sii->curwrap;

    return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
            ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
    uint origidx = 0;
    volatile uint32 *r = NULL;
    uint w;
    uint intr_val = 0;
    bool fast = FALSE;
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODIDX(coreidx));
    ASSERT(regoff < SI_CORE_SIZE);
    ASSERT((val & ~mask) == 0);

    if (coreidx >= SI_MAXCORES)
        return 0;

    if (BUSTYPE(sih->bustype) == SI_BUS) {
        /* If internal bus, we can always get at everything */
        fast = TRUE;
        /* map if does not exist */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
                                                SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
    } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
        /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
        if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
            /* Chipc registers are mapped at 12KB */
            fast = TRUE;
            r = (volatile uint32 *)((volatile char *)sii->curmap +
                                    PCI_16KB0_CCREGS_OFFSET + regoff);
        } else if (sii->pub.buscoreidx == coreidx) {
            /* pci registers are either in the last 2KB of an 8KB window
             * or, in pcie and pci rev 13, at 8KB
             */
            fast = TRUE;
            if (SI_FAST(sii))
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                                        PCI_16KB0_PCIREGS_OFFSET + regoff);
            else
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                                        ((regoff >= SBCONFIGOFF) ?
                                         PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
                                        regoff);
        }
    }

    if (!fast) {
        INTR_OFF(sii, intr_val);

        /* save current core index */
        origidx = si_coreidx(&sii->pub);

        /* switch core */
        r = (volatile uint32 *)((volatile uchar *)ai_setcoreidx(&sii->pub, coreidx) +
                                regoff);
    }
    ASSERT(r != NULL);

    /* mask and set */
    if (mask || val) {
        w = (R_REG(sii->osh, r) & ~mask) | val;
        W_REG(sii->osh, r, w);
    }

    /* readback */
    w = R_REG(sii->osh, r);

    if (!fast) {
        /* restore core index */
        if (origidx != coreidx)
            ai_setcoreidx(&sii->pub, origidx);

        INTR_RESTORE(sii, intr_val);
    }

    return (w);
}
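
/* Hedged sketch (not compiled): typical ai_corereg() calls. The same
 * mask/val convention applies -- a zero mask and val reads the register, a
 * non-zero pair performs a read-modify-write, and the post-operation value
 * is returned either way. The register offset below is illustrative, not a
 * real hardware offset.
 */
#if 0
static void
corereg_examples(si_t *sih)
{
    uint idx = si_coreidx(sih);
    uint w = ai_corereg(sih, idx, 0x120, 0, 0);    /* pure read */

    /* set bit 0; the mask selects which bits the write may change */
    (void)ai_corereg(sih, idx, 0x120, 0x1, 0x1);
    BCM_REFERENCE(w);
}
#endif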
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
    uint origidx = 0;
    volatile uint32 *r = NULL;
    uint w = 0;
    uint intr_val = 0;
    bool fast = FALSE;
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODIDX(coreidx));
    ASSERT(regoff < SI_CORE_SIZE);
    ASSERT((val & ~mask) == 0);

    if (coreidx >= SI_MAXCORES)
        return 0;

    if (BUSTYPE(sih->bustype) == SI_BUS) {
        /* If internal bus, we can always get at everything */
        fast = TRUE;
        /* map if does not exist */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
                                                SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
    } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
        /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
        if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
            /* Chipc registers are mapped at 12KB */
            fast = TRUE;
            r = (volatile uint32 *)((volatile char *)sii->curmap +
                                    PCI_16KB0_CCREGS_OFFSET + regoff);
        } else if (sii->pub.buscoreidx == coreidx) {
            /* pci registers are either in the last 2KB of an 8KB window
             * or, in pcie and pci rev 13, at 8KB
             */
            fast = TRUE;
            if (SI_FAST(sii))
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                                        PCI_16KB0_PCIREGS_OFFSET + regoff);
            else
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                                        ((regoff >= SBCONFIGOFF) ?
                                         PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
                                        regoff);
        }
    }

    if (!fast) {
        INTR_OFF(sii, intr_val);

        /* save current core index */
        origidx = si_coreidx(&sii->pub);

        /* switch core */
        r = (volatile uint32 *)((volatile uchar *)ai_setcoreidx(&sii->pub, coreidx) +
                                regoff);
    }
    ASSERT(r != NULL);

    /* mask and set */
    if (mask || val) {
        w = (R_REG(sii->osh, r) & ~mask) | val;
        W_REG(sii->osh, r, w);
    }

    if (!fast) {
        /* restore core index */
        if (origidx != coreidx)
            ai_setcoreidx(&sii->pub, origidx);

        INTR_RESTORE(sii, intr_val);
    }

    return (w);
}
/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * back plane registers, pci registers and chipcommon registers), this function
 * returns the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
volatile uint32 *
ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
    volatile uint32 *r = NULL;
    bool fast = FALSE;
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODIDX(coreidx));
    ASSERT(regoff < SI_CORE_SIZE);

    if (coreidx >= SI_MAXCORES)
        return 0;

    if (BUSTYPE(sih->bustype) == SI_BUS) {
        /* If internal bus, we can always get at everything */
        fast = TRUE;
        /* map if does not exist */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
                                                SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
    } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
        /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
        if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
            /* Chipc registers are mapped at 12KB */
            fast = TRUE;
            r = (volatile uint32 *)((volatile char *)sii->curmap +
                                    PCI_16KB0_CCREGS_OFFSET + regoff);
        } else if (sii->pub.buscoreidx == coreidx) {
            /* pci registers are either in the last 2KB of an 8KB window
             * or, in pcie and pci rev 13, at 8KB
             */
            fast = TRUE;
            if (SI_FAST(sii))
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                                        PCI_16KB0_PCIREGS_OFFSET + regoff);
            else
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                                        ((regoff >= SBCONFIGOFF) ?
                                         PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
                                        regoff);
        }
    }

    if (!fast) {
        ASSERT(sii->curidx == coreidx);
        r = (volatile uint32 *)((volatile uchar *)sii->curmap + regoff);
    }

    return (r);
}
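
/* Hedged sketch (not compiled): when ai_corereg_addr() can resolve a fast
 * mapping it returns an address usable directly with R_REG/W_REG, avoiding
 * the interrupt and core-switch overhead of ai_corereg(). The NULL fallback
 * follows the doc comment above; corereg_addr_example() is hypothetical.
 */
#if 0
static void
corereg_addr_example(si_t *sih, uint coreidx, uint regoff)
{
    si_info_t *sii = SI_INFO(sih);
    volatile uint32 *r = ai_corereg_addr(sih, coreidx, regoff);

    if (r != NULL)
        W_REG(sii->osh, r, R_REG(sii->osh, r) | 0x1);
    /* else: fall back to ai_corereg(), which handles the core switch */
}
#endif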
void
ai_core_disable(si_t *sih, uint32 bits)
{
    si_info_t *sii = SI_INFO(sih);
    volatile uint32 dummy;
    uint32 status;
    aidmp_t *ai;

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    /* if core is already in reset, just return */
    if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
        return;
    }

    /* ensure there are no pending backplane operations */
    SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

    /* if pending backplane ops still, try waiting longer */
    if (status != 0) {
        /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
        /* during driver load we may need more time */
        SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
        /* if still pending ops, continue on and try disable anyway */
        /* this is in big hammer path, so don't call wl_reinit in this case... */
    }

    W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
    dummy = R_REG(sii->osh, &ai->resetctrl);
    BCM_REFERENCE(dummy);
    OSL_DELAY(1);

    W_REG(sii->osh, &ai->ioctrl, bits);
    dummy = R_REG(sii->osh, &ai->ioctrl);
    BCM_REFERENCE(dummy);
    OSL_DELAY(10);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
static void
_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
    si_info_t *sii = SI_INFO(sih);
#if defined(UCM_CORRUPTION_WAR)
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
#endif // endif
    aidmp_t *ai;
    volatile uint32 dummy;
    uint loop_counter = 10;

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    /* ensure there are no pending backplane operations */
    SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

    /* put core into reset state */
    W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
    OSL_DELAY(10);

    /* ensure there are no pending backplane operations */
    SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

    W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
    dummy = R_REG(sii->osh, &ai->ioctrl);
    BCM_REFERENCE(dummy);
#ifdef UCM_CORRUPTION_WAR
    if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
        /* Reset FGC */
        OSL_DELAY(1);
        W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
    }
#endif /* UCM_CORRUPTION_WAR */

    /* ensure there are no pending backplane operations */
    SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

    while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
        /* ensure there are no pending backplane operations */
        SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

        /* take core out of reset */
        W_REG(sii->osh, &ai->resetctrl, 0);

        /* ensure there are no pending backplane operations */
        SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
    }

#ifdef UCM_CORRUPTION_WAR
    /* Pulse FGC after lifting Reset */
    W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
#else
    W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
#endif /* UCM_CORRUPTION_WAR */
    dummy = R_REG(sii->osh, &ai->ioctrl);
    BCM_REFERENCE(dummy);
#ifdef UCM_CORRUPTION_WAR
    if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
        /* Reset FGC */
        OSL_DELAY(1);
        W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
    }
#endif /* UCM_CORRUPTION_WAR */
    OSL_DELAY(1);
}
void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint idx = sii->curidx;

    if (cores_info->wrapba3[idx] != 0) {
        ai_setcoreidx_3rdwrap(sih, idx);
        _ai_core_reset(sih, bits, resetbits);
        ai_setcoreidx(sih, idx);
    }

    if (cores_info->wrapba2[idx] != 0) {
        ai_setcoreidx_2ndwrap(sih, idx);
        _ai_core_reset(sih, bits, resetbits);
        ai_setcoreidx(sih, idx);
    }

    _ai_core_reset(sih, bits, resetbits);
}
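
/* Hedged sketch (not compiled): one conventional bring-up sequence for the
 * currently focused core -- disable, run the reset sequence with any
 * core-specific control bits, then verify with ai_iscoreup(). This pairing
 * is an assumption drawn from how these helpers are used elsewhere in the
 * driver, not a documented requirement.
 */
#if 0
static bool
bringup_current_core(si_t *sih, uint32 bits, uint32 resetbits)
{
    ai_core_disable(sih, bits);
    ai_core_reset(sih, bits, resetbits);
    return ai_iscoreup(sih);
}
#endif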
void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 w;

    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
                  __FUNCTION__));
        return;
    }
    if (PMU_DMP()) {
        SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
                  __FUNCTION__));
        return;
    }

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    ASSERT((val & ~mask) == 0);

    if (mask || val) {
        w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
        W_REG(sii->osh, &ai->ioctrl, w);
    }
}

uint32
ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 w;

    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
                  __FUNCTION__));
        return 0;
    }
    if (PMU_DMP()) {
        SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
                  __FUNCTION__));
        return 0;
    }

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    ASSERT((val & ~mask) == 0);

    if (mask || val) {
        w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
        W_REG(sii->osh, &ai->ioctrl, w);
    }

    return R_REG(sii->osh, &ai->ioctrl);
}

uint32
ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 w;

    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
                  __FUNCTION__));
        return 0;
    }
    if (PMU_DMP()) {
        SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
                  __FUNCTION__));
        return 0;
    }

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    ASSERT((val & ~mask) == 0);
    ASSERT((mask & ~SISF_CORE_BITS) == 0);

    if (mask || val) {
        w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
        W_REG(sii->osh, &ai->iostatus, w);
    }

    return R_REG(sii->osh, &ai->iostatus);
}
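
/* Hedged sketch (not compiled): reading the wrapper's control and status
 * flags without modifying them, via the zero-mask/zero-val convention used
 * throughout this file.
 */
#if 0
static void
flags_example(si_t *sih)
{
    uint32 cflags = ai_core_cflags(sih, 0, 0); /* ioctrl */
    uint32 sflags = ai_core_sflags(sih, 0, 0); /* iostatus */

    SI_PRINT(("ioctrl 0x%x iostatus 0x%x\n", cflags, sflags));
}
#endif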
#if defined(BCMDBG_PHYDUMP)
/* print interesting aidmp registers */
void
ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
    si_info_t *sii = SI_INFO(sih);
    osl_t *osh;
    aidmp_t *ai;
    uint i;
    uint32 prev_value = 0;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;
    uint32 cfg_reg = 0;
    uint bar0_win_offset = 0;

    osh = sii->osh;

    /* Save and restore wrapper access window */
    if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        if (PCIE_GEN2(sii)) {
            cfg_reg = PCIE2_BAR0_CORE2_WIN2;
            bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
        } else {
            cfg_reg = PCI_BAR0_WIN2;
            bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
        }

        prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

        if (prev_value == ID32_INVALID) {
            SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
            return;
        }
    }

    bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
                sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);

    for (i = 0; i < sii->axi_num_wrappers; i++) {
        if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
            /* Set BAR0 window to bridge wrapper base address */
            OSL_PCI_WRITE_CONFIG(osh,
                cfg_reg, 4, axi_wrapper[i].wrapper_addr);

            ai = (aidmp_t *)((volatile uint8 *)sii->curmap + bar0_win_offset);
        } else {
            ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
        }

        bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x\n", axi_wrapper[i].cid,
                    axi_wrapper[i].rev,
                    axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
                    axi_wrapper[i].wrapper_addr);

        /* BCM4707_DMP() */
        if (BCM4707_CHIP(CHIPID(sih->chip)) &&
            (axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
            bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
            continue;
        }

        bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
                    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
                    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
                    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
                    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
                    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
                    "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
                    R_REG(osh, &ai->ioctrlset),
                    R_REG(osh, &ai->ioctrlclear),
                    R_REG(osh, &ai->ioctrl),
                    R_REG(osh, &ai->iostatus),
                    R_REG(osh, &ai->ioctrlwidth),
                    R_REG(osh, &ai->iostatuswidth),
                    R_REG(osh, &ai->resetctrl),
                    R_REG(osh, &ai->resetstatus),
                    R_REG(osh, &ai->resetreadid),
                    R_REG(osh, &ai->resetwriteid),
                    R_REG(osh, &ai->errlogctrl),
                    R_REG(osh, &ai->errlogdone),
                    R_REG(osh, &ai->errlogstatus),
                    R_REG(osh, &ai->errlogaddrlo),
                    R_REG(osh, &ai->errlogaddrhi),
                    R_REG(osh, &ai->errlogid),
                    R_REG(osh, &ai->errloguser),
                    R_REG(osh, &ai->errlogflags),
                    R_REG(osh, &ai->intstatus),
                    R_REG(osh, &ai->config),
                    R_REG(osh, &ai->itcr));
    }

    /* Restore the initial wrapper space */
    if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        if (prev_value && cfg_reg) {
            OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
        }
    }
}
#endif // endif
void
ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
{
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 i;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;
    uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
        ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);

#ifdef BCM_BACKPLANE_TIMEOUT
    uint32 prev_value = 0;
    osl_t *osh = sii->osh;
    uint32 cfg_reg = 0;
    uint32 offset = 0;
#endif /* BCM_BACKPLANE_TIMEOUT */

    if ((sii->axi_num_wrappers == 0) ||
#ifdef BCM_BACKPLANE_TIMEOUT
        (!PCIE(sii)) ||
#endif /* BCM_BACKPLANE_TIMEOUT */
        FALSE) {
        SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
                 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
                 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
        return;
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Save and restore the wrapper access window */
    if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        if (PCIE_GEN1(sii)) {
            cfg_reg = PCI_BAR0_WIN2;
            offset = PCI_BAR0_WIN2_OFFSET;
        } else if (PCIE_GEN2(sii)) {
            cfg_reg = PCIE2_BAR0_CORE2_WIN2;
            offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
        } else {
            ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
        }

        prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
        if (prev_value == ID32_INVALID) {
            SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
            return;
        }
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

    for (i = 0; i < sii->axi_num_wrappers; ++i) {
        if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
            SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
                     axi_wrapper[i].mfg,
                     axi_wrapper[i].cid,
                     axi_wrapper[i].wrapper_addr));
            continue;
        }

        /* Update only the given core if requested */
        if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
            continue;
        }

#ifdef BCM_BACKPLANE_TIMEOUT
        if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
            /* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
            OSL_PCI_WRITE_CONFIG(osh,
                cfg_reg, 4, axi_wrapper[i].wrapper_addr);

            /* set AI to BAR0 + offset corresponding to gen1 or gen2 */
            ai = (aidmp_t *)(DISCARD_QUAL(sii->curmap, uint8) + offset);
        } else
#endif /* BCM_BACKPLANE_TIMEOUT */
        {
            ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
        }

        W_REG(sii->osh, &ai->errlogctrl, errlogctrl);

        SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
                 axi_wrapper[i].mfg,
                 axi_wrapper[i].cid,
                 axi_wrapper[i].wrapper_addr,
                 R_REG(sii->osh, &ai->errlogctrl)));
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Restore the initial wrapper space */
    if (prev_value) {
        OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
    }
#endif /* BCM_BACKPLANE_TIMEOUT */
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
}
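
/* Hedged sketch (not compiled): enabling the timeout on every slave wrapper.
 * timeout_exp is the exponent programmed into errlogctrl, i.e. the wrapper
 * appears to wait on the order of 2^timeout_exp backplane cycles before
 * flagging a timeout (an assumption from the AIELC_TO_EXP field name); the
 * value 9 below is purely illustrative.
 */
#if 0
static void
enable_all_backplane_timeouts(si_t *sih)
{
    /* cid == 0 means "apply to every slave wrapper" */
    ai_update_backplane_timeouts(sih, TRUE, 9, 0);
}
#endif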
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
/* Some slave errors are ignored rather than reported; count those cases */
static uint32 si_ignore_errlog_cnt = 0;

static bool
ai_ignore_errlog(si_info_t *sii, aidmp_t *ai,
	uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
{
	uint32 axi_id;
#ifdef BCMPCIE_BTLOG
	uint32 axi_id2 = BCM4347_UNUSED_AXI_ID;
#endif /* BCMPCIE_BTLOG */
	uint32 ignore_errsts = AIELS_SLAVE_ERR;
	uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
	uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
	uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;

	/* Ignore BT slave errors if the errlog is for chipcommon addr 0x190 */
	switch (CHIPID(sii->pub.chip)) {
		case BCM4350_CHIP_ID:
			axi_id = BCM4350_BT_AXI_ID;
			break;
		case BCM4345_CHIP_ID:
			axi_id = BCM4345_BT_AXI_ID;
			break;
		case BCM4349_CHIP_GRPID:
			axi_id = BCM4349_BT_AXI_ID;
			break;
		case BCM4364_CHIP_ID:
		case BCM4373_CHIP_ID:
			axi_id = BCM4364_BT_AXI_ID;
			break;
#ifdef BCMPCIE_BTLOG
		case BCM4347_CHIP_ID:
		case BCM4357_CHIP_ID:
			axi_id = BCM4347_CC_AXI_ID;
			axi_id2 = BCM4347_PCIE_AXI_ID;
			ignore_errsts = AIELS_TIMEOUT;
			ignore_hi = BCM4347_BT_ADDR_HI;
			ignore_lo = BCM4347_BT_ADDR_LO;
			ignore_size = BCM4347_BT_SIZE;
			break;
#endif /* BCMPCIE_BTLOG */
		default:
			return FALSE;
	}

	/* AXI ID check */
	err_axi_id &= AI_ERRLOGID_AXI_ID_MASK;
	if (!(err_axi_id == axi_id ||
#ifdef BCMPCIE_BTLOG
		(axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2)))
#else
		FALSE))
#endif /* BCMPCIE_BTLOG */
		return FALSE;

	/* Error status check (slave error, or timeout for the BTLOG case) */
	if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts)
		return FALSE;

	/* Address range check */
	if ((hi_addr != ignore_hi) ||
		(lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size)))
		return FALSE;

#ifdef BCMPCIE_BTLOG
	if (ignore_errsts == AIELS_TIMEOUT) {
		/* Reset the AXI timeout to unblock the bus */
		ai_reset_axi_to(sii, ai);
	}
#endif /* BCMPCIE_BTLOG */

	return TRUE;
}
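#if 0
/*
 * Illustrative sketch only (not compiled): on a BCM4350, a BT-originated
 * slave error against the chipcommon SPROM shadow window is filtered out
 * instead of being escalated. The values below are symbolic stand-ins for
 * what the errlog registers would report; this assumes BCM4350_BT_AXI_ID
 * already fits within AI_ERRLOGID_AXI_ID_MASK.
 */
static void
ignore_errlog_sketch(si_info_t *sii, aidmp_t *ai)
{
	uint32 lo = BT_CC_SPROM_BADREG_LO;	/* inside the ignored window */
	uint32 hi = BT_CC_SPROM_BADREG_HI;
	uint32 id = BCM4350_BT_AXI_ID;
	uint32 sts = AIELS_SLAVE_ERR;

	if (ai_ignore_errlog(sii, ai, lo, hi, id, sts)) {
		si_ignore_errlog_cnt++;	/* counted, but not treated as fatal */
	}
}
#endif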
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
#ifdef BCM_BACKPLANE_TIMEOUT
/* Return the APB bridge details (core id and unit) for the given core */
static bool
ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreunit)
{
	uint i;
	uint32 core_base, core_end;
	si_info_t *sii = SI_INFO(sih);
	static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
	uint32 tmp_coreunit = 0;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
		return FALSE;

	/* Most queries are for the d11 core. Cache the last lookup and return
	 * it on a hit rather than iterating the core table; apb_id_cached == 0
	 * means the cache has not been populated yet.
	 */
	if ((coreidx_cached == coreidx) && (apb_id_cached != 0)) {
		*apb_id = apb_id_cached;
		*apb_coreunit = apb_coreunit_cached;
		return TRUE;
	}

	core_base = cores_info->coresba[coreidx];
	core_end = core_base + cores_info->coresba_size[coreidx];

	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == APB_BRIDGE_ID) {
			uint32 apb_base;
			uint32 apb_end;

			apb_base = cores_info->coresba[i];
			apb_end = apb_base + cores_info->coresba_size[i];

			if ((core_base >= apb_base) &&
				(core_end <= apb_end)) {
				/* The current core is attached to this APB bridge */
				*apb_id = apb_id_cached = APB_BRIDGE_ID;
				*apb_coreunit = apb_coreunit_cached = tmp_coreunit;
				coreidx_cached = coreidx;
				return TRUE;
			}

			/* Increment the coreunit for each APB bridge found */
			tmp_coreunit++;
		}
	}

	return FALSE;
}
uint32
ai_clear_backplane_to_fast(si_t *sih, void *addr)
{
	si_info_t *sii = SI_INFO(sih);
	volatile void *curmap = sii->curmap;
	bool core_reg = FALSE;

	/* Use the fast path only for core register accesses */
	if (((uintptr)addr >= (uintptr)curmap) &&
		((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
		/* The address being accessed is within the current core reg map */
		core_reg = TRUE;
	}

	if (core_reg) {
		uint32 apb_id, apb_coreunit;

		if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
			&apb_id, &apb_coreunit) == TRUE) {
			/* Found the APB bridge corresponding to the current core;
			 * check for bus errors in that APB wrapper only.
			 */
			return ai_clear_backplane_to_per_core(sih,
				apb_id, apb_coreunit, NULL);
		}
	}

	/* Default: poll for errors on all slave wrappers */
	return si_clear_backplane_to(sih);
}
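#if 0
/*
 * Illustrative usage sketch (not compiled): after a read from the currently
 * mapped core returns ID32_INVALID, the fast path checks only the APB bridge
 * in front of that core instead of polling every slave wrapper. "addr" is
 * the register address whose access failed.
 */
static void
backplane_to_fast_sketch(si_t *sih, void *addr)
{
	uint32 sts = ai_clear_backplane_to_fast(sih, addr);

	if (sts != AXI_WRAP_STS_NONE) {
		/* A timeout/slave/decode error was logged and has been cleared */
	}
}
#endif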
#endif /* BCM_BACKPLANE_TIMEOUT */
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
static bool g_disable_backplane_logs = FALSE;

#if defined(ETD)
static uint32 last_axi_error = AXI_WRAP_STS_NONE;
static uint32 last_axi_error_core = 0;
static uint32 last_axi_error_wrap = 0;
#endif /* ETD */

/*
 * API to clear the backplane timeout per core.
 * The caller may pass an optional wrapper address. If present, it is used as
 * the wrapper base address, and in that case the caller must also provide
 * the coreid. If both coreid and wrapper are zero, the error status of the
 * current bridge is checked.
 */
uint32
ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
{
	uint32 ret = AXI_WRAP_STS_NONE;
	aidmp_t *ai = NULL;
	uint32 errlog_status = 0;
	si_info_t *sii = SI_INFO(sih);
	uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
	uint32 current_coreidx = si_coreidx(sih);
	uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);

#if defined(BCM_BACKPLANE_TIMEOUT)
	si_axi_error_t *axi_error = sih->err_info ?
		&sih->err_info->axi_error[sih->err_info->count] : NULL;
#endif /* BCM_BACKPLANE_TIMEOUT */
	bool restore_core = FALSE;

	if ((sii->axi_num_wrappers == 0) ||
#ifdef BCM_BACKPLANE_TIMEOUT
		(!PCIE(sii)) ||
#endif /* BCM_BACKPLANE_TIMEOUT */
		FALSE) {
		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
		return AXI_WRAP_STS_NONE;
	}

	if (wrap != NULL) {
		ai = (aidmp_t *)wrap;
	} else if (coreid && (target_coreidx != current_coreidx)) {
		if (ai_setcoreidx(sih, target_coreidx) == NULL) {
			/* Unable to set the target core */
			SI_PRINT(("Set core failed: coreid:%x, unit:%d, target_coreidx:%d\n",
				coreid, coreunit, target_coreidx));
			errlog_lo = target_coreidx;
			ret = AXI_WRAP_STS_SET_CORE_FAIL;
			goto end;
		}
		restore_core = TRUE;
		ai = (aidmp_t *)si_wrapperregs(sih);
	} else {
		/* Read the error status of the current wrapper */
		ai = (aidmp_t *)si_wrapperregs(sih);

		/* Update coreid to the current core's ID */
		coreid = si_coreid(sih);
	}

	/* Read the error log status */
	errlog_status = R_REG(sii->osh, &ai->errlogstatus);
	if (errlog_status == ID32_INVALID) {
		/* Do not try to peek further */
		SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
			__FUNCTION__, errlog_status, coreid));
		ret = AXI_WRAP_STS_WRAP_RD_ERR;
		errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
		goto end;
	}

	if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
		uint32 tmp;
		uint32 count = 0;

		/* Set ErrDone to clear the condition */
		W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);

		/* SPINWAIT on the errlogstatus timeout status bits */
		while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) {
			if (tmp == ID32_INVALID) {
				SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
					__FUNCTION__, errlog_status, tmp));
				ret = AXI_WRAP_STS_WRAP_RD_ERR;
				errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
				goto end;
			}
			/*
			 * Clear again to avoid getting stuck in the loop if a new
			 * error was logged after we cleared the first timeout.
			 */
			W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);

			count++;
			OSL_DELAY(10);
			if ((10 * count) > AI_REG_READ_TIMEOUT) {
				errlog_status = tmp;
				break;
			}
		}

		errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
		errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
		errlog_id = R_REG(sii->osh, &ai->errlogid);
		errlog_flags = R_REG(sii->osh, &ai->errlogflags);

		/* We are already in the error path, so it is OK to check whether
		 * this is a slave error that should be ignored.
		 */
		if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
			errlog_status)) {
			si_ignore_errlog_cnt++;
			goto end;
		}

		/* Only reset the APB bridge on a timeout (not on a slave or
		 * decode error).
		 */
		switch (errlog_status & AIELS_TIMEOUT_MASK) {
			case AIELS_SLAVE_ERR:
				SI_PRINT(("AXI slave error\n"));
				ret = AXI_WRAP_STS_SLAVE_ERR;
				break;

			case AIELS_TIMEOUT:
				ai_reset_axi_to(sii, ai);
				ret = AXI_WRAP_STS_TIMEOUT;
				break;

			case AIELS_DECODE:
				SI_PRINT(("AXI decode error\n"));
				ret = AXI_WRAP_STS_DECODE_ERR;
				break;

			default:
				ASSERT(0); /* should be impossible */
		}

		SI_PRINT(("\tCoreID: %x\n", coreid));
		SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
			", status 0x%08x\n",
			errlog_lo, errlog_hi, errlog_id, errlog_flags,
			errlog_status));
	}

end:
#if defined(ETD)
	if (ret != AXI_WRAP_STS_NONE) {
		last_axi_error = ret;
		last_axi_error_core = coreid;
		last_axi_error_wrap = (uint32)(uintptr)ai;
	}
#endif /* ETD */

#if defined(BCM_BACKPLANE_TIMEOUT)
	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
		axi_error->error = ret;
		axi_error->coreid = coreid;
		axi_error->errlog_lo = errlog_lo;
		axi_error->errlog_hi = errlog_hi;
		axi_error->errlog_id = errlog_id;
		axi_error->errlog_flags = errlog_flags;
		axi_error->errlog_status = errlog_status;
		sih->err_info->count++;

		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
			SI_PRINT(("AXI Error log overflow\n"));
		}
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

	if (restore_core) {
		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
			/* Unable to switch back to the original core */
			return ID32_INVALID;
		}
	}

	return ret;
}
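#if 0
/*
 * Illustrative usage sketch (not compiled): the three calling modes of
 * ai_clear_backplane_to_per_core(), per the comment above the function.
 * "wrap_va" is a hypothetical, already-mapped wrapper address.
 */
static void
clear_bpt_per_core_sketch(si_t *sih, void *wrap_va)
{
	uint32 sts;

	/* 1. Explicit wrapper address; coreid must identify that wrapper */
	sts = ai_clear_backplane_to_per_core(sih, APB_BRIDGE_ID, 0, wrap_va);

	/* 2. coreid/coreunit only: the core is switched to and then restored */
	sts = ai_clear_backplane_to_per_core(sih, APB_BRIDGE_ID, 0, NULL);

	/* 3. Both zero: check the wrapper of the currently selected core */
	sts = ai_clear_backplane_to_per_core(sih, 0, 0, NULL);

	BCM_REFERENCE(sts);
}
#endif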
/* Reset AXI timeout */
static void
ai_reset_axi_to(si_info_t *sii, aidmp_t *ai)
{
	/* Reset the APB bridge */
	OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	/* Read back to flush the posted write */
	(void)R_REG(sii->osh, &ai->resetctrl);
	/* Clear the reset bit */
	AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
	/* Read back to flush the posted write */
	(void)R_REG(sii->osh, &ai->resetctrl);
	SI_PRINT(("AXI timeout\n"));
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
		SI_PRINT(("reset failed on wrapper %p\n", ai));
		g_disable_backplane_logs = TRUE;
	}
}
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
/*
 * This API polls all slave wrappers for errors and returns a bit map of
 * all reported errors.
 * return - bit map of
 *	AXI_WRAP_STS_NONE
 *	AXI_WRAP_STS_TIMEOUT
 *	AXI_WRAP_STS_SLAVE_ERR
 *	AXI_WRAP_STS_DECODE_ERR
 *	AXI_WRAP_STS_PCI_RD_ERR
 *	AXI_WRAP_STS_WRAP_RD_ERR
 *	AXI_WRAP_STS_SET_CORE_FAIL
 * On timeout detection, the corresponding bridge is reset to unblock the bus.
 * The error reported by each wrapper can be retrieved using the API
 * si_get_axi_errlog_info().
 */
uint32
ai_clear_backplane_to(si_t *sih)
{
	uint32 ret = 0;
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 i;
	axi_wrapper_t *axi_wrapper = sii->axi_wrapper;

#ifdef BCM_BACKPLANE_TIMEOUT
	uint32 prev_value = 0;
	osl_t *osh = sii->osh;
	uint32 cfg_reg = 0;
	uint32 offset = 0;

	if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
#else
	if (sii->axi_num_wrappers == 0)
#endif /* BCM_BACKPLANE_TIMEOUT */
	{
		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
		return AXI_WRAP_STS_NONE;
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Save and restore the wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN1(sii)) {
			cfg_reg = PCI_BAR0_WIN2;
			offset = PCI_BAR0_WIN2_OFFSET;
		} else if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		}
		else {
			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

		if (prev_value == ID32_INVALID) {
			si_axi_error_t *axi_error = sih->err_info ?
				&sih->err_info->axi_error[sih->err_info->count] :
				NULL;

			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));

			if (axi_error) {
				axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
				axi_error->errlog_lo = cfg_reg;
				sih->err_info->count++;

				if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
					sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
					SI_PRINT(("AXI Error log overflow\n"));
				}
			}

			return ret;
		}
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

	for (i = 0; i < sii->axi_num_wrappers; ++i) {
		uint32 tmp;

		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
			continue;
		}

#ifdef BCM_BACKPLANE_TIMEOUT
		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			/* Set ai to BAR0 + offset corresponding to Gen1 or Gen2 */
			ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
		}
		else
#endif /* BCM_BACKPLANE_TIMEOUT */
		{
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
			DISCARD_QUAL(ai, void));

		ret |= tmp;
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Restore the initial wrapper space */
	if (prev_value) {
		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

	return ret;
}
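#if 0
/*
 * Illustrative usage sketch (not compiled): the return value is a bit map,
 * so individual error classes can be tested after one poll of all wrappers.
 */
static void
clear_bpt_all_sketch(si_t *sih)
{
	uint32 sts = ai_clear_backplane_to(sih);

	if (sts & AXI_WRAP_STS_TIMEOUT) {
		/* A bridge timed out and was reset to unblock the bus */
	}
	if (sts & (AXI_WRAP_STS_SLAVE_ERR | AXI_WRAP_STS_DECODE_ERR)) {
		/* Errors were logged; details via si_get_axi_errlog_info() */
	}
}
#endif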
uint
ai_num_slaveports(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 cib;

	cib = cores_info->cib[coreidx];
	return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
}
#ifdef UART_TRAP_DBG
void
ai_dump_APB_Bridge_registers(si_t *sih)
{
	aidmp_t *ai;
	si_info_t *sii = SI_INFO(sih);

	ai = (aidmp_t *)sii->br_wrapba[0];
	printf("APB Bridge 0\n");
	printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
		R_REG(sii->osh, &ai->errlogaddrlo),
		R_REG(sii->osh, &ai->errlogaddrhi),
		R_REG(sii->osh, &ai->errlogid),
		R_REG(sii->osh, &ai->errlogflags));
	printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
}
#endif /* UART_TRAP_DBG */
void
ai_force_clocks(si_t *sih, uint clock_state)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai, *ai_sec = NULL;
	volatile uint32 dummy;
	uint32 ioctrl;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;
	if (cores_info->wrapba2[sii->curidx])
		ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);

	/* Ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	if (clock_state == FORCE_CLK_ON) {
		ioctrl = R_REG(sii->osh, &ai->ioctrl);
		W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
		dummy = R_REG(sii->osh, &ai->ioctrl);
		BCM_REFERENCE(dummy);
		if (ai_sec) {
			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
			BCM_REFERENCE(dummy);
		}
	} else {
		ioctrl = R_REG(sii->osh, &ai->ioctrl);
		W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
		dummy = R_REG(sii->osh, &ai->ioctrl);
		BCM_REFERENCE(dummy);
		if (ai_sec) {
			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
			BCM_REFERENCE(dummy);
		}
	}

	/* Ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
}
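#if 0
/*
 * Illustrative usage sketch (not compiled): bracketing an access to a
 * clock-gated register of the current core with forced-on gated clocks.
 * FORCE_CLK_OFF is assumed here to be the companion state of FORCE_CLK_ON;
 * only FORCE_CLK_ON appears in the function above.
 */
static uint32
force_clocks_sketch(si_t *sih, volatile uint32 *reg)
{
	si_info_t *sii = SI_INFO(sih);
	uint32 val;

	ai_force_clocks(sih, FORCE_CLK_ON);	/* assert SICF_FGC on the wrapper(s) */
	val = R_REG(sii->osh, reg);
	ai_force_clocks(sih, FORCE_CLK_OFF);	/* restore gated clocking */

	return val;
}
#endif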