sbutils.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095
  1. /*
  2. * Misc utility routines for accessing chip-specific features
  3. * of the SiliconBackplane-based Broadcom chips.
  4. *
  5. * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
  6. *
  7. * Copyright (C) 1999-2020, Broadcom Corporation
  8. *
  9. * Unless you and Broadcom execute a separate written software license
  10. * agreement governing use of this software, this software is licensed to you
  11. * under the terms of the GNU General Public License version 2 (the "GPL"),
  12. * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  13. * following added to such license:
  14. *
  15. * As a special exception, the copyright holders of this software give you
  16. * permission to link this software with independent modules, and to copy and
  17. * distribute the resulting executable under terms of your choice, provided that
  18. * you also meet, for each linked independent module, the terms and conditions of
  19. * the license of that module. An independent module is a module which is not
  20. * derived from this software. The special exception does not apply to any
  21. * modifications of the software.
  22. *
  23. * Notwithstanding the above, under no circumstances may you combine this
  24. * software in any way with any other Broadcom software provided under a license
  25. * other than the GPL, without Broadcom's express prior written consent.
  26. *
  27. *
  28. * <<Broadcom-WL-IPTag/Open:>>
  29. *
  30. * $Id: sbutils.c 700323 2017-05-18 16:12:11Z $
  31. */
  32. #include <bcm_cfg.h>
  33. #include <typedefs.h>
  34. #include <bcmdefs.h>
  35. #include <osl.h>
  36. #include <bcmutils.h>
  37. #include <siutils.h>
  38. #include <bcmdevs.h>
  39. #include <hndsoc.h>
  40. #include <sbchipc.h>
  41. #include <pcicfg.h>
  42. #include <sbpcmcia.h>
  43. #include "siutils_priv.h"
  44. /* local prototypes */
  45. static uint _sb_coreidx(si_info_t *sii, uint32 sba);
  46. static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
  47. uint ncores, uint devid);
  48. static uint32 _sb_coresba(si_info_t *sii);
  49. static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
  50. #define SET_SBREG(sii, r, mask, val) \
  51. W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
  52. #define REGS2SB(va) (sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)
  53. /* sonicsrev */
  54. #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
  55. #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
  56. #define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
  57. #define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
  58. #define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
  59. #define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
  60. static uint32
  61. sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
  62. {
  63. uint8 tmp;
  64. uint32 val, intr_val = 0;
  65. /*
  66. * compact flash only has 11 bits address, while we needs 12 bits address.
  67. * MEM_SEG will be OR'd with other 11 bits address in hardware,
  68. * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
  69. * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
  70. */
  71. if (PCMCIA(sii)) {
  72. INTR_OFF(sii, intr_val);
  73. tmp = 1;
  74. OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
  75. sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
  76. }
  77. val = R_REG(sii->osh, sbr);
  78. if (PCMCIA(sii)) {
  79. tmp = 0;
  80. OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
  81. INTR_RESTORE(sii, intr_val);
  82. }
  83. return (val);
  84. }
  85. static void
  86. sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
  87. {
  88. uint8 tmp;
  89. volatile uint32 dummy;
  90. uint32 intr_val = 0;
  91. /*
  92. * compact flash only has 11 bits address, while we needs 12 bits address.
  93. * MEM_SEG will be OR'd with other 11 bits address in hardware,
  94. * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
  95. * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
  96. */
  97. if (PCMCIA(sii)) {
  98. INTR_OFF(sii, intr_val);
  99. tmp = 1;
  100. OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
  101. sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
  102. }
  103. if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
  104. dummy = R_REG(sii->osh, sbr);
  105. BCM_REFERENCE(dummy);
  106. W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
  107. dummy = R_REG(sii->osh, sbr);
  108. BCM_REFERENCE(dummy);
  109. W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
  110. } else
  111. W_REG(sii->osh, sbr, v);
  112. if (PCMCIA(sii)) {
  113. tmp = 0;
  114. OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
  115. INTR_RESTORE(sii, intr_val);
  116. }
  117. }
  118. uint
  119. sb_coreid(si_t *sih)
  120. {
  121. si_info_t *sii;
  122. sbconfig_t *sb;
  123. sii = SI_INFO(sih);
  124. sb = REGS2SB(sii->curmap);
  125. return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
  126. }
  127. uint
  128. sb_intflag(si_t *sih)
  129. {
  130. si_info_t *sii = SI_INFO(sih);
  131. volatile void *corereg;
  132. sbconfig_t *sb;
  133. uint origidx, intflag, intr_val = 0;
  134. INTR_OFF(sii, intr_val);
  135. origidx = si_coreidx(sih);
  136. corereg = si_setcore(sih, CC_CORE_ID, 0);
  137. ASSERT(corereg != NULL);
  138. sb = REGS2SB(corereg);
  139. intflag = R_SBREG(sii, &sb->sbflagst);
  140. sb_setcoreidx(sih, origidx);
  141. INTR_RESTORE(sii, intr_val);
  142. return intflag;
  143. }
  144. uint
  145. sb_flag(si_t *sih)
  146. {
  147. si_info_t *sii;
  148. sbconfig_t *sb;
  149. sii = SI_INFO(sih);
  150. sb = REGS2SB(sii->curmap);
  151. return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
  152. }
  153. void
  154. sb_setint(si_t *sih, int siflag)
  155. {
  156. si_info_t *sii;
  157. sbconfig_t *sb;
  158. uint32 vec;
  159. sii = SI_INFO(sih);
  160. sb = REGS2SB(sii->curmap);
  161. if (siflag == -1)
  162. vec = 0;
  163. else
  164. vec = 1 << siflag;
  165. W_SBREG(sii, &sb->sbintvec, vec);
  166. }
  167. /* return core index of the core with address 'sba' */
  168. static uint
  169. _sb_coreidx(si_info_t *sii, uint32 sba)
  170. {
  171. uint i;
  172. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  173. for (i = 0; i < sii->numcores; i ++)
  174. if (sba == cores_info->coresba[i])
  175. return i;
  176. return BADIDX;
  177. }
  178. /* return core address of the current core */
  179. static uint32
  180. _sb_coresba(si_info_t *sii)
  181. {
  182. uint32 sbaddr;
  183. switch (BUSTYPE(sii->pub.bustype)) {
  184. case SI_BUS: {
  185. sbconfig_t *sb = REGS2SB(sii->curmap);
  186. sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
  187. break;
  188. }
  189. case PCI_BUS:
  190. sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
  191. break;
  192. case PCMCIA_BUS: {
  193. uint8 tmp = 0;
  194. OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
  195. sbaddr = (uint32)tmp << 12;
  196. OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
  197. sbaddr |= (uint32)tmp << 16;
  198. OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
  199. sbaddr |= (uint32)tmp << 24;
  200. break;
  201. }
  202. #ifdef BCMSDIO
  203. case SPI_BUS:
  204. case SDIO_BUS:
  205. sbaddr = (uint32)(uintptr)sii->curmap;
  206. break;
  207. #endif // endif
  208. default:
  209. sbaddr = BADCOREADDR;
  210. break;
  211. }
  212. return sbaddr;
  213. }
  214. uint
  215. sb_corevendor(si_t *sih)
  216. {
  217. si_info_t *sii;
  218. sbconfig_t *sb;
  219. sii = SI_INFO(sih);
  220. sb = REGS2SB(sii->curmap);
  221. return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
  222. }
  223. uint
  224. sb_corerev(si_t *sih)
  225. {
  226. si_info_t *sii;
  227. sbconfig_t *sb;
  228. uint sbidh;
  229. sii = SI_INFO(sih);
  230. sb = REGS2SB(sii->curmap);
  231. sbidh = R_SBREG(sii, &sb->sbidhigh);
  232. return (SBCOREREV(sbidh));
  233. }
  234. /* set core-specific control flags */
  235. void
  236. sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
  237. {
  238. si_info_t *sii;
  239. sbconfig_t *sb;
  240. uint32 w;
  241. sii = SI_INFO(sih);
  242. sb = REGS2SB(sii->curmap);
  243. ASSERT((val & ~mask) == 0);
  244. /* mask and set */
  245. w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
  246. (val << SBTML_SICF_SHIFT);
  247. W_SBREG(sii, &sb->sbtmstatelow, w);
  248. }
  249. /* set/clear core-specific control flags */
  250. uint32
  251. sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
  252. {
  253. si_info_t *sii;
  254. sbconfig_t *sb;
  255. uint32 w;
  256. sii = SI_INFO(sih);
  257. sb = REGS2SB(sii->curmap);
  258. ASSERT((val & ~mask) == 0);
  259. /* mask and set */
  260. if (mask || val) {
  261. w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
  262. (val << SBTML_SICF_SHIFT);
  263. W_SBREG(sii, &sb->sbtmstatelow, w);
  264. }
  265. /* return the new value
  266. * for write operation, the following readback ensures the completion of write opration.
  267. */
  268. return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
  269. }
  270. /* set/clear core-specific status flags */
  271. uint32
  272. sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
  273. {
  274. si_info_t *sii;
  275. sbconfig_t *sb;
  276. uint32 w;
  277. sii = SI_INFO(sih);
  278. sb = REGS2SB(sii->curmap);
  279. ASSERT((val & ~mask) == 0);
  280. ASSERT((mask & ~SISF_CORE_BITS) == 0);
  281. /* mask and set */
  282. if (mask || val) {
  283. w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
  284. (val << SBTMH_SISF_SHIFT);
  285. W_SBREG(sii, &sb->sbtmstatehigh, w);
  286. }
  287. /* return the new value */
  288. return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
  289. }
  290. bool
  291. sb_iscoreup(si_t *sih)
  292. {
  293. si_info_t *sii;
  294. sbconfig_t *sb;
  295. sii = SI_INFO(sih);
  296. sb = REGS2SB(sii->curmap);
  297. return ((R_SBREG(sii, &sb->sbtmstatelow) &
  298. (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
  299. (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
  300. }
  301. /*
  302. * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
  303. * switch back to the original core, and return the new value.
  304. *
  305. * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
  306. *
  307. * Also, when using pci/pcie, we can optimize away the core switching for pci registers
  308. * and (on newer pci cores) chipcommon registers.
  309. */
  310. uint
  311. sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
  312. {
  313. uint origidx = 0;
  314. volatile uint32 *r = NULL;
  315. uint w;
  316. uint intr_val = 0;
  317. bool fast = FALSE;
  318. si_info_t *sii = SI_INFO(sih);
  319. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  320. ASSERT(GOODIDX(coreidx));
  321. ASSERT(regoff < SI_CORE_SIZE);
  322. ASSERT((val & ~mask) == 0);
  323. if (coreidx >= SI_MAXCORES)
  324. return 0;
  325. if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
  326. /* If internal bus, we can always get at everything */
  327. fast = TRUE;
  328. /* map if does not exist */
  329. if (!cores_info->regs[coreidx]) {
  330. cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
  331. SI_CORE_SIZE);
  332. ASSERT(GOODREGS(cores_info->regs[coreidx]));
  333. }
  334. r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
  335. } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
  336. /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
  337. if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
  338. /* Chipc registers are mapped at 12KB */
  339. fast = TRUE;
  340. r = (volatile uint32 *)((volatile char *)sii->curmap +
  341. PCI_16KB0_CCREGS_OFFSET + regoff);
  342. } else if (sii->pub.buscoreidx == coreidx) {
  343. /* pci registers are at either in the last 2KB of an 8KB window
  344. * or, in pcie and pci rev 13 at 8KB
  345. */
  346. fast = TRUE;
  347. if (SI_FAST(sii))
  348. r = (volatile uint32 *)((volatile char *)sii->curmap +
  349. PCI_16KB0_PCIREGS_OFFSET + regoff);
  350. else
  351. r = (volatile uint32 *)((volatile char *)sii->curmap +
  352. ((regoff >= SBCONFIGOFF) ?
  353. PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
  354. regoff);
  355. }
  356. }
  357. if (!fast) {
  358. INTR_OFF(sii, intr_val);
  359. /* save current core index */
  360. origidx = si_coreidx(&sii->pub);
  361. /* switch core */
  362. r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
  363. regoff);
  364. }
  365. ASSERT(r != NULL);
  366. /* mask and set */
  367. if (mask || val) {
  368. if (regoff >= SBCONFIGOFF) {
  369. w = (R_SBREG(sii, r) & ~mask) | val;
  370. W_SBREG(sii, r, w);
  371. } else {
  372. w = (R_REG(sii->osh, r) & ~mask) | val;
  373. W_REG(sii->osh, r, w);
  374. }
  375. }
  376. /* readback */
  377. if (regoff >= SBCONFIGOFF)
  378. w = R_SBREG(sii, r);
  379. else {
  380. w = R_REG(sii->osh, r);
  381. }
  382. if (!fast) {
  383. /* restore core index */
  384. if (origidx != coreidx)
  385. sb_setcoreidx(&sii->pub, origidx);
  386. INTR_RESTORE(sii, intr_val);
  387. }
  388. return (w);
  389. }
  390. /*
  391. * If there is no need for fiddling with interrupts or core switches (typically silicon
  392. * back plane registers, pci registers and chipcommon registers), this function
  393. * returns the register offset on this core to a mapped address. This address can
  394. * be used for W_REG/R_REG directly.
  395. *
  396. * For accessing registers that would need a core switch, this function will return
  397. * NULL.
  398. */
  399. volatile uint32 *
  400. sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
  401. {
  402. volatile uint32 *r = NULL;
  403. bool fast = FALSE;
  404. si_info_t *sii = SI_INFO(sih);
  405. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  406. ASSERT(GOODIDX(coreidx));
  407. ASSERT(regoff < SI_CORE_SIZE);
  408. if (coreidx >= SI_MAXCORES)
  409. return 0;
  410. if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
  411. /* If internal bus, we can always get at everything */
  412. fast = TRUE;
  413. /* map if does not exist */
  414. if (!cores_info->regs[coreidx]) {
  415. cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
  416. SI_CORE_SIZE);
  417. ASSERT(GOODREGS(cores_info->regs[coreidx]));
  418. }
  419. r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
  420. } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
  421. /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
  422. if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
  423. /* Chipc registers are mapped at 12KB */
  424. fast = TRUE;
  425. r = (volatile uint32 *)((volatile char *)sii->curmap +
  426. PCI_16KB0_CCREGS_OFFSET + regoff);
  427. } else if (sii->pub.buscoreidx == coreidx) {
  428. /* pci registers are at either in the last 2KB of an 8KB window
  429. * or, in pcie and pci rev 13 at 8KB
  430. */
  431. fast = TRUE;
  432. if (SI_FAST(sii))
  433. r = (volatile uint32 *)((volatile char *)sii->curmap +
  434. PCI_16KB0_PCIREGS_OFFSET + regoff);
  435. else
  436. r = (volatile uint32 *)((volatile char *)sii->curmap +
  437. ((regoff >= SBCONFIGOFF) ?
  438. PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
  439. regoff);
  440. }
  441. }
  442. if (!fast)
  443. return 0;
  444. return (r);
  445. }
  446. /* Scan the enumeration space to find all cores starting from the given
  447. * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
  448. * is the default core address at chip POR time and 'regs' is the virtual
  449. * address that the default core is mapped at. 'ncores' is the number of
  450. * cores expected on bus 'sbba'. It returns the total number of cores
  451. * starting from bus 'sbba', inclusive.
  452. */
  453. #define SB_MAXBUSES 2
  454. static uint
  455. _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
  456. uint32 sbba, uint numcores, uint devid)
  457. {
  458. uint next;
  459. uint ncc = 0;
  460. uint i;
  461. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  462. if (bus >= SB_MAXBUSES) {
  463. SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
  464. return 0;
  465. }
  466. SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
  467. /* Scan all cores on the bus starting from core 0.
  468. * Core addresses must be contiguous on each bus.
  469. */
  470. for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
  471. cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
  472. /* keep and reuse the initial register mapping */
  473. if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
  474. SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
  475. cores_info->regs[next] = regs;
  476. }
  477. /* change core to 'next' and read its coreid */
  478. sii->curmap = _sb_setcoreidx(sii, next);
  479. sii->curidx = next;
  480. cores_info->coreid[next] = sb_coreid(&sii->pub);
  481. /* core specific processing... */
  482. /* chipc provides # cores */
  483. if (cores_info->coreid[next] == CC_CORE_ID) {
  484. chipcregs_t *cc = (chipcregs_t *)sii->curmap;
  485. uint32 ccrev = sb_corerev(&sii->pub);
  486. /* determine numcores - this is the total # cores in the chip */
  487. if (((ccrev == 4) || (ccrev >= 6))) {
  488. ASSERT(cc);
  489. numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
  490. CID_CC_SHIFT;
  491. } else {
  492. /* Older chips */
  493. uint chip = CHIPID(sii->pub.chip);
  494. if (chip == BCM4704_CHIP_ID)
  495. numcores = 9;
  496. else if (chip == BCM5365_CHIP_ID)
  497. numcores = 7;
  498. else {
  499. SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
  500. chip));
  501. ASSERT(0);
  502. numcores = 1;
  503. }
  504. }
  505. SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
  506. sii->pub.issim ? "QT" : ""));
  507. }
  508. /* scan bridged SB(s) and add results to the end of the list */
  509. else if (cores_info->coreid[next] == OCP_CORE_ID) {
  510. sbconfig_t *sb = REGS2SB(sii->curmap);
  511. uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
  512. uint nsbcc;
  513. sii->numcores = next + 1;
  514. if ((nsbba & 0xfff00000) != si_enum_base(devid))
  515. continue;
  516. nsbba &= 0xfffff000;
  517. if (_sb_coreidx(sii, nsbba) != BADIDX)
  518. continue;
  519. nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
  520. nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
  521. if (sbba == si_enum_base(devid))
  522. numcores -= nsbcc;
  523. ncc += nsbcc;
  524. }
  525. }
  526. SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
  527. sii->numcores = i + ncc;
  528. return sii->numcores;
  529. }
  530. /* scan the sb enumerated space to identify all cores */
  531. void
  532. sb_scan(si_t *sih, volatile void *regs, uint devid)
  533. {
  534. uint32 origsba;
  535. sbconfig_t *sb;
  536. si_info_t *sii = SI_INFO(sih);
  537. BCM_REFERENCE(devid);
  538. sb = REGS2SB(sii->curmap);
  539. sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
  540. /* Save the current core info and validate it later till we know
  541. * for sure what is good and what is bad.
  542. */
  543. origsba = _sb_coresba(sii);
  544. /* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
  545. sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
  546. }
  547. /*
  548. * This function changes logical "focus" to the indicated core;
  549. * must be called with interrupts off.
  550. * Moreover, callers should keep interrupts off during switching out of and back to d11 core
  551. */
  552. volatile void *
  553. sb_setcoreidx(si_t *sih, uint coreidx)
  554. {
  555. si_info_t *sii = SI_INFO(sih);
  556. if (coreidx >= sii->numcores)
  557. return (NULL);
  558. /*
  559. * If the user has provided an interrupt mask enabled function,
  560. * then assert interrupts are disabled before switching the core.
  561. */
  562. ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
  563. sii->curmap = _sb_setcoreidx(sii, coreidx);
  564. sii->curidx = coreidx;
  565. return (sii->curmap);
  566. }
  567. /* This function changes the logical "focus" to the indicated core.
  568. * Return the current core's virtual address.
  569. */
  570. static volatile void *
  571. _sb_setcoreidx(si_info_t *sii, uint coreidx)
  572. {
  573. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  574. uint32 sbaddr = cores_info->coresba[coreidx];
  575. volatile void *regs;
  576. switch (BUSTYPE(sii->pub.bustype)) {
  577. case SI_BUS:
  578. /* map new one */
  579. if (!cores_info->regs[coreidx]) {
  580. cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
  581. ASSERT(GOODREGS(cores_info->regs[coreidx]));
  582. }
  583. regs = cores_info->regs[coreidx];
  584. break;
  585. case PCI_BUS:
  586. /* point bar0 window */
  587. OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
  588. regs = sii->curmap;
  589. break;
  590. case PCMCIA_BUS: {
  591. uint8 tmp = (sbaddr >> 12) & 0x0f;
  592. OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
  593. tmp = (sbaddr >> 16) & 0xff;
  594. OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
  595. tmp = (sbaddr >> 24) & 0xff;
  596. OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
  597. regs = sii->curmap;
  598. break;
  599. }
  600. #ifdef BCMSDIO
  601. case SPI_BUS:
  602. case SDIO_BUS:
  603. /* map new one */
  604. if (!cores_info->regs[coreidx]) {
  605. cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
  606. ASSERT(GOODREGS(cores_info->regs[coreidx]));
  607. }
  608. regs = cores_info->regs[coreidx];
  609. break;
  610. #endif /* BCMSDIO */
  611. default:
  612. ASSERT(0);
  613. regs = NULL;
  614. break;
  615. }
  616. return regs;
  617. }
  618. /* Return the address of sbadmatch0/1/2/3 register */
  619. static volatile uint32 *
  620. sb_admatch(si_info_t *sii, uint asidx)
  621. {
  622. sbconfig_t *sb;
  623. volatile uint32 *addrm;
  624. sb = REGS2SB(sii->curmap);
  625. switch (asidx) {
  626. case 0:
  627. addrm = &sb->sbadmatch0;
  628. break;
  629. case 1:
  630. addrm = &sb->sbadmatch1;
  631. break;
  632. case 2:
  633. addrm = &sb->sbadmatch2;
  634. break;
  635. case 3:
  636. addrm = &sb->sbadmatch3;
  637. break;
  638. default:
  639. SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
  640. return 0;
  641. }
  642. return (addrm);
  643. }
  644. /* Return the number of address spaces in current core */
  645. int
  646. sb_numaddrspaces(si_t *sih)
  647. {
  648. si_info_t *sii;
  649. sbconfig_t *sb;
  650. sii = SI_INFO(sih);
  651. sb = REGS2SB(sii->curmap);
  652. /* + 1 because of enumeration space */
  653. return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
  654. }
  655. /* Return the address of the nth address space in the current core */
  656. uint32
  657. sb_addrspace(si_t *sih, uint asidx)
  658. {
  659. si_info_t *sii;
  660. sii = SI_INFO(sih);
  661. return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
  662. }
  663. /* Return the size of the nth address space in the current core */
  664. uint32
  665. sb_addrspacesize(si_t *sih, uint asidx)
  666. {
  667. si_info_t *sii;
  668. sii = SI_INFO(sih);
  669. return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
  670. }
  671. /* do buffered registers update */
  672. void
  673. sb_commit(si_t *sih)
  674. {
  675. si_info_t *sii = SI_INFO(sih);
  676. uint origidx;
  677. uint intr_val = 0;
  678. origidx = sii->curidx;
  679. ASSERT(GOODIDX(origidx));
  680. INTR_OFF(sii, intr_val);
  681. /* switch over to chipcommon core if there is one, else use pci */
  682. if (sii->pub.ccrev != NOREV) {
  683. chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
  684. ASSERT(ccregs != NULL);
  685. /* do the buffer registers update */
  686. W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
  687. W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
  688. } else
  689. ASSERT(0);
  690. /* restore core index */
  691. sb_setcoreidx(sih, origidx);
  692. INTR_RESTORE(sii, intr_val);
  693. }
  694. void
  695. sb_core_disable(si_t *sih, uint32 bits)
  696. {
  697. si_info_t *sii;
  698. volatile uint32 dummy;
  699. sbconfig_t *sb;
  700. sii = SI_INFO(sih);
  701. ASSERT(GOODREGS(sii->curmap));
  702. sb = REGS2SB(sii->curmap);
  703. /* if core is already in reset, just return */
  704. if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
  705. return;
  706. /* if clocks are not enabled, put into reset and return */
  707. if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
  708. goto disable;
  709. /* set target reject and spin until busy is clear (preserve core-specific bits) */
  710. OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
  711. dummy = R_SBREG(sii, &sb->sbtmstatelow);
  712. BCM_REFERENCE(dummy);
  713. OSL_DELAY(1);
  714. SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
  715. if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
  716. SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
  717. if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
  718. OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
  719. dummy = R_SBREG(sii, &sb->sbimstate);
  720. BCM_REFERENCE(dummy);
  721. OSL_DELAY(1);
  722. SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
  723. }
  724. /* set reset and reject while enabling the clocks */
  725. W_SBREG(sii, &sb->sbtmstatelow,
  726. (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
  727. SBTML_REJ | SBTML_RESET));
  728. dummy = R_SBREG(sii, &sb->sbtmstatelow);
  729. BCM_REFERENCE(dummy);
  730. OSL_DELAY(10);
  731. /* don't forget to clear the initiator reject bit */
  732. if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
  733. AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
  734. disable:
  735. /* leave reset and reject asserted */
  736. W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
  737. OSL_DELAY(1);
  738. }
  739. /* reset and re-enable a core
  740. * inputs:
  741. * bits - core specific bits that are set during and after reset sequence
  742. * resetbits - core specific bits that are set only during reset sequence
  743. */
  744. void
  745. sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
  746. {
  747. si_info_t *sii;
  748. sbconfig_t *sb;
  749. volatile uint32 dummy;
  750. sii = SI_INFO(sih);
  751. ASSERT(GOODREGS(sii->curmap));
  752. sb = REGS2SB(sii->curmap);
  753. /*
  754. * Must do the disable sequence first to work for arbitrary current core state.
  755. */
  756. sb_core_disable(sih, (bits | resetbits));
  757. /*
  758. * Now do the initialization sequence.
  759. */
  760. /* set reset while enabling the clock and forcing them on throughout the core */
  761. W_SBREG(sii, &sb->sbtmstatelow,
  762. (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
  763. SBTML_RESET));
  764. dummy = R_SBREG(sii, &sb->sbtmstatelow);
  765. BCM_REFERENCE(dummy);
  766. OSL_DELAY(1);
  767. if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
  768. W_SBREG(sii, &sb->sbtmstatehigh, 0);
  769. }
  770. if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
  771. AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
  772. }
  773. /* clear reset and allow it to propagate throughout the core */
  774. W_SBREG(sii, &sb->sbtmstatelow,
  775. ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
  776. dummy = R_SBREG(sii, &sb->sbtmstatelow);
  777. BCM_REFERENCE(dummy);
  778. OSL_DELAY(1);
  779. /* leave clock enabled */
  780. W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
  781. dummy = R_SBREG(sii, &sb->sbtmstatelow);
  782. BCM_REFERENCE(dummy);
  783. OSL_DELAY(1);
  784. }
/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (Because of dma *any* core can access memeory).
 *
 * The routine uses the bus to decide who is the master:
 * SI_BUS => mips
 * JTAG_BUS => chipc
 * PCI_BUS => pci or pcie
 * PCMCIA_BUS => pcmcia
 * SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */
#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff == -1 == error return */
	sbconfig_t *sb;

	/* reject values that touch bits outside the two timeout fields */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
#ifdef BCMSDIO
		case SDIO_BUS:	/* fallthrough: SDIO also masters via the pcmcia core */
#endif // endif
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	/* read-modify-write the timeout fields with interrupts off, then
	 * restore the previously selected core before re-enabling interrupts
	 */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);
	sb = REGS2SB(sb_setcoreidx(sih, idx));
	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;	/* previous timeout state, returned to the caller */
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
	sb_commit(sih);	/* make the sbimconfiglow change take effect */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);

	return ret;
}
  852. uint32
  853. sb_base(uint32 admatch)
  854. {
  855. uint32 base;
  856. uint type;
  857. type = admatch & SBAM_TYPE_MASK;
  858. ASSERT(type < 3);
  859. base = 0;
  860. if (type == 0) {
  861. base = admatch & SBAM_BASE0_MASK;
  862. } else if (type == 1) {
  863. ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
  864. base = admatch & SBAM_BASE1_MASK;
  865. } else if (type == 2) {
  866. ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
  867. base = admatch & SBAM_BASE2_MASK;
  868. }
  869. return (base);
  870. }
  871. uint32
  872. sb_size(uint32 admatch)
  873. {
  874. uint32 size;
  875. uint type;
  876. type = admatch & SBAM_TYPE_MASK;
  877. ASSERT(type < 3);
  878. size = 0;
  879. if (type == 0) {
  880. size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
  881. } else if (type == 1) {
  882. ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
  883. size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
  884. } else if (type == 2) {
  885. ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
  886. size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
  887. }
  888. return (size);
  889. }
#if defined(BCMDBG_PHYDUMP)
/* print interesting sbconfig registers
 * Walks every core, selects it, and appends its sbconfig register
 * contents to the string buffer b. Interrupts are held off for the
 * whole walk and the originally selected core is restored at the end.
 */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		/* the imerrlog registers only exist on newer Sonics revisions */
		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
#endif // endif