// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <asm/global_data.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <lmb.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * A data cache (L1 D$ or SL$) entire invalidate operation or a data cache
 * disable operation may result in unexpected behavior and data loss even if
 * we flush the data cache right before invalidation. That may happen if we
 * store any context on the stack (like we store the BLINK register on the
 * stack before a function call). BLINK is the register where the return
 * address is automatically saved when we do a function call with
 * instructions like 'bl'.
 *
 * Here is a real example:
 * we may hang in the code below because we store the BLINK register on the
 * stack in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack) ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack) ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$] ![point 3]
 *         // Oops!!!
 *         // We lose the return address from the invalidate_dcache_all
 *         // function: we saved it to the stack and invalidated L1 D$
 *         // right after that!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 dcache as we invalidated it in
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory), but in memory we have the value saved at [point 1],
 *     // which is the return address of flush_dcache_all (instead of the
 *     // return address of the current invalidate_dcache_all function,
 *     // which we saved at [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we can fix that by doing flush & invalidation of D$ with a
 * single instruction (instead of a flush/invalidate instruction pair) and
 * by forcing function inlining with the '__attribute__((always_inline))'
 * gcc attribute, to avoid any function call (and BLINK store) between the
 * cache flush and the disable.
 *
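 * As an illustration of the fixed pattern (a minimal sketch composed for
 * this comment, not code used below): the IM bit makes a single IVDC write
 * perform flush-AND-invalidate, and forced inlining keeps BLINK off the
 * stack between the flush and whatever follows:
 *
 * static inline __attribute__((always_inline)) void dc_flush_n_inv(void)
 * {
 *     unsigned int ctrl = read_aux_reg(ARC_AUX_DC_CTRL);
 *
 *     // IM set: one command triggers flush-n-inv, not a plain inv
 *     write_aux_reg(ARC_AUX_DC_CTRL, ctrl | DC_CTRL_INV_MODE_FLUSH);
 *     write_aux_reg(ARC_AUX_DC_IVDC, 0x1);
 *     while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
 *         ; // wait for the flush part to complete
 * }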
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW but we don't support them in SW.
 * Configuration 1:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 2:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   L2 (SL$)           |
 *       |______________________|
 *         always on (ARCv2, HS <  3.0)
 *         on/off    (ARCv2, HS >= 3.0)
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 3:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off       must be on
 *        ___|______________|____    _______
 *       |                      |   |       |
 *       |   L2 (SL$)           |---|  IOC  |
 *       |______________________|   |_______|
 *          always must be on        on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 */
DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))
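
/*
 * For illustration (the line size below is an assumed example value, not
 * read from HW here): with a 64-byte L1 line, CACHE_LINE_MASK == ~0x3f,
 * so an address is rounded down to its line boundary as
 *
 *   0x80001234 & ~0x3f == 0x80001200
 */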

/*
 * We don't want to use '__always_inline' macro here as it can be redefined
 * to simple 'inline' in some cases which breaks stuff. See [ NOTE 1 ] for
 * more details about the reasons we need to use always_inline functions.
 */
#define inlined_cachefunc	inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);
static inlined_cachefunc void __slc_entire_op(const int op);
static inlined_cachefunc bool ioc_enabled(void);

static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}

static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

enum slc_dis_status {
	ST_SLC_MISSING = 0,
	ST_SLC_NO_DISABLE_CTRL,
	ST_SLC_DISABLE_CTRL
};

/*
 * ARCv1                                    -> ST_SLC_MISSING
 * ARCv2 && SLC absent                      -> ST_SLC_MISSING
 * ARCv2 && SLC exists && SLC version <= 2  -> ST_SLC_NO_DISABLE_CTRL
 * ARCv2 && SLC exists && SLC version > 2   -> ST_SLC_DISABLE_CTRL
 */
static inlined_cachefunc enum slc_dis_status slc_disable_supported(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		if (sbcr.fields.ver == 0)
			return ST_SLC_MISSING;
		else if (sbcr.fields.ver <= 2)
			return ST_SLC_NO_DISABLE_CTRL;
		else
			return ST_SLC_DISABLE_CTRL;
	}

	return ST_SLC_MISSING;
}

static inlined_cachefunc bool __slc_enabled(void)
{
	return !(read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_DIS);
}

static inlined_cachefunc void __slc_enable(void)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl &= ~SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}

static inlined_cachefunc void __slc_disable(void)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl |= SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}

static inlined_cachefunc bool slc_enabled(void)
{
	enum slc_dis_status slc_status = slc_disable_supported();

	if (slc_status == ST_SLC_MISSING)
		return false;
	else if (slc_status == ST_SLC_NO_DISABLE_CTRL)
		return true;
	else
		return __slc_enabled();
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If L1 data cache is disabled SL$ is bypassed and all load/store
	 * requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}

void slc_enable(void)
{
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	if (__slc_enabled())
		return;

	__slc_enable();
}

/* TODO: warn if we are not able to disable SLC */
void slc_disable(void)
{
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	/* we don't support SLC disabling if we use IOC */
	if (ioc_enabled())
		return;

	if (!__slc_enabled())
		return;

	/*
	 * We need to flush L1 D$ to guarantee that we won't have any
	 * writeback operations during SLC disabling.
	 */
	__dc_entire_op(OP_FLUSH);
	__slc_entire_op(OP_FLUSH_N_INV);
	__slc_disable();
}

static inlined_cachefunc bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inlined_cachefunc bool ioc_enabled(void)
{
	/*
	 * We only check the CONFIG option instead of the IOC HW state, as
	 * IOC must be disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_enabled())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
	/*
	 * ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	unsigned int ctrl;
	unsigned long end;

	if (!slc_enabled())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + gd->arch.slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only setup lower ones
	 * (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
#endif /* CONFIG_ISA_ARCV2 */
}
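
/*
 * Worked example for the END/START programming above (values assumed for
 * illustration): with slc_line_sz == 128, paddr == 0x80000000 and
 * sz == 0x100 we program
 *
 *   END   = 0x80000000 + 0x100 + 128 - 1 == 0x8000017f
 *   START = 0x80000000
 *
 * so END != START is guaranteed even for sz == 0, while the intra-line
 * low bits of both values are ignored by HW anyway.
 */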

static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CFG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CFG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Trying to enable IOC but SLC is not present");

	if (!slc_enabled())
		panic("Trying to enable IOC but SLC is disabled");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Trying to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be power of 2 and bigger than 4KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}
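
/*
 * Worked example for the AP0_SIZE encoding above (a 512 MiB DDR is an
 * assumed value for illustration): ap_size / 1024 == 524288 KiB == 2^19,
 * so order_base_2() yields 19 and we program 19 - 2 == 17 == 0x11,
 * matching "0x11 implies 512M" in the comment.
 */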

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2
	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support configuration where L1 I$ or L1 D$ is
		 * absent but SL$ exists. See [ NOTE 2 ] for more details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}
#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	/*
	 * We don't care much about the I$ line length really, as there are
	 * no per-line ops on I$; instead we only do full invalidation of it
	 * on occasion of relocation and right before jumping to the OS.
	 * Still we check for an insane config with a zero-encoded line
	 * length in presence of the version field in the I$ BCR. Just in
	 * case.
	 */
	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}
}
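
/*
 * For illustration (field values assumed): the BCR line_len fields are
 * decoded as "16 << line_len" for D$ and "8 << line_len" for I$, so
 * line_len == 2 in DC_BUILD means a 64-byte D$ line, and line_len == 3
 * in IC_BUILD likewise means 64 bytes.
 */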

void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}

int icache_status(void)
{
	return icache_enabled();
}

void icache_enable(void)
{
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);

	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();

	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * so we need to invalidate it too.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}

int dcache_status(void)
{
	return dcache_enabled();
}

void dcache_enable(void)
{
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC will be bypassed for data once L1 D$ is disabled, we
	 * need to flush it before the L1 D$ disable. Also we invalidate
	 * the SLC to avoid any inconsistent-data problems after enabling
	 * L1 D$ again with the dcache_enable function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}
}
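
/*
 * Worked example for the loop above (values assumed for illustration):
 * with a 64-byte line, paddr == 0x80000005 and sz == 100 we get
 *
 *   sz    += 0x80000005 & 0x3f  ->  sz == 105
 *   paddr &= ~0x3f              ->  paddr == 0x80000000
 *   num_lines = DIV_ROUND_UP(105, 64) == 2
 *
 * so both lines touched by the original [0x80000005, 0x80000069) range
 * get operated on.
 */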

static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
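
/*
 * Typical usage (a minimal sketch; 'buf' and 'len' are hypothetical):
 * drop possibly-stale lines before a device DMAs data into a buffer, and
 * write dirty lines back before a device DMAs data out of one:
 *
 *   invalidate_dcache_range((ulong)buf, (ulong)buf + len); // before DMA-in
 *   flush_dcache_range((ulong)buf, (ulong)buf + len);      // before DMA-out
 */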

/*
 * As invalidate_dcache_all() is not used by generic U-Boot code, and as we
 * don't need a pure invalidate (without flush) in arch/arc code either, we
 * implement flush_n_invalidate_dcache_all() (flush and invalidate in one
 * operation) because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and thereby syncs the I/D caches).
 * It can be used for cleanup before a Linux launch or to sync caches
 * during relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * and we shouldn't flush it. So invalidate it instead of flush_n_inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}

static ulong get_sp(void)
{
	ulong ret;

	asm("mov %0, sp" : "=r"(ret) : );
	return ret;
}

void arch_lmb_reserve(struct lmb *lmb)
{
	arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
}