cache-l2x0.c
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"
struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	void (*unlock)(void __iomem *, unsigned);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

static bool l2x0_bresp_disable;
static bool l2x0_flz_disable;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
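	/*
	 * Skip the write when the register already holds the wanted
	 * value: when write_sec is routed through secure firmware,
	 * every write may cost a secure monitor call.
	 */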
	if (val == readl_relaxed(base + reg))
		return;

	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

static void l2c_configure(void __iomem *base)
{
	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
	unsigned long flags;

	if (outer_cache.configure)
		outer_cache.configure(&l2x0_saved_regs);
	else
		l2x0_data->configure(base);

	l2x0_data->unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	l2x0_pmu_suspend();

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2c_resume(void)
{
	void __iomem *base = l2x0_base;

	/* Do not touch the controller if already enabled. */
	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_data->num_lock);

	l2x0_pmu_resume();
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
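
/*
 * Walk a physical address range one 4K block at a time, dropping and
 * re-acquiring l2x0_lock between blocks.  This bounds the time other
 * CPUs spin on the lock (and, via the restored IRQ flags, the time
 * interrupts stay disabled) during long range operations.  The caller
 * passes in and receives back the IRQ flags for its final unlock.
 */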
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);
}

static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
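		/*
		 * Debug control value 0x03 sets the PL310's disable-linefill
		 * and disable-write-back (force write-through) bits for the
		 * duration of the workaround; 0x00 restores normal operation.
		 */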
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	l2c_configure(base);

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
				 L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}

static int l2c310_starting_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
	return 0;
}

static int l2c310_dying_cpu(unsigned int cpu)
{
	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
	return 0;
}

static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	u32 aux = l2x0_saved_regs.aux_ctrl;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9 && !l2x0_bresp_disable) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9 && !l2x0_flz_disable) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, num_lock);

	/* Read back resulting AUX_CTRL value as it could have been altered. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
				  "arm/l2x0:starting", l2c310_starting_cpu,
				  l2c310_dying_cpu);
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = l2x0_saved_regs.prefetch_ctrl;
		if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
			val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
			l2x0_saved_regs.prefetch_ctrl = val;
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static void l2c310_disable(void)
{
	/*
	 * If full-line-of-zeros is enabled, we must first disable it in the
	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}

static void l2c310_resume(void)
{
	l2c_resume();

	/* Re-enable full-line-of-zeros for Cortex-A9 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}

static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
		l2c_unlock(base, num_lock);
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static int __init __l2c_init(const struct l2c_init_data *data,
			     u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Save the pointer globally so that callbacks which do not receive
	 * context from callers can access the structure.
	 */
	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
	if (!l2x0_data)
		return -ENOMEM;

	/*
	 * Sanity check the aux values.  aux_mask is the bits we preserve
	 * from reading the hardware register, and aux_val is the bits we
	 * set.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, aux);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);
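
	/*
	 * For example, an L2C-310 (way_size_0 = 8K) reporting
	 * way_size_bits = 3 with 16 ways gives 16 * (8K << 3) = 1MB.
	 */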

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (nosync) {
		pr_info("L2C: disabling outer sync\n");
		fns.sync = NULL;
	}

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		l2x0_saved_regs.aux_ctrl = aux;

		data->enable(l2x0_base, data->num_lock);
	}

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	l2x0_pmu_register(l2x0_base, cache_id);

	return 0;
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id, false);
}

#ifdef CONFIG_OF
static int l2_wt_override;
/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
					   u32 *aux_val, u32 *aux_mask,
					   u32 *associativity,
					   u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return -ENODEV;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If linesize is not given, it is equal to blocksize */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: "
				"falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does "
			"not match hardware line size of %d bytes\n",
			line_size,
			CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
	way_size = sets * line_size;
	*associativity = cache_size / way_size;

	if (way_size > max_way_size) {
		pr_err("L2C OF: way size %d bytes is too large\n", way_size);
		return -EINVAL;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the bits 17:19 to set for way size:
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
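	/*
	 * e.g. a 128KB cache with 512 sets of 32-byte lines has 16KB
	 * ways and is 8-way associative: ilog2(16) - 3 = 1.
	 */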
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
		       way_size);
		return -EINVAL;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;

	return 0;
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		val |= L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yields too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.configure = l2c_configure,
	.unlock = l2c220_unlock,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;
	u32 prefetch;
	u32 power;
	u32 val;
	int ret;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		l2x0_saved_regs.tag_latency =
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		l2x0_saved_regs.data_latency =
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		l2x0_saved_regs.filter_end =
					ALIGN(filter[0] + filter[1], SZ_1M);
		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
					| L310_ADDR_FILTER_EN;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	if (!ret) {
		switch (assoc) {
		case 16:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		case 8:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		default:
			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
			       assoc);
			break;
		}
	}

	if (of_property_read_bool(np, "arm,shared-override")) {
		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
	}

	if (of_property_read_bool(np, "arm,parity-enable")) {
		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	} else if (of_property_read_bool(np, "arm,parity-disable")) {
		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
	}

	if (of_property_read_bool(np, "arm,early-bresp-disable"))
		l2x0_bresp_disable = true;

	if (of_property_read_bool(np, "arm,full-line-zero-disable"))
		l2x0_flz_disable = true;

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
	if (ret == 0) {
		if (!val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
	if (ret == 0) {
		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-data", &val);
	if (ret == 0) {
		if (val) {
			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
			*aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
		} else {
			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
			*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
		}
		*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-data property value is missing\n");
	}

	ret = of_property_read_u32(np, "prefetch-instr", &val);
	if (ret == 0) {
		if (val) {
			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
			*aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
		} else {
			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
			*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
		}
		*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
	}

	l2x0_saved_regs.prefetch_ctrl = prefetch;

	power = l2x0_saved_regs.pwr_ctrl |
		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;

	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_DYNAMIC_CLK_GATING_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
	}
	ret = of_property_read_u32(np, "arm,standby-mode", &val);
	if (!ret) {
		if (!val)
			power &= ~L310_STNDBY_MODE_EN;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
	}

	l2x0_saved_regs.pwr_ctrl = power;
}

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL. Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency). The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);
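	/*
	 * e.g. with 4K pages, a range starting at 0x1fc0 is cut no
	 * later than the next page boundary (0x2000), whatever
	 * MAX_RANGE_SIZE permits.
	 */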

	return end;
}

static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	void __iomem *base = l2x0_base;
	unsigned long range_end;
	unsigned long flags;
	/*
	 * Round the start address down, and the end address up, to
	 * cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * perform operation on all full cache lines between 'start' and 'end'
	 */
	while (start < end) {
		range_end = aurora_range_end(start, end);

		raw_spin_lock_irqsave(&l2x0_lock, flags);
		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);

		writel_relaxed(0, base + AURORA_SYNC_REG);
		start = range_end;
	}
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override)
		aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	if (l2_wt_override)
		aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
	else
		aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}

static void aurora_flush_all(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	writel_relaxed(0, base + AURORA_SYNC_REG);
}

static void aurora_cache_sync(void)
{
	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}

static void aurora_disable(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	writel_relaxed(0, base + AURORA_SYNC_REG);
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = aurora_flush_all,
		.disable = aurora_disable,
		.sync = aurora_cache_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.configure = l2c_configure,
	.unlock = l2c_unlock,
	.outer_cache = {
		.resume = l2c_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we need
 * to break the L2 operation into two, each within its own section.  For
 * example, to invalidate a range starting at 0xBFFF0000 and ending at
 * 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross-section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case where the start address is in Section 1
 * and the end address is in Section 3, since that is not a valid use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that, the code does not need to handle Section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

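/*
 * Worked example of the remapping (a sketch assuming 32-bit unsigned
 * long arithmetic, so the 0x80000000 offset wraps modulo 2^32):
 * invalidating 0xBFFF0000 - 0xC0001000 crosses from section 2 into
 * section 3 and is issued as two operations,
 *
 *	l2c210_inv_range(0xFFFF0000, 0xFFFFFFFF);   section 2, +0x40000000
 *	l2c210_inv_range(0x40000000, 0x40001000);   section 3, +0x80000000
 */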
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s are based on ARM's r3p2 or later and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.unlock = l2c310_unlock,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

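/*
 * Tauros3 carries a second auxiliary control register and reuses the
 * L310 prefetch control register offset; both are saved here and
 * restored in tauros3_configure() so they survive suspend/resume.
 */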
static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_configure(void __iomem *base)
{
	l2c_configure(base);
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	.unlock = l2c_unlock,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};

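/*
 * L2C_ID() maps a DT compatible string to its per-controller init
 * data; l2x0_of_init() below uses the first node matching this table.
 */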
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }

static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

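/*
 * A minimal call-site sketch (not from this file): platform init code
 * typically calls
 *
 *	l2x0_of_init(0, ~0UL);
 *
 * to keep the hardware's auxiliary control settings, or passes a
 * non-trivial aux_val/aux_mask pair to override selected bits.
 */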
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;
	u32 cache_level = 2;
	bool nosync = false;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

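	/*
	 * The value that will be programmed is (old_aux & aux_mask) |
	 * aux_val: warn when the platform arguments change the hardware
	 * default, and complain when they are redundant.
	 */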
	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	if (of_property_read_u32(np, "cache-level", &cache_level))
		pr_err("L2C: device tree omits to specify cache-level\n");

	if (cache_level != 2)
		pr_err("L2C: device tree specifies invalid cache level\n");

	nosync = of_property_read_bool(np, "arm,outer-sync-disable");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif