/* mali_l2_cache.c */
/*
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 * (C) COPYRIGHT 2008-2013 ARM Limited
 * ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 */
#include "mali_kernel_common.h" /* MALI_DEBUG_*, MALI_PRINT_ERROR, MALI_ERROR/MALI_SUCCESS macros */
#include "mali_osk.h"           /* OS abstraction: _mali_osk_malloc/free, spinlock primitives */
#include "mali_l2_cache.h"      /* public interface implemented by this file */
#include "mali_hw_core.h"       /* mali_hw_core register read/write helpers */
#include "mali_scheduler.h"     /* mali_scheduler_get_new_cache_order() */
#include "mali_pm_domain.h"     /* mali_pm_domain_lock_state()/unlock_state() */
/**
 * Size of the Mali L2 cache registers in bytes
 */
#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30

/**
 * Mali L2 cache register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register
 */
typedef enum mali_l2_cache_register {
	MALI400_L2_CACHE_REGISTER_SIZE = 0x0004,        /**< Cache geometry fields (decoded in mali_l2_cache_create) */
	MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,      /**< Busy flags, see mali_l2_cache_status */
	/*unused                       = 0x000C */
	MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010,     /**< Misc cache commands, e.g. clear */
	MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,  /**< Write a page address here to invalidate that page */
	MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018,   /**< Limit of outstanding read requests */
	MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C,      /**< Enable misc cache features */
	MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020, /**< Event source for performance counter 0 */
	MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024, /**< Current value of performance counter 0 */
	MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028, /**< Event source for performance counter 1 */
	MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C, /**< Current value of performance counter 1 */
} mali_l2_cache_register;
/**
 * Mali L2 cache commands
 * These are the commands that can be sent to the Mali L2 cache unit
 * (written to the COMMAND register via mali_l2_cache_send_command)
 */
typedef enum mali_l2_cache_command {
	MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
	/* Read HW TRM carefully before adding/using other commands than the clear above */
} mali_l2_cache_command;
/**
 * Mali L2 cache enable flags
 * Feature bits that can be written to the ENABLE register
 * (the previous header comment was a copy-paste of the command enum's)
 */
typedef enum mali_l2_cache_enable {
	MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0,        /**< Default state of enable register */
	MALI400_L2_CACHE_ENABLE_ACCESS = 0x01,        /**< Permit cacheable accesses */
	MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
} mali_l2_cache_enable;
/**
 * Mali L2 cache status bits
 * Bit values read from the STATUS register
 */
typedef enum mali_l2_cache_status {
	MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
	MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02,    /**< L2 cache is busy handling data requests */
} mali_l2_cache_status;
/** Default value programmed into the MAX_READS register (outstanding-read limit) */
#define MALI400_L2_MAX_READS_DEFAULT 0x1C

/* All L2 cache cores created so far; kept hole-free by mali_l2_cache_delete() */
static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
/* Number of valid entries in mali_global_l2_cache_cores */
static u32 mali_global_num_l2_cache_cores = 0;

/* Value written to MAX_READS on reset; non-static so it can be overridden elsewhere */
int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;

/* Local helper functions */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
/* Acquire the per-core counter spinlock (IRQ-safe variant when upper-half scheduling is enabled). */
static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
	_mali_osk_spinlock_irq_lock(cache->counter_lock);
#else
	_mali_osk_spinlock_lock(cache->counter_lock);
#endif
}
/* Release the per-core counter spinlock; must pair with mali_l2_cache_counter_lock(). */
static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
	_mali_osk_spinlock_irq_unlock(cache->counter_lock);
#else
	_mali_osk_spinlock_unlock(cache->counter_lock);
#endif
}
/* Acquire the per-core command spinlock (serializes writes to the COMMAND/CLEAR_PAGE registers). */
static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
	_mali_osk_spinlock_irq_lock(cache->command_lock);
#else
	_mali_osk_spinlock_lock(cache->command_lock);
#endif
}
/* Release the per-core command spinlock; must pair with mali_l2_cache_command_lock(). */
static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
	_mali_osk_spinlock_irq_unlock(cache->command_lock);
#else
	_mali_osk_spinlock_unlock(cache->command_lock);
#endif
}
/**
 * Allocate and initialize an L2 cache core object for the given resource.
 *
 * Maps the HW register region, creates the command and counter spinlocks,
 * resets the cache to its operational state and registers the core in the
 * global core array.
 *
 * @param resource OS resource describing the L2 cache register region
 * @return New core object on success, NULL on failure (all partially
 *         created state is torn down before returning)
 */
struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
{
	struct mali_l2_cache_core *cache = NULL;

	MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));

	if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
		MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
		return NULL;
	}

	cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
	if (NULL != cache) {
		/* core_id is simply the next free slot index */
		cache->core_id = mali_global_num_l2_cache_cores;
		cache->counter_src0 = MALI_HW_CORE_NO_COUNTER; /* no perf counters selected yet */
		cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
		cache->pm_domain = NULL;
		cache->mali_l2_status = MALI_L2_NORMAL;
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
			/* Decode geometry fields of the SIZE register (debug builds only) */
			MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
			MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
					     resource->description,
					     1 << (((cache_size >> 16) & 0xff) - 10),
					     1 << ((cache_size >> 8) & 0xff),
					     1 << (cache_size & 0xff),
					     1 << ((cache_size >> 24) & 0xff)));

#ifdef MALI_UPPER_HALF_SCHEDULING
			cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#else
			cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#endif
			if (NULL != cache->command_lock) {
				/* NOTE(review): the counter lock reuses the L2_COMMAND lock-order
				 * constant — confirm this is intentional and not a copy-paste. */
#ifdef MALI_UPPER_HALF_SCHEDULING
				cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#else
				cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#endif
				if (NULL != cache->counter_lock) {
					/* Fully constructed: reset HW and publish in the global array */
					mali_l2_cache_reset(cache);

					cache->last_invalidated_id = 0;

					mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
					mali_global_num_l2_cache_cores++;

					return cache;
				} else {
					MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
				}

				/* Undo the command lock created above */
#ifdef MALI_UPPER_HALF_SCHEDULING
				_mali_osk_spinlock_irq_term(cache->command_lock);
#else
				_mali_osk_spinlock_term(cache->command_lock);
#endif
			} else {
				MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
			}

			mali_hw_core_delete(&cache->hw_core);
		}
		_mali_osk_free(cache);
	} else {
		MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
	}

	return NULL;
}
/**
 * Tear down an L2 cache core object.
 *
 * Restores HW registers to their default values, destroys the locks,
 * unmaps the HW core, removes the core from the global array (keeping
 * the array hole-free) and frees the object.
 *
 * NOTE(review): writes HW registers here — assumes the core is powered
 * on when deleted; confirm against the PM shutdown sequence.
 */
void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
{
	u32 i;

	/* reset to defaults */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);

#ifdef MALI_UPPER_HALF_SCHEDULING
	_mali_osk_spinlock_irq_term(cache->counter_lock);
	_mali_osk_spinlock_irq_term(cache->command_lock);
#else
	_mali_osk_spinlock_term(cache->command_lock);
	_mali_osk_spinlock_term(cache->counter_lock);
#endif

	mali_hw_core_delete(&cache->hw_core);

	/* Remove this core from the global array */
	for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
		if (mali_global_l2_cache_cores[i] == cache) {
			mali_global_l2_cache_cores[i] = NULL;
			mali_global_num_l2_cache_cores--;
			if (i != mali_global_num_l2_cache_cores) {
				/* We removed a l2 cache from the middle of the array -- move the last
				 * l2 cache to the current position to close the gap */
				mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
				mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
			}
			break;
		}
	}

	_mali_osk_free(cache);
}
  188. u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
  189. {
  190. return cache->core_id;
  191. }
  192. static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
  193. {
  194. u32 value = 0; /* disabled src */
  195. u32 reg_offset = 0;
  196. mali_bool core_is_on;
  197. MALI_DEBUG_ASSERT_POINTER(cache);
  198. core_is_on = mali_l2_cache_lock_power_state(cache);
  199. mali_l2_cache_counter_lock(cache);
  200. switch (source_id) {
  201. case 0:
  202. cache->counter_src0 = counter;
  203. reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
  204. break;
  205. case 1:
  206. cache->counter_src1 = counter;
  207. reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
  208. break;
  209. default:
  210. MALI_DEBUG_ASSERT(0);
  211. break;
  212. }
  213. if (MALI_L2_PAUSE == cache->mali_l2_status) {
  214. mali_l2_cache_counter_unlock(cache);
  215. mali_l2_cache_unlock_power_state(cache);
  216. return;
  217. }
  218. if (MALI_HW_CORE_NO_COUNTER != counter) {
  219. value = counter;
  220. }
  221. if (MALI_TRUE == core_is_on) {
  222. mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
  223. }
  224. mali_l2_cache_counter_unlock(cache);
  225. mali_l2_cache_unlock_power_state(cache);
  226. }
  227. void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
  228. {
  229. mali_l2_cache_core_set_counter_internal(cache, 0, counter);
  230. }
  231. void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
  232. {
  233. mali_l2_cache_core_set_counter_internal(cache, 1, counter);
  234. }
  235. u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
  236. {
  237. return cache->counter_src0;
  238. }
  239. u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
  240. {
  241. return cache->counter_src1;
  242. }
  243. void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
  244. {
  245. MALI_DEBUG_ASSERT(NULL != src0);
  246. MALI_DEBUG_ASSERT(NULL != value0);
  247. MALI_DEBUG_ASSERT(NULL != src1);
  248. MALI_DEBUG_ASSERT(NULL != value1);
  249. /* Caller must hold the PM lock and know that we are powered on */
  250. mali_l2_cache_counter_lock(cache);
  251. if (MALI_L2_PAUSE == cache->mali_l2_status) {
  252. mali_l2_cache_counter_unlock(cache);
  253. return;
  254. }
  255. *src0 = cache->counter_src0;
  256. *src1 = cache->counter_src1;
  257. if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
  258. *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
  259. }
  260. if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
  261. *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
  262. }
  263. mali_l2_cache_counter_unlock(cache);
  264. }
/**
 * Reprogram the PERFCNT_SRC registers on every L2 core from the values
 * cached in SW. Called when resuming from pause (see mali_l2_cache_pause_all).
 *
 * Skips HW writes for cores that are powered off.
 */
static void mali_l2_cache_reset_counters_all(void)
{
	int i;
	u32 value;
	struct mali_l2_cache_core *cache;
	u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();

	for (i = 0; i < num_cores; i++) {
		cache = mali_l2_cache_core_get_glob_l2_core(i);
		if (MALI_TRUE == mali_l2_cache_lock_power_state(cache)) {
			mali_l2_cache_counter_lock(cache);
			if (MALI_L2_PAUSE == cache->mali_l2_status) {
				/* NOTE(review): returns (not continue), leaving any remaining
				 * cores untouched — confirm this is intentional. */
				mali_l2_cache_counter_unlock(cache);
				mali_l2_cache_unlock_power_state(cache);
				return;
			}

			/* Reset performance counters */
			if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
				value = 0; /* 0 = counter disabled */
			} else {
				value = cache->counter_src0;
			}
			mali_hw_core_register_write(&cache->hw_core,
						    MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);

			if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
				value = 0;
			} else {
				value = cache->counter_src1;
			}
			mali_hw_core_register_write(&cache->hw_core,
						    MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);

			mali_l2_cache_counter_unlock(cache);
		}
		/* Unlock is outside the if: lock_power_state appears to hold the PM
		 * domain lock even when it reports the core powered off (matches the
		 * pattern in mali_l2_cache_invalidate_all) — confirm against
		 * mali_pm_domain_lock_state(). */
		mali_l2_cache_unlock_power_state(cache);
	}
}
  300. struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
  301. {
  302. if (mali_global_num_l2_cache_cores > index) {
  303. return mali_global_l2_cache_cores[index];
  304. }
  305. return NULL;
  306. }
  307. u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
  308. {
  309. return mali_global_num_l2_cache_cores;
  310. }
/**
 * Bring an L2 cache core to its normal operational state.
 *
 * Invalidates the whole cache, enables cacheable access and read
 * allocation, programs the outstanding-read limit and restores any
 * previously selected performance counter sources. If the core is
 * paused, only the invalidate is attempted; the enable/counter
 * programming is deferred.
 */
void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
	/* Invalidate cache (just to keep it in a known state at startup) */
	mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);

	mali_l2_cache_counter_lock(cache);
	if (MALI_L2_PAUSE == cache->mali_l2_status) {
		mali_l2_cache_counter_unlock(cache);
		return;
	}

	/* Enable cache */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

	/* Restart any performance counters (if enabled) */
	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
	}
	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
	}

	mali_l2_cache_counter_unlock(cache);
}
  332. void mali_l2_cache_reset_all(void)
  333. {
  334. int i;
  335. u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
  336. for (i = 0; i < num_cores; i++) {
  337. mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
  338. }
  339. }
  340. void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
  341. {
  342. MALI_DEBUG_ASSERT_POINTER(cache);
  343. if (NULL != cache) {
  344. cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
  345. mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
  346. }
  347. }
/**
 * Invalidate the cache only if it has not already been invalidated on
 * behalf of a job with cache order >= @id.
 *
 * @param cache The L2 cache core (NULL is tolerated)
 * @param id Cache order of the requesting job
 * @return MALI_FALSE if the invalidate could be skipped,
 *         MALI_TRUE if an invalidate was issued (or cache was NULL)
 */
mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
{
	MALI_DEBUG_ASSERT_POINTER(cache);
	if (NULL != cache) {
		/* If the last cache invalidation was done by a job with a higher id we
		 * don't have to flush. Since user space will store jobs w/ their
		 * corresponding memory in sequence (first job #0, then job #1, ...),
		 * we don't have to flush for job n-1 if job n has already invalidated
		 * the cache since we know for sure that job n-1's memory was already
		 * written when job n was started. */
		/* Signed comparison — presumably to keep the ordering sensible across
		 * wrap-around of the u32 id counter; confirm against the scheduler. */
		if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
			return MALI_FALSE;
		} else {
			cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
		}
		mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
	}
	return MALI_TRUE;
}
/**
 * Invalidate every registered L2 cache core (CLEAR_ALL), bumping each
 * core's last_invalidated_id to a fresh cache order first. Cores that
 * are powered off are skipped.
 */
void mali_l2_cache_invalidate_all(void)
{
	u32 i;
	for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
		/*additional check*/
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
			_mali_osk_errcode_t ret;
			mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
			ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
			if (_MALI_OSK_ERR_OK != ret) {
				MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
			}
		}
		/* Unlock is outside the if: lock_power_state appears to hold the PM
		 * domain lock even when the core is powered off — confirm against
		 * mali_pm_domain_lock_state(). */
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
/**
 * Invalidate a list of pages in every registered L2 cache core by
 * writing each page address to the CLEAR_PAGE register. Cores that are
 * powered off are skipped.
 *
 * @param pages Array of page addresses to invalidate
 * @param num_pages Number of entries in @pages
 */
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
{
	u32 i;
	for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
		/*additional check*/
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
			u32 j;
			for (j = 0; j < num_pages; j++) {
				_mali_osk_errcode_t ret;
				ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
				if (_MALI_OSK_ERR_OK != ret) {
					MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
				}
			}
		}
		/* Unlock is outside the if — same PM-lock pairing as
		 * mali_l2_cache_invalidate_all(); confirm against
		 * mali_pm_domain_lock_state(). */
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
  401. mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
  402. {
  403. return mali_pm_domain_lock_state(cache->pm_domain);
  404. }
  405. void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
  406. {
  407. return mali_pm_domain_unlock_state(cache->pm_domain);
  408. }
/* -------- local helper functions below -------- */

/**
 * Write @val to L2 cache register @reg, serialized against other commands.
 *
 * Busy-waits (bounded by loop_count iterations) for the command handler
 * to go idle before issuing the write, since the L2 cache silently
 * ignores commands while busy.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_BUSY if the cache
 *         is paused, _MALI_OSK_ERR_FAULT if the idle wait timed out
 *         (returned via the MALI_ERROR/MALI_SUCCESS macros)
 */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
	int i = 0;
	const int loop_count = 100000; /* bound on the busy-wait, no sleep between polls */

	/*
	 * Grab lock in order to send commands to the L2 cache in a serialized fashion.
	 * The L2 cache will ignore commands if it is busy.
	 */
	mali_l2_cache_command_lock(cache);

	if (MALI_L2_PAUSE == cache->mali_l2_status) {
		mali_l2_cache_command_unlock(cache);
		MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for L2 come back\n"));
		MALI_ERROR( _MALI_OSK_ERR_BUSY );
	}

	/* First, wait for L2 cache command handler to go idle */
	for (i = 0; i < loop_count; i++) {
		if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
			break;
		}
	}

	if (i == loop_count) {
		mali_l2_cache_command_unlock(cache);
		MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
		MALI_ERROR( _MALI_OSK_ERR_FAULT );
	}

	/* then issue the command */
	mali_hw_core_register_write(&cache->hw_core, reg, val);

	mali_l2_cache_command_unlock(cache);

	MALI_SUCCESS;
}
  440. void mali_l2_cache_pause_all(mali_bool pause)
  441. {
  442. int i;
  443. struct mali_l2_cache_core * cache;
  444. u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
  445. mali_l2_power_status status = MALI_L2_NORMAL;
  446. if (pause) {
  447. status = MALI_L2_PAUSE;
  448. }
  449. for (i = 0; i < num_cores; i++) {
  450. cache = mali_l2_cache_core_get_glob_l2_core(i);
  451. if (NULL != cache) {
  452. cache->mali_l2_status = status;
  453. /* Take and release the counter and command locks to
  454. * ensure there are no active threads that didn't get
  455. * the status flag update.
  456. *
  457. * The locks will also ensure the necessary memory
  458. * barriers are done on SMP systems.
  459. */
  460. mali_l2_cache_counter_lock(cache);
  461. mali_l2_cache_counter_unlock(cache);
  462. mali_l2_cache_command_lock(cache);
  463. mali_l2_cache_command_unlock(cache);
  464. }
  465. }
  466. /* Resume from pause: do the cache invalidation here to prevent any
  467. * loss of cache operation during the pause period to make sure the SW
  468. * status is consistent with L2 cache status.
  469. */
  470. if(!pause) {
  471. mali_l2_cache_invalidate_all();
  472. mali_l2_cache_reset_counters_all();
  473. }
  474. }