/*
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 * (C) COPYRIGHT 2011-2013 ARM Limited
 * ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 */

#include "mali_kernel_common.h"
#include "mali_group.h"
#include "mali_osk.h"
#include "mali_l2_cache.h"
#include "mali_gp.h"
#include "mali_pp.h"
#include "mali_mmu.h"
#include "mali_dlbu.h"
#include "mali_broadcast.h"
#include "mali_scheduler.h"
#include "mali_osk_profiling.h"
#include "mali_pm_domain.h"
#include "mali_pm.h"
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
#include <linux/sched.h>
#include <trace/events/gpu.h>
#endif

static void mali_group_bottom_half_mmu(void *data);
static void mali_group_bottom_half_gp(void *data);
static void mali_group_bottom_half_pp(void *data);
static void mali_group_timeout(void *data);
static void mali_group_reset_pp(struct mali_group *group);
static void mali_group_reset_mmu(struct mali_group *group);

#if defined(CONFIG_MALI400_PROFILING)
static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
/*
 * The group object is the most important object in the device driver,
 * and acts as the center of many HW operations.
 * The reason for this is that operations on the MMU will affect all
 * cores connected to that MMU (a group is defined as an MMU and the
 * cores connected to it).
 * The group lock is thus the most important lock, followed by the
 * GP and PP scheduler locks. They must be taken in the following
 * order:
 * GP/PP scheduler lock first, then group lock(s).
 */
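
/*
 * Illustrative sketch of the required lock order (an assumption added for
 * clarity, not driver code; mali_pp_scheduler_lock()/unlock() are
 * hypothetical stand-ins for the scheduler-owned locks):
 *
 *     mali_pp_scheduler_lock();      (scheduler lock first)
 *     mali_group_lock(group);        (then the group lock)
 *     ... operate on the group ...
 *     mali_group_unlock(group);
 *     mali_pp_scheduler_unlock();
 */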
static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
static u32 mali_global_num_groups = 0;

/* Timer related: maximum allowed job runtime in ms before the per-group timeout timer fires */
int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
/* local helper functions */
static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
static void mali_group_recovery_reset(struct mali_group *group);
static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
static void mali_group_post_process_job_pp(struct mali_group *group);
static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);

void mali_group_lock(struct mali_group *group)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
    _mali_osk_spinlock_irq_lock(group->lock);
#else
    _mali_osk_spinlock_lock(group->lock);
#endif
    MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
}

void mali_group_unlock(struct mali_group *group)
{
    MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
#ifdef MALI_UPPER_HALF_SCHEDULING
    _mali_osk_spinlock_irq_unlock(group->lock);
#else
    _mali_osk_spinlock_unlock(group->lock);
#endif
}

#ifdef DEBUG
void mali_group_assert_locked(struct mali_group *group)
{
    MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
}
#endif
struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
{
    struct mali_group *group = NULL;

    if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
        MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
        return NULL;
    }

    group = _mali_osk_calloc(1, sizeof(struct mali_group));
    if (NULL != group) {
        group->timeout_timer = _mali_osk_timer_init();
        if (NULL != group->timeout_timer) {
            _mali_osk_lock_order_t order;
            _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);

            if (NULL != dlbu) {
                order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
            } else {
                order = _MALI_OSK_LOCK_ORDER_GROUP;
            }

#ifdef MALI_UPPER_HALF_SCHEDULING
            group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
#else
            group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
#endif
            if (NULL != group->lock) {
                group->l2_cache_core[0] = core;
                group->session = NULL;
                group->power_is_on = MALI_TRUE;
                group->state = MALI_GROUP_STATE_IDLE;
                _mali_osk_list_init(&group->group_list);
                _mali_osk_list_init(&group->pp_scheduler_list);
                group->parent_group = NULL;
                group->l2_cache_core_ref_count[0] = 0;
                group->l2_cache_core_ref_count[1] = 0;
                group->bcast_core = bcast;
                group->dlbu_core = dlbu;

                mali_global_groups[mali_global_num_groups] = group;
                mali_global_num_groups++;

                return group;
            }
            _mali_osk_timer_term(group->timeout_timer);
        }
        _mali_osk_free(group);
    }

    return NULL;
}
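
/*
 * Illustrative construction flow (a sketch added for clarity, not driver
 * code): a physical group is created and then handed its cores, and torn
 * down with mali_group_delete() on failure or shutdown. Here l2, mmu and
 * pp stand for core objects created elsewhere in the driver.
 *
 *     struct mali_group *group = mali_group_create(l2, NULL, NULL);
 *     if (NULL != group) {
 *         if (_MALI_OSK_ERR_OK != mali_group_add_mmu_core(group, mmu) ||
 *             _MALI_OSK_ERR_OK != mali_group_add_pp_core(group, pp)) {
 *             mali_group_delete(group);
 *         }
 *     }
 */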
_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
{
    /* This group object now owns the MMU core object */
    group->mmu = mmu_core;
    group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
    if (NULL == group->bottom_half_work_mmu) {
        return _MALI_OSK_ERR_FAULT;
    }
    return _MALI_OSK_ERR_OK;
}

void mali_group_remove_mmu_core(struct mali_group *group)
{
    /* This group object no longer owns the MMU core object */
    group->mmu = NULL;
    if (NULL != group->bottom_half_work_mmu) {
        _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
    }
}

_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
{
    /* This group object now owns the GP core object */
    group->gp_core = gp_core;
    group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
    if (NULL == group->bottom_half_work_gp) {
        return _MALI_OSK_ERR_FAULT;
    }
    return _MALI_OSK_ERR_OK;
}

void mali_group_remove_gp_core(struct mali_group *group)
{
    /* This group object no longer owns the GP core object */
    group->gp_core = NULL;
    if (NULL != group->bottom_half_work_gp) {
        _mali_osk_wq_delete_work(group->bottom_half_work_gp);
    }
}

_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
{
    /* This group object now owns the PP core object */
    group->pp_core = pp_core;
    group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
    if (NULL == group->bottom_half_work_pp) {
        return _MALI_OSK_ERR_FAULT;
    }
    return _MALI_OSK_ERR_OK;
}

void mali_group_remove_pp_core(struct mali_group *group)
{
    /* This group object no longer owns the PP core object */
    group->pp_core = NULL;
    if (NULL != group->bottom_half_work_pp) {
        _mali_osk_wq_delete_work(group->bottom_half_work_pp);
    }
}
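
/*
 * Note (added for clarity): per the "now owns" comments above, the add
 * functions transfer ownership of the core objects to the group, and
 * mali_group_delete() below frees any cores the group still owns. A core
 * should therefore not be deleted separately once it has been handed to
 * a group.
 */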
void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
{
    group->pm_domain = domain;
}

void mali_group_delete(struct mali_group *group)
{
    u32 i;

    MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));

    MALI_DEBUG_ASSERT(NULL == group->parent_group);

    /* Delete the resources that this group owns */
    if (NULL != group->gp_core) {
        mali_gp_delete(group->gp_core);
    }
    if (NULL != group->pp_core) {
        mali_pp_delete(group->pp_core);
    }
    if (NULL != group->mmu) {
        mali_mmu_delete(group->mmu);
    }

    if (mali_group_is_virtual(group)) {
        /* Remove all groups from the virtual group */
        struct mali_group *child;
        struct mali_group *temp;

        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
            child->parent_group = NULL;
            mali_group_delete(child);
        }

        mali_dlbu_delete(group->dlbu_core);
        if (NULL != group->bcast_core) {
            mali_bcast_unit_delete(group->bcast_core);
        }
    }

    for (i = 0; i < mali_global_num_groups; i++) {
        if (mali_global_groups[i] == group) {
            mali_global_groups[i] = NULL;
            mali_global_num_groups--;

            if (i != mali_global_num_groups) {
                /* We removed a group from the middle of the array -- move the last
                 * group to the current position to close the gap */
                mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
                mali_global_groups[mali_global_num_groups] = NULL;
            }
            break;
        }
    }

    if (NULL != group->timeout_timer) {
        _mali_osk_timer_del(group->timeout_timer);
        _mali_osk_timer_term(group->timeout_timer);
    }

    if (NULL != group->bottom_half_work_mmu) {
        _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
    }
    if (NULL != group->bottom_half_work_gp) {
        _mali_osk_wq_delete_work(group->bottom_half_work_gp);
    }
    if (NULL != group->bottom_half_work_pp) {
        _mali_osk_wq_delete_work(group->bottom_half_work_pp);
    }

#ifdef MALI_UPPER_HALF_SCHEDULING
    _mali_osk_spinlock_irq_term(group->lock);
#else
    _mali_osk_spinlock_term(group->lock);
#endif
    _mali_osk_free(group);
}
MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
{
    u32 i;
    struct mali_group *group;
    struct mali_group *temp;

    MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
    MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
    MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));

    i = 0;
    _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
        MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
        i++;
    }
})
/**
 * @brief Add a child group to a virtual parent group
 *
 * Before calling this function, the child must have its state set to JOINING_VIRTUAL
 * to ensure it is not touched during the transition period. When this function returns,
 * the child's state will be IN_VIRTUAL.
 */
void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
{
    mali_bool found;
    u32 i;
    struct mali_session_data *child_session;

    MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));

    MALI_ASSERT_GROUP_LOCKED(parent);

    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
    MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
    MALI_DEBUG_ASSERT(NULL == child->parent_group);
    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);

    _mali_osk_list_addtail(&child->group_list, &parent->group_list);

    child->state = MALI_GROUP_STATE_IN_VIRTUAL;
    child->parent_group = parent;

    MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);

    MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
    MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));

    /* Keep track of the L2 cache cores of child groups */
    found = MALI_FALSE;
    for (i = 0; i < 2; i++) {
        if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
            MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
            parent->l2_cache_core_ref_count[i]++;
            found = MALI_TRUE;
        }
    }

    if (!found) {
        /* First time we see this L2 cache, add it to our list */
        i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;

        MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));

        MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);

        parent->l2_cache_core[i] = child->l2_cache_core[0];
        parent->l2_cache_core_ref_count[i]++;
    }

    /* Update Broadcast Unit and DLBU */
    mali_bcast_add_group(parent->bcast_core, child);
    mali_dlbu_add_group(parent->dlbu_core, child);

    child_session = child->session;
    child->session = NULL;

    /* Up to this point only software state has been updated; the HW has not
     * been touched. Skip the rest if the HW should not be updated (i.e.
     * Mali is not powered).
     */
    if (!update_hw) {
        MALI_DEBUG_CODE(mali_group_print_virtual(parent));
        return;
    }

    /* Update MMU */
    if (parent->session == child_session) {
        mali_mmu_zap_tlb(child->mmu);
    } else {
        if (NULL == parent->session) {
            mali_mmu_activate_empty_page_directory(child->mmu);
        } else {
            mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
        }
    }

    /* Update HW only if power is on */
    mali_bcast_reset(parent->bcast_core);
    mali_dlbu_update_mask(parent->dlbu_core);

    /* Start a job on the child if the parent is already running one */
    if (NULL != parent->pp_running_job) {
        struct mali_pp_job *job = parent->pp_running_job;
        MALI_DEBUG_PRINT(3, ("Group %p joining running job %d on virtual group %p\n",
                             child, mali_pp_job_get_id(job), parent));
        MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
        mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);

        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
                                      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
                                      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
                                      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);

        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
                                      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
                                      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
                                      mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
    }

    MALI_DEBUG_CODE(mali_group_print_virtual(parent));
}
/**
 * @brief Remove a child group from its virtual parent group
 *
 * After the child is removed, its state will be LEAVING_VIRTUAL and must be set
 * to IDLE before it can be used.
 */
void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
{
    u32 i;

    MALI_ASSERT_GROUP_LOCKED(parent);

    MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));

    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
    MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
    MALI_DEBUG_ASSERT(parent == child->parent_group);
    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
    /* Removing groups while running is not yet supported. */
    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);

    mali_group_lock(child);

    /* Update Broadcast Unit and DLBU */
    mali_bcast_remove_group(parent->bcast_core, child);
    mali_dlbu_remove_group(parent->dlbu_core, child);

    /* Update HW only if power is on */
    if (mali_pm_is_power_on()) {
        mali_bcast_reset(parent->bcast_core);
        mali_dlbu_update_mask(parent->dlbu_core);
    }

    _mali_osk_list_delinit(&child->group_list);

    child->session = parent->session;
    child->parent_group = NULL;
    child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;

    /* Keep track of the L2 cache cores of child groups */
    i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;

    MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);

    parent->l2_cache_core_ref_count[i]--;
    if (parent->l2_cache_core_ref_count[i] == 0) {
        parent->l2_cache_core[i] = NULL;
    }

    MALI_DEBUG_CODE(mali_group_print_virtual(parent));

    mali_group_unlock(child);
}
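
/*
 * Summary of the child-group state machine implied by the two functions
 * above (added for clarity):
 *
 *     IDLE -> JOINING_VIRTUAL         (set by the caller before
 *                                      mali_group_add_group())
 *     JOINING_VIRTUAL -> IN_VIRTUAL   (mali_group_add_group())
 *     IN_VIRTUAL -> LEAVING_VIRTUAL   (mali_group_remove_group())
 *     LEAVING_VIRTUAL -> IDLE         (set by the caller before the
 *                                      group can be used again)
 */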
struct mali_group *mali_group_acquire_group(struct mali_group *parent)
{
    struct mali_group *child;

    MALI_ASSERT_GROUP_LOCKED(parent);

    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
    MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));

    child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);

    mali_group_remove_group(parent, child);

    return child;
}

void mali_group_reset(struct mali_group *group)
{
    /*
     * This function should not be used to abort jobs; it is
     * currently only called during insmod and PM resume.
     */
    MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
    MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
    MALI_DEBUG_ASSERT(NULL == group->pp_running_job);

    group->session = NULL;

    if (NULL != group->dlbu_core) {
        mali_dlbu_reset(group->dlbu_core);
    }
    if (NULL != group->bcast_core) {
        mali_bcast_reset(group->bcast_core);
    }
    if (NULL != group->mmu) {
        mali_group_reset_mmu(group);
    }
    if (NULL != group->gp_core) {
        mali_gp_reset(group->gp_core);
    }
    if (NULL != group->pp_core) {
        mali_group_reset_pp(group);
    }
}
struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
{
    return group->gp_core;
}

struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
{
    return group->pp_core;
}
void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
{
    struct mali_session_data *session;

    MALI_ASSERT_GROUP_LOCKED(group);
    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);

    session = mali_gp_job_get_session(job);

    if (NULL != group->l2_cache_core[0]) {
        mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
    }

    mali_group_activate_page_directory(group, session);

    mali_gp_job_start(group->gp_core, job);

    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
                                  MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
                                  MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
                                  mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
                                  MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
                                  mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);

#if defined(CONFIG_MALI400_PROFILING)
    if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
        (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
        mali_group_report_l2_cache_counters_per_core(group, 0);
    }
#endif /* #if defined(CONFIG_MALI400_PROFILING) */

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
    trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
                           mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
#endif

    group->gp_running_job = job;
    group->state = MALI_GROUP_STATE_WORKING;

    /* Setup the timeout timer value and save the job id for the job running on the GP core */
    _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
}
void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
{
    struct mali_session_data *session;

    MALI_ASSERT_GROUP_LOCKED(group);
    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);

    session = mali_pp_job_get_session(job);

    if (NULL != group->l2_cache_core[0]) {
        mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
    }
    if (NULL != group->l2_cache_core[1]) {
        mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
    }

    mali_group_activate_page_directory(group, session);

    if (mali_group_is_virtual(group)) {
        struct mali_group *child;
        struct mali_group *temp;
        u32 core_num = 0;

        MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));

        /* Configure DLBU for the job */
        mali_dlbu_config_job(group->dlbu_core, job);

        /* Write stack address for each child group */
        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
            mali_pp_write_addr_stack(child->pp_core, job);
            core_num++;
        }

        /* Try to use the DMA unit to start the job; fall back to writing directly to the core */
        MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
        if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
            mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
        }
    } else {
        mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
    }

    /* If the group is virtual, loop through the physical groups that belong
     * to it and report profiling events for their cores as virtual cores */
    if (MALI_TRUE == mali_group_is_virtual(group)) {
        struct mali_group *child;
        struct mali_group *temp;

        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
                                          MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
                                          MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
                                          mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
                                          MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
                                          MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
                                          mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
        }

#if defined(CONFIG_MALI400_PROFILING)
        if (0 != group->l2_cache_core_ref_count[0]) {
            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
                (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
            }
        }
        if (0 != group->l2_cache_core_ref_count[1]) {
            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
                (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
            }
        }
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
    } else { /* group is physical - report profiling events for the physical core */
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
                                      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
                                      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
                                      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
                                      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
                                      MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
                                      mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);

#if defined(CONFIG_MALI400_PROFILING)
        if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
            (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
            mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
        }
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
    }

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
    trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
#endif

    group->pp_running_job = job;
    group->pp_running_sub_job = sub_job;
    group->state = MALI_GROUP_STATE_WORKING;

    /* Setup the timeout timer value and save the job id for the job running on the PP core */
    _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
}
struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
{
    MALI_ASSERT_GROUP_LOCKED(group);

    if (group->state != MALI_GROUP_STATE_OOM ||
        mali_gp_job_get_id(group->gp_running_job) != job_id) {
        return NULL; /* Illegal request or job has already been aborted */
    }

    if (NULL != group->l2_cache_core[0]) {
        mali_l2_cache_invalidate(group->l2_cache_core[0]);
    }

    mali_mmu_zap_tlb_without_stall(group->mmu);

    mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);

    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);

    group->state = MALI_GROUP_STATE_WORKING;

    return group->gp_running_job;
}
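
/*
 * Note on the OOM flow (added for clarity): when the GP core raises
 * MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM, mali_group_bottom_half_gp() below
 * moves the group to MALI_GROUP_STATE_OOM and notifies the scheduler via
 * mali_gp_scheduler_oom(). Once more heap memory is available, the
 * scheduler calls mali_group_resume_gp_with_new_heap() above with the new
 * heap range, which zaps the TLB and resumes the job.
 */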
static void mali_group_reset_mmu(struct mali_group *group)
{
    struct mali_group *child;
    struct mali_group *temp;
    _mali_osk_errcode_t err;

    if (!mali_group_is_virtual(group)) {
        /* This is a physical group -- simply wait for the reset to complete. */
        err = mali_mmu_reset(group->mmu);
        MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
    } else { /* virtual group */
        err = mali_mmu_reset(group->mmu);
        if (_MALI_OSK_ERR_OK == err) {
            return;
        }

        /* If resetting the virtual MMU failed, loop through all members of
         * this virtual group and reset their MMUs individually.
         */
        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
            err = mali_mmu_reset(child->mmu);
            MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
        }
    }
}
static void mali_group_reset_pp(struct mali_group *group)
{
    struct mali_group *child;
    struct mali_group *temp;

    mali_pp_reset_async(group->pp_core);

    if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
        /* This is a physical group or an idle virtual group -- simply wait for
         * the reset to complete. */
        mali_pp_reset_wait(group->pp_core);
    } else { /* virtual group */
        /* Loop through all members of this virtual group and wait until they
         * are done resetting.
         */
        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
            mali_pp_reset_wait(child->pp_core);
        }
    }
}
/* Group must be locked when entering this function. Will be unlocked before exiting. */
static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
{
    struct mali_pp_job *pp_job_to_return;
    u32 pp_sub_job_to_return;

    MALI_DEBUG_ASSERT_POINTER(group);
    MALI_DEBUG_ASSERT_POINTER(group->pp_core);
    MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
    MALI_ASSERT_GROUP_LOCKED(group);

    mali_group_post_process_job_pp(group);

    if (success) {
        /* Only do soft reset for successful jobs, a full recovery
         * reset will be done for failed jobs. */
        mali_pp_reset_async(group->pp_core);
    }

    pp_job_to_return = group->pp_running_job;
    pp_sub_job_to_return = group->pp_running_sub_job;
    group->state = MALI_GROUP_STATE_IDLE;
    group->pp_running_job = NULL;

    if (!success) {
        MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
        mali_group_recovery_reset(group);
    } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
        MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
        mali_group_recovery_reset(group);
    }

    /* Return job to user, schedule and unlock group. */
    mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
}
/* Group must be locked when entering this function. Will be unlocked before exiting. */
static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
{
    struct mali_gp_job *gp_job_to_return;

    MALI_DEBUG_ASSERT_POINTER(group);
    MALI_DEBUG_ASSERT_POINTER(group->gp_core);
    MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
    MALI_ASSERT_GROUP_LOCKED(group);

    mali_group_post_process_job_gp(group, MALI_FALSE);

    if (success) {
        /* Only do soft reset for successful jobs, a full recovery
         * reset will be done for failed jobs. */
        mali_gp_reset_async(group->gp_core);
    }

    gp_job_to_return = group->gp_running_job;
    group->state = MALI_GROUP_STATE_IDLE;
    group->gp_running_job = NULL;

    if (!success) {
        MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
        mali_group_recovery_reset(group);
    } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
        MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
        mali_group_recovery_reset(group);
    }

    /* Return job to user, schedule and unlock group. */
    mali_gp_scheduler_job_done(group, gp_job_to_return, success);
}
void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
{
    MALI_ASSERT_GROUP_LOCKED(group);

    if (MALI_GROUP_STATE_IDLE == group->state ||
        mali_gp_job_get_id(group->gp_running_job) != job_id) {
        return; /* No need to cancel or job has already been aborted or completed */
    }

    /* Function will unlock the group, so we need to lock it again */
    mali_group_complete_gp_and_unlock(group, MALI_FALSE);
    mali_group_lock(group);
}

static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
{
    MALI_ASSERT_GROUP_LOCKED(group);

    if (MALI_GROUP_STATE_IDLE == group->state ||
        mali_pp_job_get_id(group->pp_running_job) != job_id) {
        return; /* No need to cancel or job has already been aborted or completed */
    }

    mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
    mali_group_lock(group);
}
void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
{
    struct mali_gp_job *gp_job;
    struct mali_pp_job *pp_job;
    u32 gp_job_id = 0;
    u32 pp_job_id = 0;
    mali_bool abort_pp = MALI_FALSE;
    mali_bool abort_gp = MALI_FALSE;

    mali_group_lock(group);

    if (mali_group_is_in_virtual(group)) {
        /* Group is member of a virtual group, don't touch it! */
        mali_group_unlock(group);
        return;
    }

    gp_job = group->gp_running_job;
    pp_job = group->pp_running_job;

    if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
        MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));

        gp_job_id = mali_gp_job_get_id(gp_job);
        abort_gp = MALI_TRUE;
    }

    if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
        MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));

        pp_job_id = mali_pp_job_get_id(pp_job);
        abort_pp = MALI_TRUE;
    }

    if (abort_gp) {
        mali_group_abort_gp_job(group, gp_job_id);
    }
    if (abort_pp) {
        mali_group_abort_pp_job(group, pp_job_id);
    }

    mali_group_remove_session_if_unused(group, session);

    mali_group_unlock(group);
}
struct mali_group *mali_group_get_glob_group(u32 index)
{
    if (mali_global_num_groups > index) {
        return mali_global_groups[index];
    }

    return NULL;
}

u32 mali_group_get_glob_num_groups(void)
{
    return mali_global_num_groups;
}
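
/*
 * Illustrative sketch (an assumption added for clarity, not driver code):
 * callers elsewhere in the driver can walk all groups with the two
 * accessors above.
 *
 *     u32 i;
 *     for (i = 0; i < mali_group_get_glob_num_groups(); i++) {
 *         struct mali_group *g = mali_group_get_glob_group(i);
 *         if (NULL != g) {
 *             ... inspect or operate on g ...
 *         }
 *     }
 */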
static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
{
    MALI_ASSERT_GROUP_LOCKED(group);

    MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));

    if (group->session != session) {
        /* Different session than last time, so we need to do some work */
        MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
        mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
        group->session = session;
    } else {
        /* Same session as last time, so no work required */
        MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
        mali_mmu_zap_tlb_without_stall(group->mmu);
    }
}

static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
{
    MALI_ASSERT_GROUP_LOCKED(group);

    if (MALI_GROUP_STATE_IDLE == group->state) {
        if (group->session == session) {
            MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
            MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
            MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group 0x%08X\n", session, group));
            mali_mmu_activate_empty_page_directory(group->mmu);
            group->session = NULL;
        }
    }
}
mali_bool mali_group_power_is_on(struct mali_group *group)
{
    MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
    return group->power_is_on;
}

void mali_group_power_on_group(struct mali_group *group)
{
    MALI_DEBUG_ASSERT_POINTER(group);
    MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
                      || MALI_GROUP_STATE_IN_VIRTUAL == group->state
                      || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
                      || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
                      || MALI_GROUP_STATE_DISABLED == group->state);

    MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));

    group->power_is_on = MALI_TRUE;
}

void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
{
    MALI_DEBUG_ASSERT_POINTER(group);
    MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
                      || MALI_GROUP_STATE_IN_VIRTUAL == group->state
                      || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
                      || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
                      || MALI_GROUP_STATE_DISABLED == group->state);

    MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));

    /* It is necessary to set group->session = NULL so that the powered-off MMU is not
     * written to on map/unmap. It is also necessary to set group->power_is_on = MALI_FALSE
     * so that pending bottom halves do not access powered-off cores. */
    group->session = NULL;

    if (do_power_change) {
        group->power_is_on = MALI_FALSE;
    }
}
void mali_group_power_on(void)
{
    int i;
    for (i = 0; i < mali_global_num_groups; i++) {
        struct mali_group *group = mali_global_groups[i];

        mali_group_lock(group);
        if (MALI_GROUP_STATE_DISABLED == group->state) {
            MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
        } else {
            mali_group_power_on_group(group);
        }
        mali_group_unlock(group);
    }
    MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
}

void mali_group_power_off(mali_bool do_power_change)
{
    int i;
    for (i = 0; i < mali_global_num_groups; i++) {
        struct mali_group *group = mali_global_groups[i];

        mali_group_lock(group);
        if (MALI_GROUP_STATE_DISABLED == group->state) {
            MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
        } else {
            mali_group_power_off_group(group, do_power_change);
        }
        mali_group_unlock(group);
    }
    MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
}
static void mali_group_recovery_reset(struct mali_group *group)
{
    _mali_osk_errcode_t err;

    MALI_ASSERT_GROUP_LOCKED(group);

    /* Stop cores, bus stop */
    if (NULL != group->pp_core) {
        mali_pp_stop_bus(group->pp_core);
    } else {
        mali_gp_stop_bus(group->gp_core);
    }

    /* Flush MMU and clear page fault (if any) */
    mali_mmu_activate_fault_flush_page_directory(group->mmu);
    mali_mmu_page_fault_done(group->mmu);

    /* Wait for cores to stop bus, then do a hard reset on them */
    if (NULL != group->pp_core) {
        if (mali_group_is_virtual(group)) {
            struct mali_group *child, *temp;

            /* Disable the broadcast unit while we do reset directly on the member cores. */
            mali_bcast_disable(group->bcast_core);

            _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
                mali_pp_stop_bus_wait(child->pp_core);
                mali_pp_hard_reset(child->pp_core);
            }

            mali_bcast_enable(group->bcast_core);
        } else {
            mali_pp_stop_bus_wait(group->pp_core);
            mali_pp_hard_reset(group->pp_core);
        }
    } else {
        mali_gp_stop_bus_wait(group->gp_core);
        mali_gp_hard_reset(group->gp_core);
    }

    /* Reset MMU */
    err = mali_mmu_reset(group->mmu);
    MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
    MALI_IGNORE(err);

    group->session = NULL;
}
#if MALI_STATE_TRACKING
u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
{
    int n = 0;

    n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
    n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
    if (group->gp_core) {
        n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
        n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
    }
    if (group->pp_core) {
        n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
        n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d\n",
                                group->pp_running_job, group->pp_running_sub_job);
    }
    return n;
}
#endif
/* Group must be locked when entering this function. Will be unlocked before exiting. */
static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
{
    MALI_DEBUG_ASSERT_POINTER(group);
    MALI_ASSERT_GROUP_LOCKED(group);

    if (NULL != group->pp_core) {
        struct mali_pp_job *pp_job_to_return;
        u32 pp_sub_job_to_return;

        MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);

        mali_group_post_process_job_pp(group);

        pp_job_to_return = group->pp_running_job;
        pp_sub_job_to_return = group->pp_running_sub_job;
        group->state = MALI_GROUP_STATE_IDLE;
        group->pp_running_job = NULL;

        mali_group_recovery_reset(group); /* This will also clear the page fault itself */

        /* Will unlock group. */
        mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
    } else {
        struct mali_gp_job *gp_job_to_return;

        MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);

        mali_group_post_process_job_gp(group, MALI_FALSE);

        gp_job_to_return = group->gp_running_job;
        group->state = MALI_GROUP_STATE_IDLE;
        group->gp_running_job = NULL;

        mali_group_recovery_reset(group); /* This will also clear the page fault itself */

        /* Will unlock group. */
        mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
    }
}
_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
{
    _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
    struct mali_group *group = (struct mali_group *)data;
    struct mali_mmu_core *mmu = group->mmu;
    u32 int_stat;

    MALI_DEBUG_ASSERT_POINTER(mmu);

#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
    if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
        goto out;
    }
#endif

    /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
    int_stat = mali_mmu_get_int_status(mmu);
    if (0 != int_stat) {
        struct mali_group *parent = group->parent_group;

        /* Page fault or bus error; we treat them both the same way */
        mali_mmu_mask_all_interrupts(mmu);
        if (NULL == parent) {
            _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
        } else {
            _mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
        }
        err = _MALI_OSK_ERR_OK;
        goto out;
    }

out:
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
    mali_pm_domain_unlock_state(group->pm_domain);
#endif
    return err;
}
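
/*
 * Note on the interrupt handling pattern (added for clarity): each upper
 * half in this file (mali_group_upper_half_mmu above, and the GP/PP
 * variants below) runs in interrupt context. It only checks whether this
 * core raised the interrupt (the IRQ line may be shared), masks the
 * core's interrupts, and defers the real work to the matching bottom half
 * via _mali_osk_wq_schedule_work(). The bottom halves run from the work
 * queue, where it is safe to take the group lock.
 */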
static void mali_group_bottom_half_mmu(void *data)
{
    struct mali_group *group = (struct mali_group *)data;
    struct mali_mmu_core *mmu = group->mmu;
    u32 rawstat;
    MALI_DEBUG_CODE(u32 status);

    MALI_DEBUG_ASSERT_POINTER(mmu);

    mali_group_lock(group);

    MALI_DEBUG_ASSERT(NULL == group->parent_group);

    if (MALI_FALSE == mali_group_power_is_on(group)) {
        MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
        mali_group_unlock(group);
        return;
    }

    rawstat = mali_mmu_get_rawstat(mmu);
    MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));

    MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));

    if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
        /* An actual page fault has occurred. */
#ifdef DEBUG
        u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
        MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
                             fault_address,
                             (status >> 6) & 0x1F,
                             (status & 32) ? "write" : "read",
                             mmu->hw_core.description));
#endif

        mali_group_mmu_page_fault_and_unlock(group);
        return;
    }

    mali_group_unlock(group);
}
_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
{
    _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
    struct mali_group *group = (struct mali_group *)data;
    struct mali_gp_core *core = group->gp_core;
    u32 irq_readout;

#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
    if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
        goto out;
    }
#endif

    irq_readout = mali_gp_get_int_stat(core);

    if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
        /* Mask out all IRQs from this core until IRQ is handled */
        mali_gp_mask_all_interrupts(core);

        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);

        /* We do need to handle this in a bottom half */
        _mali_osk_wq_schedule_work(group->bottom_half_work_gp);

        err = _MALI_OSK_ERR_OK;
        goto out;
    }

out:
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
    mali_pm_domain_unlock_state(group->pm_domain);
#endif
    return err;
}
static void mali_group_bottom_half_gp(void *data)
{
    struct mali_group *group = (struct mali_group *)data;
    u32 irq_readout;
    u32 irq_errors;

    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);

    mali_group_lock(group);

    if (MALI_FALSE == mali_group_power_is_on(group)) {
        MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
        mali_group_unlock(group);
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
        return;
    }

    irq_readout = mali_gp_read_rawstat(group->gp_core);

    MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));

    if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
        u32 core_status = mali_gp_read_core_status(group->gp_core);
        if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
            MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
            group->core_timed_out = MALI_FALSE;

            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
                                          MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                                          MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
                                          0, _mali_osk_get_tid(), 0, 0, 0);

            mali_group_complete_gp_and_unlock(group, MALI_TRUE);
            return;
        }
    }

    /*
     * Now let's look at the possible error cases (IRQ indicating error or timeout).
     * The END_CMD_LST, HANG and PLBU_OOM interrupts are not considered errors.
     */
    irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_HANG | MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
    if (0 != irq_errors) {
        MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
        group->core_timed_out = MALI_FALSE;

        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
                                      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                                      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
                                      0, _mali_osk_get_tid(), 0, 0, 0);

        mali_group_complete_gp_and_unlock(group, MALI_FALSE);
        return;
    } else if (group->core_timed_out) { /* SW timeout */
        group->core_timed_out = MALI_FALSE;
        if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
            MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));

            mali_group_complete_gp_and_unlock(group, MALI_FALSE);
            return;
        }
    } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
        /* GP wants more memory in order to continue. */
        MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));

        group->state = MALI_GROUP_STATE_OOM;
        mali_group_unlock(group); /* Nothing to do on the HW side, so just release the group lock right away */
        mali_gp_scheduler_oom(group, group->gp_running_job);

        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
        return;
    }

    /*
     * The only way to get here is if we received only one of the two needed
     * END_CMD_LST interrupts. Re-enable all interrupts except the one that
     * has already been received, and continue running.
     */
    mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
    mali_group_unlock(group);

    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
}

static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
{
	/* Stop the timeout timer. */
	_mali_osk_timer_del_async(group->timeout_timer);

	if (NULL == group->gp_running_job) {
		/* Nothing to do */
		return;
	}

	mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);

#if defined(CONFIG_MALI400_PROFILING)
	if (suspend) {
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
					      mali_gp_job_get_perf_counter_value0(group->gp_running_job),
					      mali_gp_job_get_perf_counter_value1(group->gp_running_job),
					      mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
					      0, 0);
	} else {
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
					      mali_gp_job_get_perf_counter_value0(group->gp_running_job),
					      mali_gp_job_get_perf_counter_value1(group->gp_running_job),
					      mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
					      0, 0);

		if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
		    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
			mali_group_report_l2_cache_counters_per_core(group, 0);
	}
#endif
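
	/*
	 * Both counter source IDs are packed into a single event argument
	 * above: src0 in bits 7:0 and src1 in bits 15:8. A consumer of these
	 * events would unpack them roughly like this (sketch, not code from
	 * this driver):
	 *
	 *   u32 src0 = packed & 0xFF;
	 *   u32 src1 = (packed >> 8) & 0xFF;
	 */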
	mali_gp_job_set_current_heap_addr(group->gp_running_job,
					  mali_gp_read_plbu_alloc_start_addr(group->gp_core));
}

_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	struct mali_group *group = (struct mali_group *)data;
	struct mali_pp_core *core = group->pp_core;
	u32 irq_readout;

#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
		goto out;
	}
#endif

	/*
	 * For Mali-450 there is one particular case we need to watch out for:
	 *
	 * Criterion 1) this function call can be due to a shared interrupt,
	 * and not necessarily because this core signaled an interrupt.
	 * Criterion 2) this core is a part of a virtual group, and thus it should
	 * not do any post processing.
	 * Criterion 3) this core has actually indicated that it has completed by
	 * having set raw_stat/int_stat registers to != 0
	 *
	 * If all these criteria are met, then we could incorrectly start post
	 * processing on the wrong group object (this should only happen on the
	 * parent group)
	 */
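	/*
	 * Concrete scenario (illustrative): a physical PP core that is a member
	 * of the virtual Mali-450 group shares its IRQ line with its siblings.
	 * A sibling raises the interrupt, this handler runs for the member's
	 * group object, and the member's own raw_stat is non-zero because the
	 * virtual job just finished on it. Without the is-in-virtual checks
	 * below, this group would start post processing a job that is owned by
	 * the parent (virtual) group.
	 */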
#if !defined(MALI_UPPER_HALF_SCHEDULING)
	if (mali_group_is_in_virtual(group)) {
		/*
		 * This check is done without the group lock held, which could lead to
		 * a potential race. This is however ok, since we will safely re-check
		 * this with the group lock held at a later stage. This is just an
		 * early out which will strongly benefit shared IRQ systems.
		 */
		err = _MALI_OSK_ERR_OK;
		goto out;
	}
#endif

	irq_readout = mali_pp_get_int_stat(core);
	if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
		/* Mask out all IRQs from this core until IRQ is handled */
		mali_pp_mask_all_interrupts(core);

#if defined(CONFIG_MALI400_PROFILING)
		/* Currently no support for this interrupt event for the virtual PP core */
		if (!mali_group_is_virtual(group)) {
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
						      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
						      MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
						      irq_readout, 0, 0, 0, 0);
		}
#endif

#if defined(MALI_UPPER_HALF_SCHEDULING)
		/* Check if job is complete without errors */
		if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
						      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);

			MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));

			mali_group_lock(group);

			/* Check if job is complete without errors, again, after taking the group lock */
			irq_readout = mali_pp_read_rawstat(core);
			if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
				mali_pp_enable_interrupts(core);
				mali_group_unlock(group);
				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
							      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
							      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
							      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
				err = _MALI_OSK_ERR_OK;
				goto out;
			}
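
			/*
			 * The read above is the classic check/lock/re-check pattern:
			 * mali_pp_get_int_stat() was sampled without the group lock as a
			 * cheap hint, and mali_pp_read_rawstat() is read again under
			 * mali_group_lock() so that a completion racing with this upper
			 * half is acted upon exactly once.
			 */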
			if (mali_group_is_virtual(group)) {
				u32 status_readout = mali_pp_read_status(group->pp_core);
				if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
					MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
					mali_pp_enable_interrupts(core);
					mali_group_unlock(group);
					_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
								      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
								      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
								      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
					err = _MALI_OSK_ERR_OK;
					goto out;
				}
			}

			if (mali_group_is_in_virtual(group)) {
				/* We're member of a virtual group, so interrupt should be handled by the virtual group */
				mali_pp_enable_interrupts(core);
				mali_group_unlock(group);
				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
							      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
							      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
							      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
				err = _MALI_OSK_ERR_FAULT;
				goto out;
			}

			group->core_timed_out = MALI_FALSE;

			mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);

			/* No need to enable interrupts again, since the core will be reset while completing the job */

			MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));

			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
						      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
			err = _MALI_OSK_ERR_OK;
			goto out;
		}
#endif

		/* We do need to handle this in a bottom half */
		_mali_osk_wq_schedule_work(group->bottom_half_work_pp);
		err = _MALI_OSK_ERR_OK;
		goto out;
	}

out:
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
	mali_pm_domain_unlock_state(group->pm_domain);
#endif
	return err;
}
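
/*
 * Bottom half for PP interrupts: runs as deferred work scheduled by the upper
 * half via _mali_osk_wq_schedule_work(), and performs the full
 * completion/error/timeout handling under the group lock.
 */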
static void mali_group_bottom_half_pp(void *data)
{
	struct mali_group *group = (struct mali_group *)data;
	struct mali_pp_core *core = group->pp_core;
	u32 irq_readout;
	u32 irq_errors;

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
				      0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);

	mali_group_lock(group);

	if (mali_group_is_in_virtual(group)) {
		/* We're member of a virtual group, so interrupt should be handled by the virtual group */
		mali_pp_enable_interrupts(core);
		mali_group_unlock(group);
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
					      0, _mali_osk_get_tid(), 0, 0, 0);
		return;
	}

	if (MALI_FALSE == mali_group_power_is_on(group)) {
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
		mali_group_unlock(group);
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
					      0, _mali_osk_get_tid(), 0, 0, 0);
		return;
	}

	irq_readout = mali_pp_read_rawstat(group->pp_core);

	MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));

	/* Check if job is complete without errors */
	if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
		if (mali_group_is_virtual(group)) {
			u32 status_readout = mali_pp_read_status(group->pp_core);

			if ((status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) && !group->core_timed_out) {
				MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
				mali_pp_enable_interrupts(core);
				mali_group_unlock(group);
				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
							      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
							      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
							      0, _mali_osk_get_tid(), 0, 0, 0);
				return;
			}
		}

		if (!group->core_timed_out) {
			MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
			group->core_timed_out = MALI_FALSE;

			mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);

			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
						      0, _mali_osk_get_tid(), 0, 0, 0);
			return;
		}
	}

	/*
	 * Now let's look at the possible error cases (IRQ indicating error or timeout).
	 * END_OF_FRAME and HANG interrupts are not considered errors.
	 */
	irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
	if (0 != irq_errors) {
		MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
				  irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
		group->core_timed_out = MALI_FALSE;

		mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
					      0, _mali_osk_get_tid(), 0, 0, 0);
		return;
	} else if (group->core_timed_out) { /* SW timeout */
		group->core_timed_out = MALI_FALSE;

		if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
			MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
				    mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));

			mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
		} else {
			mali_group_unlock(group);
		}

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
					      0, _mali_osk_get_tid(), 0, 0, 0);
		return;
	}

	/*
	 * We should never get here; re-enable interrupts and continue.
	 */
	if (0 == irq_readout) {
		MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
				     mali_pp_get_hw_core_desc(group->pp_core)));
	} else {
		MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
				  mali_pp_get_hw_core_desc(group->pp_core)));
	}

	mali_pp_enable_interrupts(core);
	mali_group_unlock(group);

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
				      0, _mali_osk_get_tid(), 0, 0, 0);
}
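
/*
 * Harvest per-core performance counters (and, when profiling is enabled, emit
 * the matching profiling events) for the PP job that just finished. For a
 * virtual group the children are walked with _MALI_OSK_LIST_FOREACHENTRY, the
 * driver's deletion-safe list iterator (the extra 'temp' cursor plays the same
 * role as in the kernel's list_for_each_entry_safe).
 */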
static void mali_group_post_process_job_pp(struct mali_group *group)
{
	MALI_ASSERT_GROUP_LOCKED(group);

	/* Stop the timeout timer. */
	_mali_osk_timer_del_async(group->timeout_timer);

	if (NULL != group->pp_running_job) {
		if (MALI_TRUE == mali_group_is_virtual(group)) {
			struct mali_group *child;
			struct mali_group *temp;

			/* update performance counters from each physical pp core within this virtual group */
			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
				mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
			}

#if defined(CONFIG_MALI400_PROFILING)
			/* send profiling data per physical core */
			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
							      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
							      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
							      mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
							      mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
							      mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
							      0, 0);
			}
			if (0 != group->l2_cache_core_ref_count[0]) {
				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
				    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
				}
			}
			if (0 != group->l2_cache_core_ref_count[1]) {
				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
				    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
				}
			}
#endif
		} else {
			/* update performance counters for a physical group's pp core */
			mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);

#if defined(CONFIG_MALI400_PROFILING)
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
						      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
						      MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
						      mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
						      mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
						      mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
						      0, 0);

			if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
			    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
				mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
			}
#endif
		}
	}
}

static void mali_group_timeout(void *data)
{
	struct mali_group *group = (struct mali_group *)data;

	group->core_timed_out = MALI_TRUE;

	if (NULL != group->gp_core) {
		MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
		_mali_osk_wq_schedule_work(group->bottom_half_work_gp);
	} else {
		MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
		_mali_osk_wq_schedule_work(group->bottom_half_work_pp);
	}
}
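
/*
 * Note that the timer callback above does not abort the job itself: it only
 * marks the group as timed out and defers to the bottom half, which then
 * checks _mali_osk_timer_pending() to distinguish a genuine timeout from a
 * timer that has since been re-armed for a newly started job.
 */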

void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
{
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(session);

	/* Early out - safe even if mutex is not held */
	if (group->session != session) return;

	mali_group_lock(group);

	mali_group_remove_session_if_unused(group, session);

	if (group->session == session) {
		/* The Zap also does the stall and disable_stall */
		mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
		if (MALI_TRUE != zap_success) {
			MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));

			mali_group_mmu_page_fault_and_unlock(group);
			return;
		}
	}

	mali_group_unlock(group);
}
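
/*
 * A typical caller tears down a session by zapping it from every group, along
 * these lines (sketch with a hypothetical iteration; the actual schedulers
 * own the group lists):
 *
 *   for each group in the system:
 *       mali_group_zap_session(group, session);
 *
 * The early out above makes this cheap for groups the session never used.
 */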

#if defined(CONFIG_MALI400_PROFILING)
static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
{
	u32 source0 = 0;
	u32 value0 = 0;
	u32 source1 = 0;
	u32 value1 = 0;
	u32 profiling_channel = 0;

	switch (core_num) {
	case 0:
		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
				    MALI_PROFILING_EVENT_CHANNEL_GPU |
				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
		break;
	case 1:
		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
				    MALI_PROFILING_EVENT_CHANNEL_GPU |
				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
		break;
	case 2:
		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
				    MALI_PROFILING_EVENT_CHANNEL_GPU |
				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
		break;
	default:
		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
				    MALI_PROFILING_EVENT_CHANNEL_GPU |
				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
		break;
	}

	if (0 == core_num) {
		mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
	}
	if (1 == core_num) {
		if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
			mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
		} else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
			mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
		}
	}
	if (2 == core_num) {
		if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
			mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
		} else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
			mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
		}
	}
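
	/*
	 * Pack both counter source IDs into one event argument, mirroring the
	 * GP/PP events earlier in this file: source0 lands in bits 7:0 and
	 * source1 in bits 15:8 of the word passed below.
	 */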
	_mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */

mali_bool mali_group_is_enabled(struct mali_group *group)
{
	mali_bool enabled = MALI_TRUE;

	MALI_DEBUG_ASSERT_POINTER(group);

	mali_group_lock(group);
	if (MALI_GROUP_STATE_DISABLED == group->state) {
		enabled = MALI_FALSE;
	}
	mali_group_unlock(group);

	return enabled;
}

void mali_group_enable(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
			  || NULL != mali_group_get_gp_core(group));

	if (NULL != mali_group_get_pp_core(group)) {
		mali_pp_scheduler_enable_group(group);
	} else {
		mali_gp_scheduler_enable_group(group);
	}
}

void mali_group_disable(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
			  || NULL != mali_group_get_gp_core(group));

	if (NULL != mali_group_get_pp_core(group)) {
		mali_pp_scheduler_disable_group(group);
	} else {
		mali_gp_scheduler_disable_group(group);
	}
}
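
/*
 * Only valid for groups with a single L2 cache; the assert below documents
 * this, since a virtual Mali-450 group can reference two L2 caches while a
 * physical group stores its one associated L2 in l2_cache_core[0].
 */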
static struct mali_pm_domain *mali_group_get_l2_domain(struct mali_group *group)
{
	MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);

	/* l2_cache_core[0] stores the related l2 domain */
	return group->l2_cache_core[0]->pm_domain;
}
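
/*
 * The two helpers below take and release the power-domain references a group
 * depends on. Acquisition order is the L2 domain first, then the group's own
 * core domain; release happens in the reverse order, the usual pairing for
 * ref-counted resources.
 */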
void mali_group_get_pm_domain_ref(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	/* Get group used l2 domain ref */
	mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
	/* Get group used core domain ref */
	mali_pm_domain_ref_get(group->pm_domain);
}

void mali_group_put_pm_domain_ref(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	/* Put group used core domain ref */
	mali_pm_domain_ref_put(group->pm_domain);
	/* Put group used l2 domain ref */
	mali_pm_domain_ref_put(mali_group_get_l2_domain(group));
}