panfrost_gpu.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"

static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 fault_status, state;

	if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended))
		return IRQ_NONE;

	fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
	state = gpu_read(pfdev, GPU_INT_STAT);
	if (!state)
		return IRQ_NONE;

	if (state & GPU_IRQ_MASK_ERROR) {
		u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
		address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);

		dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panfrost_exception_name(fault_status & 0xFF),
			 address);

		if (state & GPU_IRQ_MULTIPLE_FAULT)
			dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");
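
		/*
		 * Mask the GPU IRQ until the next reset re-enables it, so a
		 * stuck fault cannot flood the handler.
		 */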
		gpu_write(pfdev, GPU_INT_MASK, 0);
	}

	if (state & GPU_IRQ_PERFCNT_SAMPLE_COMPLETED)
		panfrost_perfcnt_sample_done(pfdev);

	if (state & GPU_IRQ_CLEAN_CACHES_COMPLETED)
		panfrost_perfcnt_clean_cache_done(pfdev);

	gpu_write(pfdev, GPU_INT_CLEAR, state);

	return IRQ_HANDLED;
}

int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;

	gpu_write(pfdev, GPU_INT_MASK, 0);
	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);

	clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
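
	/*
	 * With the IRQ masked above, reset completion has to be observed by
	 * polling the raw interrupt status rather than waiting for the
	 * handler to run.
	 */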
	gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
		val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
	if (ret) {
		dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n");

		gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
		ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
						 val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
		if (ret) {
			dev_err(pfdev->dev, "gpu hard reset timed out\n");
			return ret;
		}
	}

	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);

	/* Only enable the interrupts we care about */
	gpu_write(pfdev, GPU_INT_MASK,
		  GPU_IRQ_MASK_ERROR |
		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
		  GPU_IRQ_CLEAN_CACHES_COMPLETED);

	/*
	 * All in-flight jobs should have released their cycle
	 * counter references upon reset, but let us make sure
	 */
	if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0))
		atomic_set(&pfdev->cycle_counter.use_count, 0);

	return 0;
}

void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
{
	/*
	 * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
	 * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
	 * to operate correctly.
	 */
	gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
	gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}

static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
	u32 quirks = 0;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
	    panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
		quirks |= SC_LS_PAUSEBUFFER_DISABLE;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
		quirks |= SC_SDC_DISABLE_OQ_DISCARD;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
		quirks |= SC_ENABLE_TEXGRD_FLAGS;

	if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
		if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
			quirks |= SC_LS_ATTR_CHECK_DISABLE;
		else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
			quirks |= SC_LS_ALLOW_ATTR_TYPES;
	}

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_2968_TTRX_3162))
		quirks |= SC_VAR_ALGORITHM;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
		quirks |= SC_TLS_HASH_ENABLE;

	if (quirks)
		gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);

	quirks = gpu_read(pfdev, GPU_TILER_CONFIG);

	/* Set tiler clock gate override if required */
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
		quirks |= TC_CLOCK_GATE_OVERRIDE;

	gpu_write(pfdev, GPU_TILER_CONFIG, quirks);

	quirks = 0;
	if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
	    pfdev->features.revision >= 0x2000)
		quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
	else if (panfrost_model_eq(pfdev, 0x6000) &&
		 pfdev->features.coherency_features == COHERENCY_ACE)
		quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
			   JM_FORCE_COHERENCY_FEATURES_SHIFT;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE))
		quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;

	if (quirks)
		gpu_write(pfdev, GPU_JM_CONFIG, quirks);

	/* Platform-specific quirks go here */
	if (pfdev->comp->vendor_quirk)
		pfdev->comp->vendor_quirk(pfdev);
}

#define MAX_HW_REVS 6

struct panfrost_model {
	const char *name;
	u32 id;
	u64 features;
	u64 issues;
	struct {
		u32 revision;
		u64 issues;
	} revs[MAX_HW_REVS];
};

#define GPU_MODEL(_name, _id, ...) \
{\
	.name = __stringify(_name),				\
	.id = _id,						\
	.features = hw_features_##_name,			\
	.issues = hw_issues_##_name,				\
	.revs = { __VA_ARGS__ },				\
}

#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
{\
	.revision = (_rev) << 12 | (_p) << 4 | (_s),		\
	.issues = hw_issues_##name##_r##_rev##p##_p##stat,	\
}
#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )
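
/*
 * For example, GPU_REV_EXT(t600, 0, 0, 1, _15dev0) pastes together the
 * issue table name hw_issues_t600_r0p0_15dev0 and encodes revision r0p0
 * status 1 as 0x001.
 */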

static const struct panfrost_model gpu_models[] = {
	/* T60x has an oddball version */
	GPU_MODEL(t600, 0x600,
		GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
	GPU_MODEL(t620, 0x620,
		GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
	GPU_MODEL(t720, 0x720),
	GPU_MODEL(t760, 0x750,
		GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
		GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
		GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
	GPU_MODEL(t820, 0x820),
	GPU_MODEL(t830, 0x830),
	GPU_MODEL(t860, 0x860),
	GPU_MODEL(t880, 0x880),

	GPU_MODEL(g71, 0x6000,
		GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
	GPU_MODEL(g72, 0x6001),
	GPU_MODEL(g51, 0x7000),
	GPU_MODEL(g76, 0x7001),
	GPU_MODEL(g52, 0x7002),
	GPU_MODEL(g31, 0x7003,
		GPU_REV(g31, 1, 0)),

	GPU_MODEL(g57, 0x9001,
		GPU_REV(g57, 0, 0)),

	/* MediaTek MT8192 has a Mali-G57 with a different GPU ID from the
	 * standard. Arm's driver does not appear to handle this model.
	 * ChromeOS has a hack downstream for it. Treat it as equivalent to
	 * standard Mali-G57 for now.
	 */
	GPU_MODEL(g57, 0x9003,
		GPU_REV(g57, 0, 0)),
};

static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
	u32 gpu_id, num_js, major, minor, status, rev;
	const char *name = "unknown";
	u64 hw_feat = 0;
	u64 hw_issues = hw_issues_all;
	const struct panfrost_model *model;
	int i;

	pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
	pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
	pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
	pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
	pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
	pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
	pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
	pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
	for (i = 0; i < 4; i++)
		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));

	pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);

	pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
	num_js = hweight32(pfdev->features.js_present);
	for (i = 0; i < num_js; i++)
		pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));

	pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
	pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;

	pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
	pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;

	pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
	pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
	pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);

	pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
	pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;

	pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);

	gpu_id = gpu_read(pfdev, GPU_ID);
	pfdev->features.revision = gpu_id & 0xffff;
	pfdev->features.id = gpu_id >> 16;

	/* The T60x has an oddball ID value. Fix it up to the standard Midgard
	 * format so we (and userspace) don't have to special case it.
	 */
	if (pfdev->features.id == 0x6956)
		pfdev->features.id = 0x0600;

	major = (pfdev->features.revision >> 12) & 0xf;
	minor = (pfdev->features.revision >> 4) & 0xff;
	status = pfdev->features.revision & 0xf;
	rev = pfdev->features.revision;
	gpu_id = pfdev->features.id;
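
	/*
	 * Match the exact revision when the table has one; otherwise fall
	 * back to an entry that matches with the status nibble masked off.
	 */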
	for (model = gpu_models; model->name; model++) {
		int best = -1;

		if (!panfrost_model_eq(pfdev, model->id))
			continue;

		name = model->name;
		hw_feat = model->features;
		hw_issues |= model->issues;
		for (i = 0; i < MAX_HW_REVS; i++) {
			if (model->revs[i].revision == rev) {
				best = i;
				break;
			} else if (model->revs[i].revision == (rev & ~0xf))
				best = i;
		}

		if (best >= 0)
			hw_issues |= model->revs[best].issues;

		break;
	}

	bitmap_from_u64(pfdev->features.hw_features, hw_feat);
	bitmap_from_u64(pfdev->features.hw_issues, hw_issues);

	dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 name, gpu_id, major, minor, status);
	dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
		 pfdev->features.hw_features,
		 pfdev->features.hw_issues);

	dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
		 pfdev->features.l2_features,
		 pfdev->features.core_features,
		 pfdev->features.tiler_features,
		 pfdev->features.mem_features,
		 pfdev->features.mmu_features,
		 pfdev->features.as_present,
		 pfdev->features.js_present);

	dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
		 pfdev->features.shader_present, pfdev->features.l2_present);
}
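
/*
 * The atomic fast paths below cover the common case where the cycle
 * counter is already running; the spinlock only serialises the 0 <-> 1
 * transitions that actually start or stop the hardware counter.
 */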
void panfrost_cycle_counter_get(struct panfrost_device *pfdev)
{
	if (atomic_inc_not_zero(&pfdev->cycle_counter.use_count))
		return;

	spin_lock(&pfdev->cycle_counter.lock);
	if (atomic_inc_return(&pfdev->cycle_counter.use_count) == 1)
		gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_START);
	spin_unlock(&pfdev->cycle_counter.lock);
}

void panfrost_cycle_counter_put(struct panfrost_device *pfdev)
{
	if (atomic_add_unless(&pfdev->cycle_counter.use_count, -1, 1))
		return;

	spin_lock(&pfdev->cycle_counter.lock);
	if (atomic_dec_return(&pfdev->cycle_counter.use_count) == 0)
		gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_STOP);
	spin_unlock(&pfdev->cycle_counter.lock);
}
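
/*
 * The 64-bit cycle count is exposed as two 32-bit registers. Re-read
 * the high word until it is stable so a low-word rollover between the
 * two reads cannot return a torn value.
 */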
unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev)
{
	u32 hi, lo;

	do {
		hi = gpu_read(pfdev, GPU_CYCLE_COUNT_HI);
		lo = gpu_read(pfdev, GPU_CYCLE_COUNT_LO);
	} while (hi != gpu_read(pfdev, GPU_CYCLE_COUNT_HI));

	return ((u64)hi << 32) | lo;
}

static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
{
	u64 core_mask;

	if (pfdev->features.l2_present == 1)
		return U64_MAX;

	/*
	 * Only support one core group now.
	 * ~(l2_present - 1) unsets all bits in l2_present except
	 * the bottom bit. (l2_present - 2) has all the bits in
	 * the first core group set. AND them together to generate
	 * a mask of cores in the first core group.
	 */
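	/*
	 * Worked example with a hypothetical two-group topology:
	 * l2_present = 0b10001 gives ~(0b10000) & 0b01111 = 0b01111,
	 * i.e. only the cores behind the first L2 slice.
	 */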
	core_mask = ~(pfdev->features.l2_present - 1) &
		     (pfdev->features.l2_present - 2);
	dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
		      hweight64(core_mask),
		      hweight64(pfdev->features.shader_present));

	return core_mask;
}

void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;
	u64 core_mask;

	panfrost_gpu_init_quirks(pfdev);
	core_mask = panfrost_get_core_mask(pfdev);

	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
		val, val == (pfdev->features.l2_present & core_mask),
		10, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu L2");

	gpu_write(pfdev, SHADER_PWRON_LO,
		  pfdev->features.shader_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
		val, val == (pfdev->features.shader_present & core_mask),
		10, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu shader");

	gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
		val, val == pfdev->features.tiler_present, 10, 1000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu tiler");
}
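
/*
 * The L2 is powered up first and powered down last; the shader cores
 * and tiler that sit behind it are brought down before it.
 */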
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;

	gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
					 val, !val, 1, 2000);
	if (ret)
		dev_err(pfdev->dev, "shader power transition timeout");

	gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
					 val, !val, 1, 2000);
	if (ret)
		dev_err(pfdev->dev, "tiler power transition timeout");

	gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
	ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
				 val, !val, 0, 2000);
	if (ret)
		dev_err(pfdev->dev, "l2 power transition timeout");
}
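
/*
 * Flag the GPU as suspended before masking and draining the IRQ so a
 * racing handler sees the bit and bails out with IRQ_NONE.
 */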
void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
{
	set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);

	gpu_write(pfdev, GPU_INT_MASK, 0);
	synchronize_irq(pfdev->gpu_irq);
}

int panfrost_gpu_init(struct panfrost_device *pfdev)
{
	int err;

	err = panfrost_gpu_soft_reset(pfdev);
	if (err)
		return err;

	panfrost_gpu_init_features(pfdev);
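
	/*
	 * Bits [15:8] of MMU_FEATURES hold the number of physical address
	 * bits; size the DMA mask to match.
	 */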
	err = dma_set_mask_and_coherent(pfdev->dev,
		DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
	if (err)
		return err;

	dma_set_max_seg_size(pfdev->dev, UINT_MAX);

	pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
	if (pfdev->gpu_irq < 0)
		return pfdev->gpu_irq;

	err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request gpu irq");
		return err;
	}

	panfrost_gpu_power_on(pfdev);

	return 0;
}

void panfrost_gpu_fini(struct panfrost_device *pfdev)
{
	panfrost_gpu_power_off(pfdev);
}

u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
{
	u32 flush_id;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
		/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
		if (pm_runtime_get_if_in_use(pfdev->dev)) {
			flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
			pm_runtime_put(pfdev->dev);
			return flush_id;
		}
	}

	return 0;
}