  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015 MediaTek Inc.
  4. */
  5. #include <drm/drm_blend.h>
  6. #include <drm/drm_fourcc.h>
  7. #include <drm/drm_framebuffer.h>
  8. #include <linux/clk.h>
  9. #include <linux/component.h>
  10. #include <linux/module.h>
  11. #include <linux/of.h>
  12. #include <linux/platform_device.h>
  13. #include <linux/pm_runtime.h>
  14. #include <linux/soc/mediatek/mtk-cmdq.h>
  15. #include "mtk_crtc.h"
  16. #include "mtk_ddp_comp.h"
  17. #include "mtk_disp_drv.h"
  18. #include "mtk_drm_drv.h"
  19. #define DISP_REG_OVL_INTEN 0x0004
  20. #define OVL_FME_CPL_INT BIT(1)
  21. #define DISP_REG_OVL_INTSTA 0x0008
  22. #define DISP_REG_OVL_EN 0x000c
  23. #define DISP_REG_OVL_RST 0x0014
  24. #define DISP_REG_OVL_ROI_SIZE 0x0020
  25. #define DISP_REG_OVL_DATAPATH_CON 0x0024
  26. #define OVL_LAYER_SMI_ID_EN BIT(0)
  27. #define OVL_BGCLR_SEL_IN BIT(2)
  28. #define OVL_LAYER_AFBC_EN(n) BIT(4+n)
  29. #define DISP_REG_OVL_ROI_BGCLR 0x0028
  30. #define DISP_REG_OVL_SRC_CON 0x002c
  31. #define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
  32. #define DISP_REG_OVL_SRC_SIZE(n) (0x0038 + 0x20 * (n))
  33. #define DISP_REG_OVL_OFFSET(n) (0x003c + 0x20 * (n))
  34. #define DISP_REG_OVL_PITCH_MSB(n) (0x0040 + 0x20 * (n))
  35. #define OVL_PITCH_MSB_2ND_SUBBUF BIT(16)
  36. #define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
  37. #define OVL_CONST_BLEND BIT(28)
  38. #define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
  39. #define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
  40. #define DISP_REG_OVL_ADDR_MT2701 0x0040
  41. #define DISP_REG_OVL_CLRFMT_EXT 0x02d0
  42. #define OVL_CON_CLRFMT_BIT_DEPTH_MASK(n) (GENMASK(1, 0) << (4 * (n)))
  43. #define OVL_CON_CLRFMT_BIT_DEPTH(depth, n) ((depth) << (4 * (n)))
  44. #define OVL_CON_CLRFMT_8_BIT (0)
  45. #define OVL_CON_CLRFMT_10_BIT (1)
  46. #define DISP_REG_OVL_ADDR_MT8173 0x0f40
  47. #define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
  48. #define DISP_REG_OVL_HDR_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x04)
  49. #define DISP_REG_OVL_HDR_PITCH(ovl, n) ((ovl)->data->addr + 0x20 * (n) + 0x08)
  50. #define GMC_THRESHOLD_BITS 16
  51. #define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
  52. #define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
  53. #define OVL_CON_CLRFMT_MAN BIT(23)
  54. #define OVL_CON_BYTE_SWAP BIT(24)
  55. /* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */
  56. #define OVL_CON_RGB_SWAP BIT(25)
  57. #define OVL_CON_CLRFMT_RGB (1 << 12)
  58. #define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
  59. #define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
  60. #define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
  61. #define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
  62. #define OVL_CON_CLRFMT_UYVY (4 << 12)
  63. #define OVL_CON_CLRFMT_YUYV (5 << 12)
  64. #define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
  65. #define OVL_CON_CLRFMT_PARGB8888 ((3 << 12) | OVL_CON_CLRFMT_MAN)
  66. #define OVL_CON_CLRFMT_PABGR8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP)
  67. #define OVL_CON_CLRFMT_PBGRA8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP)
  68. #define OVL_CON_CLRFMT_PRGBA8888 (OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP)
  69. #define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
  70. 0 : OVL_CON_CLRFMT_RGB)
  71. #define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
  72. OVL_CON_CLRFMT_RGB : 0)
  73. #define OVL_CON_AEN BIT(8)
  74. #define OVL_CON_ALPHA 0xff
  75. #define OVL_CON_VIRT_FLIP BIT(9)
  76. #define OVL_CON_HORZ_FLIP BIT(10)
  77. #define OVL_COLOR_ALPHA GENMASK(31, 24)
  78. static inline bool is_10bit_rgb(u32 fmt)
  79. {
  80. switch (fmt) {
  81. case DRM_FORMAT_XRGB2101010:
  82. case DRM_FORMAT_ARGB2101010:
  83. case DRM_FORMAT_RGBX1010102:
  84. case DRM_FORMAT_RGBA1010102:
  85. case DRM_FORMAT_XBGR2101010:
  86. case DRM_FORMAT_ABGR2101010:
  87. case DRM_FORMAT_BGRX1010102:
  88. case DRM_FORMAT_BGRA1010102:
  89. return true;
  90. }
  91. return false;
  92. }
/* Pixel formats exposed by 8-bit-only OVL generations (MT2701/MT8173 class). */
static const u32 mt8173_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
};
/*
 * Pixel formats for MT8195-class OVL: the 8-bit set plus the
 * 10-bit-per-component (2101010 / 1010102) RGB variants.
 */
static const u32 mt8195_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_BGRX1010102,
	DRM_FORMAT_BGRA1010102,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_RGBX1010102,
	DRM_FORMAT_RGBA1010102,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
};
/**
 * struct mtk_disp_ovl_data - SoC-specific OVL configuration
 * @addr: offset of the first layer address register (base for
 *        DISP_REG_OVL_ADDR/HDR_ADDR/HDR_PITCH)
 * @gmc_bits: bit width used to scale the RDMA GMC threshold fields (8 or 10)
 * @layer_nr: number of hardware overlay layers
 * @fmt_rgb565_is_0: true when the CLRFMT encoding uses 0 for RGB565 and
 *                   OVL_CON_CLRFMT_RGB for RGB888; false for the inverse
 * @smi_id_en: whether OVL_LAYER_SMI_ID_EN must be toggled on start/stop
 * @supports_afbc: hardware can scan out AFBC-compressed buffers
 * @blend_modes: bitmask of BIT(DRM_MODE_BLEND_*) modes the hardware supports
 * @formats: table of supported DRM fourcc formats
 * @num_formats: number of entries in @formats
 * @supports_clrfmt_ext: hardware has DISP_REG_OVL_CLRFMT_EXT for per-layer
 *                       8/10-bit depth selection
 */
struct mtk_disp_ovl_data {
	unsigned int addr;
	unsigned int gmc_bits;
	unsigned int layer_nr;
	bool fmt_rgb565_is_0;
	bool smi_id_en;
	bool supports_afbc;
	const u32 blend_modes;
	const u32 *formats;
	size_t num_formats;
	bool supports_clrfmt_ext;
};
/*
 * struct mtk_disp_ovl - DISP_OVL driver structure
 * @crtc: associated crtc to report vblank events to
 * @clk: clock gating this OVL instance
 * @regs: mapped MMIO registers of the OVL block
 * @cmdq_reg: CMDQ (GCE) client register description for this block
 * @data: platform data
 * @vblank_cb: callback invoked from the frame-completion interrupt
 * @vblank_cb_data: opaque argument passed to @vblank_cb
 */
struct mtk_disp_ovl {
	struct drm_crtc *crtc;
	struct clk *clk;
	void __iomem *regs;
	struct cmdq_client_reg cmdq_reg;
	const struct mtk_disp_ovl_data *data;
	void (*vblank_cb)(void *data);
	void *vblank_cb_data;
};
/*
 * Frame-completion interrupt handler: acknowledge the interrupt first,
 * then forward it to the registered vblank callback, if any.
 */
static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
{
	struct mtk_disp_ovl *priv = dev_id;

	/* Clear frame completion interrupt */
	writel(0x0, priv->regs + DISP_REG_OVL_INTSTA);

	if (!priv->vblank_cb)
		return IRQ_NONE;

	priv->vblank_cb(priv->vblank_cb_data);

	return IRQ_HANDLED;
}
  165. void mtk_ovl_register_vblank_cb(struct device *dev,
  166. void (*vblank_cb)(void *),
  167. void *vblank_cb_data)
  168. {
  169. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  170. ovl->vblank_cb = vblank_cb;
  171. ovl->vblank_cb_data = vblank_cb_data;
  172. }
  173. void mtk_ovl_unregister_vblank_cb(struct device *dev)
  174. {
  175. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  176. ovl->vblank_cb = NULL;
  177. ovl->vblank_cb_data = NULL;
  178. }
  179. void mtk_ovl_enable_vblank(struct device *dev)
  180. {
  181. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  182. writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
  183. writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN);
  184. }
  185. void mtk_ovl_disable_vblank(struct device *dev)
  186. {
  187. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  188. writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
  189. }
  190. u32 mtk_ovl_get_blend_modes(struct device *dev)
  191. {
  192. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  193. return ovl->data->blend_modes;
  194. }
  195. const u32 *mtk_ovl_get_formats(struct device *dev)
  196. {
  197. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  198. return ovl->data->formats;
  199. }
  200. size_t mtk_ovl_get_num_formats(struct device *dev)
  201. {
  202. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  203. return ovl->data->num_formats;
  204. }
  205. bool mtk_ovl_is_afbc_supported(struct device *dev)
  206. {
  207. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  208. return ovl->data->supports_afbc;
  209. }
  210. int mtk_ovl_clk_enable(struct device *dev)
  211. {
  212. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  213. return clk_prepare_enable(ovl->clk);
  214. }
  215. void mtk_ovl_clk_disable(struct device *dev)
  216. {
  217. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  218. clk_disable_unprepare(ovl->clk);
  219. }
/*
 * Start the OVL engine.  On SoCs that require it, the SMI ID enable bit in
 * DATAPATH_CON is set before the engine enable bit.
 */
void mtk_ovl_start(struct device *dev)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	if (ovl->data->smi_id_en) {
		unsigned int reg;

		reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
		reg = reg | OVL_LAYER_SMI_ID_EN;
		writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
	}
	writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN);
}
/*
 * Stop the OVL engine, mirroring mtk_ovl_start() in reverse order: the
 * engine is disabled first, then the SMI ID enable bit is cleared where
 * applicable.
 */
void mtk_ovl_stop(struct device *dev)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN);
	if (ovl->data->smi_id_en) {
		unsigned int reg;

		reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
		reg = reg & ~OVL_LAYER_SMI_ID_EN;
		writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
	}
}
  242. static void mtk_ovl_set_afbc(struct mtk_disp_ovl *ovl, struct cmdq_pkt *cmdq_pkt,
  243. int idx, bool enabled)
  244. {
  245. mtk_ddp_write_mask(cmdq_pkt, enabled ? OVL_LAYER_AFBC_EN(idx) : 0,
  246. &ovl->cmdq_reg, ovl->regs,
  247. DISP_REG_OVL_DATAPATH_CON, OVL_LAYER_AFBC_EN(idx));
  248. }
  249. static void mtk_ovl_set_bit_depth(struct device *dev, int idx, u32 format,
  250. struct cmdq_pkt *cmdq_pkt)
  251. {
  252. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  253. unsigned int bit_depth = OVL_CON_CLRFMT_8_BIT;
  254. if (!ovl->data->supports_clrfmt_ext)
  255. return;
  256. if (is_10bit_rgb(format))
  257. bit_depth = OVL_CON_CLRFMT_10_BIT;
  258. mtk_ddp_write_mask(cmdq_pkt, OVL_CON_CLRFMT_BIT_DEPTH(bit_depth, idx),
  259. &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_CLRFMT_EXT,
  260. OVL_CON_CLRFMT_BIT_DEPTH_MASK(idx));
  261. }
/*
 * Apply global OVL configuration: ROI size, background colour, and a
 * soft-reset pulse.  @vrefresh and @bpc are part of the common DDP config
 * interface but are not used by this block.
 */
void mtk_ovl_config(struct device *dev, unsigned int w,
		    unsigned int h, unsigned int vrefresh,
		    unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	/* Only program the ROI when a non-degenerate size is given. */
	if (w != 0 && h != 0)
		mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
				      DISP_REG_OVL_ROI_SIZE);

	/*
	 * The background color must be opaque black (ARGB),
	 * otherwise the alpha blending will have no effect
	 */
	mtk_ddp_write_relaxed(cmdq_pkt, OVL_COLOR_ALPHA, &ovl->cmdq_reg,
			      ovl->regs, DISP_REG_OVL_ROI_BGCLR);

	/* Pulse the soft-reset bit: assert, then deassert. */
	mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
	mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
}
  279. unsigned int mtk_ovl_layer_nr(struct device *dev)
  280. {
  281. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  282. return ovl->data->layer_nr;
  283. }
/*
 * All OVL generations handle 0/180 degree rotation plus X/Y reflection;
 * @dev is unused but kept for interface symmetry with the other callbacks.
 */
unsigned int mtk_ovl_supported_rotations(struct device *dev)
{
	return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
	       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
}
  289. int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
  290. struct mtk_plane_state *mtk_state)
  291. {
  292. struct drm_plane_state *state = &mtk_state->base;
  293. /* check if any unsupported rotation is set */
  294. if (state->rotation & ~mtk_ovl_supported_rotations(dev))
  295. return -EINVAL;
  296. /*
  297. * TODO: Rotating/reflecting YUV buffers is not supported at this time.
  298. * Only RGB[AX] variants are supported.
  299. * Since DRM_MODE_ROTATE_0 means "no rotation", we should not
  300. * reject layers with this property.
  301. */
  302. if (state->fb->format->is_yuv && (state->rotation & ~DRM_MODE_ROTATE_0))
  303. return -EINVAL;
  304. return 0;
  305. }
/*
 * Enable layer @idx: start its RDMA, program the GMC thresholds scaled to
 * this SoC's field width, and set the layer's source enable bit.
 */
void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
		      struct cmdq_pkt *cmdq_pkt)
{
	unsigned int gmc_thrshd_l;
	unsigned int gmc_thrshd_h;
	unsigned int gmc_value;
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs,
		      DISP_REG_OVL_RDMA_CTRL(idx));

	/* Scale the 16-bit threshold constants down to gmc_bits-wide fields. */
	gmc_thrshd_l = GMC_THRESHOLD_LOW >>
		       (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
	gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
		       (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);

	/* The 10-bit register layout packs two fields, the 8-bit one four. */
	if (ovl->data->gmc_bits == 10)
		gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
	else
		gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
			    gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
	mtk_ddp_write(cmdq_pkt, gmc_value,
		      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx));

	mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs,
			   DISP_REG_OVL_SRC_CON, BIT(idx));
}
  329. void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
  330. struct cmdq_pkt *cmdq_pkt)
  331. {
  332. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  333. mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
  334. DISP_REG_OVL_SRC_CON, BIT(idx));
  335. mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
  336. DISP_REG_OVL_RDMA_CTRL(idx));
  337. }
/*
 * Map the pending DRM fourcc to the OVL_CON colour-format bits, choosing
 * the premultiplied-alpha variant of the ARGB formats when the plane's
 * blend mode calls for it.
 */
static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
					struct mtk_plane_state *state)
{
	unsigned int fmt = state->pending.format;
	unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE;

	/*
	 * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet
	 * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888.
	 *
	 * Check blend_modes in the driver data to see if premultiplied mode is supported.
	 * If not, use coverage mode instead to set it to the supported color formats.
	 *
	 * Current DRM assumption is that alpha is default premultiplied, so the bitmask of
	 * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init()
	 * will get an error return from drm_plane_create_blend_mode_property() and
	 * state->base.pixel_blend_mode should not be used.
	 */
	if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
		blend_mode = state->base.pixel_blend_mode;

	switch (fmt) {
	default:
	case DRM_FORMAT_RGB565:
		return OVL_CON_CLRFMT_RGB565(ovl);
	case DRM_FORMAT_BGR565:
		return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
	case DRM_FORMAT_RGB888:
		return OVL_CON_CLRFMT_RGB888(ovl);
	case DRM_FORMAT_BGR888:
		return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_RGBA1010102:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_RGBA8888 :
		       OVL_CON_CLRFMT_PRGBA8888;
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_BGRA8888 :
		       OVL_CON_CLRFMT_PBGRA8888;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_ARGB8888 :
		       OVL_CON_CLRFMT_PARGB8888;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_ABGR8888 :
		       OVL_CON_CLRFMT_PABGR8888;
	case DRM_FORMAT_UYVY:
		return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
	case DRM_FORMAT_YUYV:
		return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB;
	}
}
/*
 * Program the AFBC-related registers for layer @idx.  For compressed
 * (non-linear modifier) buffers this writes the AFBC header address and
 * pitch and flags the second sub-buffer in PITCH_MSB; for linear buffers
 * only the pitch MSB bits are written.
 */
static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
				      unsigned int idx,
				      struct mtk_plane_pending_state *pending,
				      struct cmdq_pkt *cmdq_pkt)
{
	unsigned int pitch_msb = pending->pitch >> 16;
	unsigned int hdr_pitch = pending->hdr_pitch;
	unsigned int hdr_addr = pending->hdr_addr;

	if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
		mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
				      DISP_REG_OVL_HDR_ADDR(ovl, idx));
		mtk_ddp_write_relaxed(cmdq_pkt,
				      OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
		mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
				      DISP_REG_OVL_HDR_PITCH(ovl, idx));
	} else {
		mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
	}
}
/*
 * Fully (re)program layer @idx from @state: colour format, alpha blending,
 * flips, geometry, buffer address, and (where supported) AFBC and bit
 * depth; the layer is then switched on.  A disabled pending state simply
 * turns the layer off.
 */
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
			  struct mtk_plane_state *state,
			  struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
	struct mtk_plane_pending_state *pending = &state->pending;
	unsigned int addr = pending->addr;
	unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
	unsigned int fmt = pending->format;
	unsigned int rotation = pending->rotation;
	unsigned int offset = (pending->y << 16) | pending->x;
	unsigned int src_size = (pending->height << 16) | pending->width;
	unsigned int blend_mode = state->base.pixel_blend_mode;
	unsigned int ignore_pixel_alpha = 0;
	unsigned int con;

	if (!pending->enable) {
		mtk_ovl_layer_off(dev, idx, cmdq_pkt);
		return;
	}

	con = mtk_ovl_fmt_convert(ovl, state);
	if (state->base.fb) {
		/* Plane-wide (constant) alpha lives in the low CON byte. */
		con |= state->base.alpha & OVL_CON_ALPHA;

		/*
		 * For blend_modes supported SoCs, always enable alpha blending.
		 * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set.
		 */
		if (blend_mode || state->base.fb->format->has_alpha)
			con |= OVL_CON_AEN;

		/*
		 * Although the alpha channel can be ignored, CONST_BLD must be enabled
		 * for XRGB format, otherwise OVL will still read the value from memory.
		 * For RGB888 related formats, whether CONST_BLD is enabled or not won't
		 * affect the result. Therefore we use !has_alpha as the condition.
		 */
		if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha)
			ignore_pixel_alpha = OVL_CONST_BLEND;
	}

	/*
	 * Treat rotate 180 as flip x + flip y, and XOR the original rotation value
	 * to flip x + flip y to support both in the same time.
	 */
	if (rotation & DRM_MODE_ROTATE_180)
		rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;

	if (rotation & DRM_MODE_REFLECT_Y) {
		con |= OVL_CON_VIRT_FLIP;
		/* Scan starts from the last line when flipped vertically. */
		addr += (pending->height - 1) * pending->pitch;
	}

	if (rotation & DRM_MODE_REFLECT_X) {
		con |= OVL_CON_HORZ_FLIP;
		/* Scan starts from the end of a line when flipped horizontally. */
		addr += pending->pitch - 1;
	}

	if (ovl->data->supports_afbc)
		mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
				 pending->modifier != DRM_FORMAT_MOD_LINEAR);

	mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_CON(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
			      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_SRC_SIZE(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_OFFSET(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_ADDR(ovl, idx));

	if (ovl->data->supports_afbc)
		mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);

	mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
	mtk_ovl_layer_on(dev, idx, cmdq_pkt);
}
  491. void mtk_ovl_bgclr_in_on(struct device *dev)
  492. {
  493. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  494. unsigned int reg;
  495. reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
  496. reg = reg | OVL_BGCLR_SEL_IN;
  497. writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
  498. }
  499. void mtk_ovl_bgclr_in_off(struct device *dev)
  500. {
  501. struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
  502. unsigned int reg;
  503. reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
  504. reg = reg & ~OVL_BGCLR_SEL_IN;
  505. writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
  506. }
/* Component bind: all setup already happened in probe, so nothing to do. */
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
			     void *data)
{
	return 0;
}
/* Component unbind: nothing to tear down (see mtk_disp_ovl_bind()). */
static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
				void *data)
{
}
/* Hooks used by the component framework to attach this OVL to the master. */
static const struct component_ops mtk_disp_ovl_component_ops = {
	.bind = mtk_disp_ovl_bind,
	.unbind = mtk_disp_ovl_unbind,
};
  520. static int mtk_disp_ovl_probe(struct platform_device *pdev)
  521. {
  522. struct device *dev = &pdev->dev;
  523. struct mtk_disp_ovl *priv;
  524. struct resource *res;
  525. int irq;
  526. int ret;
  527. priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  528. if (!priv)
  529. return -ENOMEM;
  530. irq = platform_get_irq(pdev, 0);
  531. if (irq < 0)
  532. return irq;
  533. priv->clk = devm_clk_get(dev, NULL);
  534. if (IS_ERR(priv->clk))
  535. return dev_err_probe(dev, PTR_ERR(priv->clk),
  536. "failed to get ovl clk\n");
  537. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  538. priv->regs = devm_ioremap_resource(dev, res);
  539. if (IS_ERR(priv->regs))
  540. return dev_err_probe(dev, PTR_ERR(priv->regs),
  541. "failed to ioremap ovl\n");
  542. #if IS_REACHABLE(CONFIG_MTK_CMDQ)
  543. ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
  544. if (ret)
  545. dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
  546. #endif
  547. priv->data = of_device_get_match_data(dev);
  548. platform_set_drvdata(pdev, priv);
  549. ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
  550. IRQF_TRIGGER_NONE, dev_name(dev), priv);
  551. if (ret < 0)
  552. return dev_err_probe(dev, ret, "Failed to request irq %d\n", irq);
  553. pm_runtime_enable(dev);
  554. ret = component_add(dev, &mtk_disp_ovl_component_ops);
  555. if (ret) {
  556. pm_runtime_disable(dev);
  557. return dev_err_probe(dev, ret, "Failed to add component\n");
  558. }
  559. return 0;
  560. }
  561. static void mtk_disp_ovl_remove(struct platform_device *pdev)
  562. {
  563. component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
  564. pm_runtime_disable(&pdev->dev);
  565. }
/* MT2701: 4 layers, 8-bit GMC fields, RGB565 encoded as OVL_CON_CLRFMT_RGB. */
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT2701,
	.gmc_bits = 8,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = false,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8173: 4 layers, 8-bit GMC fields, RGB565 encoded as 0. */
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 8,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8183 full-size OVL: 4 layers, 10-bit GMC fields. */
static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8183 2-layer ("2l") OVL variant. */
static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 2,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8192: adds SMI ID enable handling and blend-mode support. */
static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8192 2-layer OVL variant. */
static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 2,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8195: adds AFBC, 10-bit formats, and the CLRFMT_EXT depth register. */
static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.supports_afbc = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8195_formats,
	.num_formats = ARRAY_SIZE(mt8195_formats),
	.supports_clrfmt_ext = true,
};
/* DT match table binding each compatible string to its SoC configuration. */
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
	{ .compatible = "mediatek,mt2701-disp-ovl",
	  .data = &mt2701_ovl_driver_data},
	{ .compatible = "mediatek,mt8173-disp-ovl",
	  .data = &mt8173_ovl_driver_data},
	{ .compatible = "mediatek,mt8183-disp-ovl",
	  .data = &mt8183_ovl_driver_data},
	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
	  .data = &mt8183_ovl_2l_driver_data},
	{ .compatible = "mediatek,mt8192-disp-ovl",
	  .data = &mt8192_ovl_driver_data},
	{ .compatible = "mediatek,mt8192-disp-ovl-2l",
	  .data = &mt8192_ovl_2l_driver_data},
	{ .compatible = "mediatek,mt8195-disp-ovl",
	  .data = &mt8195_ovl_driver_data},
	{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
/* Platform driver glue; registered by the top-level mtk_drm driver. */
struct platform_driver mtk_disp_ovl_driver = {
	.probe = mtk_disp_ovl_probe,
	.remove_new = mtk_disp_ovl_remove,
	.driver = {
		.name = "mediatek-disp-ovl",
		.of_match_table = mtk_disp_ovl_driver_dt_match,
	},
};