rockchip_drm_vop.c

/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#ifdef CONFIG_DRM_ANALOGIX_DP
#include <drm/bridge/analogix_dp.h>
#endif

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
#define VOP_WIN_SET(x, win, name, v) \
		vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
#define VOP_SCL_SET(x, win, name, v) \
		vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		vop_reg_set(vop, &win->phy->scl->ext->name, \
			    win->base, ~0, v, #name)

#define VOP_INTR_SET_MASK(vop, name, mask, v) \
		vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)

#define VOP_REG_SET(vop, group, name, v) \
		vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)

#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET_MASK(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->offset, win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define VOP_WIN_TO_INDEX(vop_win) \
	((vop_win) - (vop_win)->vop->win)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	struct completion dsp_hold_completion;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of vop registers */
	uint32_t len;

	/* only one process at a time is allowed to configure the registers */
	spinlock_t reg_lock;
	/* lock vop irq reg */
	spinlock_t irq_lock;
	/* protects crtc enable/disable */
	struct mutex vop_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory clock */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}
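
/*
 * Write one register field described by @reg: the value is shifted and
 * masked into place and, for non write-masked registers, merged into the
 * cached copy in regsbak so that later read-modify-write updates see the
 * last value written.
 */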
static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
			uint32_t _offset, uint32_t _mask, uint32_t v,
			const char *reg_name)
{
	int offset, mask, shift;

	if (!reg || !reg->mask) {
		DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name);
		return;
	}

	offset = reg->offset + _offset;
	mask = reg->mask & _mask;
	shift = reg->shift;

	if (reg->write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (reg->relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}

static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}
static inline void vop_cfg_done(struct vop *vop)
{
	VOP_REG_SET(vop, common, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}
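
/*
 * Compute one fixed-point scale factor. Horizontal scaling uses bicubic
 * for upscaling and bilinear for downscaling; vertical scaling picks
 * bilinear or bicubic based on vsu_mode, and vertical downscaling may use
 * line skipping (vskiplines).
 */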
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (vskiplines)
		*vskiplines = 0;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	const struct drm_format_info *info;
	bool is_yuv = false;
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines;

	info = drm_format_info(pixel_format);

	if (info->is_yuv)
		is_yuv = true;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}
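
/*
 * The DSP_HOLD_VALID interrupt fires once the VOP has actually entered
 * standby; vop_crtc_atomic_disable() relies on it to know when the
 * controller is idle before cutting clocks.
 */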
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
 *     to get "LINE_FLAG" interrupt at the end of the active on screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 * Interrupts
 * LINE_FLAG -------------------------------+
 * FRAME_SYNC ----+                         |
 *                |                         |
 *                v                         v
 *                | Vsync | Vbp |  Vactive  | Vfp |
 *                        ^     ^           ^     ^
 *                        |     |           |     |
 *                        |     |           |     |
 * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
 * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static int vop_core_clks_enable(struct vop *vop)
{
	int ret;

	ret = clk_enable(vop->hclk);
	if (ret < 0)
		return ret;

	ret = clk_enable(vop->aclk);
	if (ret < 0)
		goto err_disable_hclk;

	return 0;

err_disable_hclk:
	clk_disable(vop->hclk);
	return ret;
}

static void vop_core_clks_disable(struct vop *vop)
{
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
}

static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
{
	if (win->phy->scl && win->phy->scl->ext) {
		VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
		VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
	}

	VOP_WIN_SET(vop, win, enable, 0);
}
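
/*
 * Power-up path for the CRTC: take a runtime PM reference, enable the
 * clocks, attach the shared iommu mapping, restore the register shadow
 * (regsbak) and make sure every window starts out disabled.
 */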
static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret, i;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = vop_core_clks_enable(vop);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_core;

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		DRM_DEV_ERROR(vop->dev,
			      "failed to attach dma mapping, %d\n", ret);
		goto err_disable_dclk;
	}

	spin_lock(&vop->reg_lock);
	for (i = 0; i < vop->len; i += 4)
		writel_relaxed(vop->regsbak[i / 4], vop->regs + i);

	/*
	 * We need to make sure that all windows are disabled before we
	 * enable the crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		vop_win_disable(vop, win);
	}
	spin_unlock(&vop->reg_lock);

	vop_cfg_done(vop);

	/*
	 * At this point the vop clock and iommu are enabled, so it is safe
	 * to read/write the vop registers.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_REG_SET(vop, common, standby, 1);

	spin_unlock(&vop->reg_lock);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_core:
	vop_core_clks_disable(vop);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}
static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct vop *vop = to_vop(crtc);

	WARN_ON(vop->event);

	mutex_lock(&vop->vop_lock);
	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby takes effect at the end of the current frame;
	 * the dsp hold valid irq signals that standby is complete.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may lock up.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_REG_SET(vop, common, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	vop->is_enabled = false;

	/*
	 * Vop standby is complete, so it is safe to detach the iommu.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	vop_core_clks_disable(vop);
	pm_runtime_put(vop->dev);
	mutex_unlock(&vop->vop_lock);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}
static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (!state->visible)
		return 0;

	ret = vop_convert_format(fb->format->format);
	if (ret < 0)
		return ret;

	/*
	 * src.x1 can become odd after clipping, but the start point of a
	 * YUV plane must be aligned to 2 pixels.
	 */
	if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
		return -EINVAL;
	}

	return 0;
}
static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	vop_win_disable(vop, win);

	spin_unlock(&vop->reg_lock);
}
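
/*
 * Program a window from the committed plane state: format, strides and
 * buffer addresses (including the UV plane for YUV framebuffers),
 * source/destination geometry, optional scaling and alpha blending.
 */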
static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &state->src;
	struct drm_rect *dest = &state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;
	int win_index = VOP_WIN_TO_INDEX(vop_win);
	int format;

	/*
	 * can't update plane when vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!state->visible) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = fb->obj[0];
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * fb->format->cpp[0];
	offset += (src->y1 >> 16) * fb->pitches[0];
	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];

	format = vop_convert_format(fb->format->format);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, format);
	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
	if (fb->format->is_yuv) {
		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
		int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
		int bpp = fb->format->cpp[1];

		uv_obj = fb->obj[1];
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->format->format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	/*
	 * Blending win0 with the background color doesn't seem to work
	 * correctly. We only get the background color, no matter the contents
	 * of the win0 framebuffer.  However, blending pre-multiplied color
	 * with the default opaque black background color is a no-op, so we
	 * can just disable blending to get the correct result.
	 */
	if (fb->format->has_alpha && win_index > 0) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
			     1000);

	return true;
}
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	struct vop *vop = to_vop(crtc);
	const struct vop_data *vop_data = vop->data;
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	mutex_lock(&vop->vop_lock);

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		mutex_unlock(&vop->vop_lock);
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	pin_pol = BIT(DCLK_INVERT);
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
		   BIT(HSYNC_POSITIVE) : 0;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
		   BIT(VSYNC_POSITIVE) : 0;
	VOP_REG_SET(vop, output, pin_pol, pin_pol);

	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_REG_SET(vop, output, rgb_en, 1);
		VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, mipi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		pin_pol &= ~BIT(DCLK_INVERT);
		VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, dp_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}

	/*
	 * If the vop does not support RGB10 output, force RGB10 to RGB888.
	 */
	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
		s->output_mode = ROCKCHIP_OUT_MODE_P888;

	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
		VOP_REG_SET(vop, common, pre_dither_down, 1);
	else
		VOP_REG_SET(vop, common, pre_dither_down, 0);

	VOP_REG_SET(vop, common, out_mode, s->output_mode);

	VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_REG_SET(vop, modeset, hact_st_end, val);
	VOP_REG_SET(vop, modeset, hpost_st_end, val);

	VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_REG_SET(vop, modeset, vact_st_end, val);
	VOP_REG_SET(vop, modeset, vpost_st_end, val);

	VOP_REG_SET(vop, intr, line_flag_num[0], vact_end);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_REG_SET(vop, common, standby, 0);
	mutex_unlock(&vop->vop_lock);
}
static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it. The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong. The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}

static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possibility that a vblank interrupt
	 * fired before we set the cfg_done bit. To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state,
				       new_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == new_plane_state->fb)
			continue;

		drm_framebuffer_get(old_plane_state->fb);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
	}
}
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_enable = vop_crtc_atomic_enable,
	.atomic_disable = vop_crtc_atomic_disable,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

#ifdef CONFIG_DRM_ANALOGIX_DP
static struct drm_connector *vop_get_edp_connector(struct vop *vop)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(vop->drm_dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
			drm_connector_list_iter_end(&conn_iter);
			return connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return NULL;
}

static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name, size_t *values_cnt)
{
	struct vop *vop = to_vop(crtc);
	struct drm_connector *connector;
	int ret;

	connector = vop_get_edp_connector(vop);
	if (!connector)
		return -EINVAL;

	*values_cnt = 3;

	if (source_name && strcmp(source_name, "auto") == 0)
		ret = analogix_dp_start_crc(connector);
	else if (!source_name)
		ret = analogix_dp_stop_crc(connector);
	else
		ret = -EINVAL;

	return ret;
}
#else
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name, size_t *values_cnt)
{
	return -ENODEV;
}
#endif

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.set_crc_source = vop_crtc_set_crc_source,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_put(fb);
}

static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;

	spin_lock(&drm->event_lock);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock(&drm->event_lock);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}

static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	int ret = IRQ_NONE;

	/*
	 * The irq is shared with the iommu. If the runtime-pm state of the
	 * vop-device is disabled the irq has to be targeted at the iommu.
	 */
	if (!pm_runtime_get_if_in_use(vop->dev))
		return IRQ_NONE;

	if (vop_core_clks_enable(vop)) {
		DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
		goto out;
	}

	/*
	 * interrupt register has interrupt status, enable and clear bits, we
	 * must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock(&vop->irq_lock);
	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);
	spin_unlock(&vop->irq_lock);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		goto out_disable;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

out_disable:
	vop_core_clks_disable(vop);
out:
	pm_runtime_put(vop->dev);
	return ret;
}
static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       NULL, win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
				      ret);
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = drm_crtc_mask(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       NULL, win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
				      ret);
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_DEV_ERROR(vop->dev, "no port node found in %pOF\n",
			      dev->of_node);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
			   vop_fb_unref_worker);

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->line_flag_completion);
	crtc->port = port;

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now.  Why?
	 *
	 * The planes are "&vop->win[i].base".  That means the memory is
	 * all part of the big "struct vop" chunk of memory.  That memory
	 * was devm allocated and associated with this component.  We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&vop->fb_unref_work);
}
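
/*
 * One-time hardware setup at bind time: acquire the clocks, pulse the AHB
 * and DCLK resets, clear and disable all interrupts, snapshot the register
 * file into regsbak and leave every window disabled.
 */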
static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n");
		goto err_put_pm_runtime;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * do hclk_reset, reset all vop registers.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
	VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);

	for (i = 0; i < vop->len; i += sizeof(u32))
		vop->regsbak[i / 4] = readl_relaxed(vop->regs + i);

	VOP_REG_SET(vop, misc, global_regdone_en, 1);
	VOP_REG_SET(vop, common, dsp_blank, 0);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];
		int channel = i * 2 + 1;

		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
		vop_win_disable(vop, win);
		VOP_WIN_SET(vop, win, gate, 1);
	}

	vop_cfg_done(vop);

	/*
	 * do dclk_reset, let all config take effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	pm_runtime_put_sync(vop->dev);

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}
/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}

/**
 * rockchip_drm_wait_vact_end
 * @crtc: CRTC to enable line flag
 * @mstimeout: timeout in milliseconds
 *
 * Wait for vact_end line flag irq or timeout.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
{
	struct vop *vop = to_vop(crtc);
	unsigned long jiffies_left;
	int ret = 0;

	if (!crtc || !vop->is_enabled)
		return -ENODEV;

	mutex_lock(&vop->vop_lock);
	if (mstimeout <= 0) {
		ret = -EINVAL;
		goto out;
	}

	if (vop_line_flag_irq_is_enabled(vop)) {
		ret = -EBUSY;
		goto out;
	}

	reinit_completion(&vop->line_flag_completion);
	vop_line_flag_irq_enable(vop);

	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
						   msecs_to_jiffies(mstimeout));
	vop_line_flag_irq_disable(vop);

	if (jiffies_left == 0) {
		DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
		ret = -ETIMEDOUT;
		goto out;
	}

out:
	mutex_unlock(&vop->vop_lock);
	return ret;
}
EXPORT_SYMBOL(rockchip_drm_wait_vact_end);
static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		DRM_DEV_ERROR(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);
	mutex_init(&vop->vop_lock);

	ret = vop_create_crtc(vop);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);

	ret = vop_initial(vop);
	if (ret < 0) {
		DRM_DEV_ERROR(&pdev->dev,
			      "cannot initial vop dev - err %d\n", ret);
		goto err_disable_pm_runtime;
	}

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		goto err_disable_pm_runtime;

	return 0;

err_disable_pm_runtime:
	pm_runtime_disable(&pdev->dev);
	vop_destroy_crtc(vop);
	return ret;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);

	clk_unprepare(vop->aclk);
	clk_unprepare(vop->hclk);
	clk_unprepare(vop->dclk);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);