vc4_kms.c
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
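
/*
 * Worked example for the conversion above: 0.5 in S31.32 is
 * 0x0000000080000000, and (0x80000000 >> 23) & 0x1ff = 0x100, i.e.
 * 256/512 = 0.5 in S0.9. -0.5 additionally sets the sign bit:
 * 0x200 | 0x100 = 0x300. 2.0 (0x0000000200000000) has bits set in
 * [62:32] and so saturates to 0x1ff, just under 1.0, the largest
 * magnitude the HW can represent.
 */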

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
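
/*
 * For reference, drm_color_ctm.matrix is a row-major 3x3 matrix applied
 * as out = M * in:
 *
 *   | R' |   | m[0] m[1] m[2] |   | R |
 *   | G' | = | m[3] m[4] m[5] | * | G |
 *   | B' |   | m[6] m[7] m[8] |   | B |
 *
 * which is why matrix[0], matrix[3] and matrix[6] (the R column) feed
 * the R_TO_R, R_TO_G and R_TO_B fields of SCALER_OLEDCOEF2 above.
 */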

static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are completed asynchronously from a
 * work queue.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */
	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				/* Drop the sign-magnitude sign bit; 1.0 in
				 * S31.32 is exactly BIT_ULL(32).
				 */
				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}
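
/*
 * To illustrate the magnitude check above: a coefficient of 1.5 arrives
 * from userland as 0x0000000180000000 in S31.32; with the sign bit
 * masked off it exceeds BIT_ULL(32) (exactly 1.0), so the whole commit
 * is rejected with -EINVAL. A value of 0xffffffff (just under 1.0)
 * passes, and is later squeezed into S0.9 by vc4_ctm_s31_32_to_s0_9().
 */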

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	return drm_atomic_helper_check(dev, state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	int ret;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	/* The zeroed state (fifo = 0, i.e. CTM disabled) is the baseline
	 * for the CTM private object.
	 */
	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;
	drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_connector)
		drm_fb_cma_fbdev_init(dev, 32, 0);

	drm_kms_helper_poll_init(dev);

	return 0;
}