  1. // SPDX-License-Identifier: MIT
  2. #include <linux/fb.h>
  3. #include <linux/vmalloc.h>
  4. #include <drm/drm_crtc_helper.h>
  5. #include <drm/drm_drv.h>
  6. #include <drm/drm_fb_dma_helper.h>
  7. #include <drm/drm_fb_helper.h>
  8. #include <drm/drm_framebuffer.h>
  9. #include <drm/drm_gem_dma_helper.h>
  10. #include <drm/drm_fbdev_dma.h>
  11. /*
  12. * struct fb_ops
  13. */
  14. static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
  15. {
  16. struct drm_fb_helper *fb_helper = info->par;
  17. /* No need to take a ref for fbcon because it unbinds on unregister */
  18. if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
  19. return -ENODEV;
  20. return 0;
  21. }
  22. static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
  23. {
  24. struct drm_fb_helper *fb_helper = info->par;
  25. if (user)
  26. module_put(fb_helper->dev->driver->fops->owner);
  27. return 0;
  28. }
  29. static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
  30. {
  31. struct drm_fb_helper *fb_helper = info->par;
  32. return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
  33. }
/*
 * Final teardown of the fbdev device. Runs when the last fb_info
 * reference is dropped; releases the helper, the client buffer and
 * the helper allocation itself.
 */
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
        struct drm_fb_helper *fb_helper = info->par;

        /* Nothing to tear down if the helper never got a device */
        if (!fb_helper->dev)
                return;

        /* Stop the deferred-I/O worker before finalizing the helper */
        if (info->fbdefio)
                fb_deferred_io_cleanup(info);
        drm_fb_helper_fini(fb_helper);

        /* Release buffer mapping and client state in reverse setup order */
        drm_client_buffer_vunmap(fb_helper->buffer);
        drm_client_framebuffer_delete(fb_helper->buffer);
        drm_client_release(&fb_helper->client);
        drm_fb_helper_unprepare(fb_helper);
        kfree(fb_helper);
}
/* fb_ops for the directly-mapped (non-shadowed) framebuffer */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
        .owner = THIS_MODULE,
        .fb_open = drm_fbdev_dma_fb_open,
        .fb_release = drm_fbdev_dma_fb_release,
        __FB_DEFAULT_DMAMEM_OPS_RDWR, /* default read/write ops for DMA-able memory */
        DRM_FB_HELPER_DEFAULT_OPS, /* default DRM fb-helper ops (check_var, set_par, ...) */
        __FB_DEFAULT_DMAMEM_OPS_DRAW, /* default fillrect/copyarea/imageblit ops */
        .fb_mmap = drm_fbdev_dma_fb_mmap,
        .fb_destroy = drm_fbdev_dma_fb_destroy,
};
/*
 * Generate deferred-I/O fb ops (drm_fbdev_dma_shadowed_*) that record
 * damaged ranges/areas via the DRM fb-helper damage handlers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
                                   drm_fb_helper_damage_range,
                                   drm_fb_helper_damage_area);
/*
 * Teardown for the shadow-buffered fbdev device. Mirrors
 * drm_fbdev_dma_fb_destroy() but additionally frees the vmalloc'ed
 * shadow buffer that backed deferred I/O.
 */
static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
        struct drm_fb_helper *fb_helper = info->par;
        /* grab the shadow pointer up front; it is freed after helper fini */
        void *shadow = info->screen_buffer;

        /* Nothing to tear down if the helper never got a device */
        if (!fb_helper->dev)
                return;

        /* Stop the deferred-I/O worker before finalizing the helper */
        if (info->fbdefio)
                fb_deferred_io_cleanup(info);
        drm_fb_helper_fini(fb_helper);
        /* safe to free now: no further deferred I/O can touch the shadow */
        vfree(shadow);

        /* Release buffer mapping and client state in reverse setup order */
        drm_client_buffer_vunmap(fb_helper->buffer);
        drm_client_framebuffer_delete(fb_helper->buffer);
        drm_client_release(&fb_helper->client);
        drm_fb_helper_unprepare(fb_helper);
        kfree(fb_helper);
}
/* fb_ops for the shadow-buffered framebuffer (deferred I/O) */
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
        .owner = THIS_MODULE,
        .fb_open = drm_fbdev_dma_fb_open,
        .fb_release = drm_fbdev_dma_fb_release,
        FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed), /* generated deferred-I/O ops */
        DRM_FB_HELPER_DEFAULT_OPS, /* default DRM fb-helper ops (check_var, set_par, ...) */
        .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
  85. /*
  86. * struct drm_fb_helper
  87. */
/*
 * Copy the damaged clip rectangle from the fbdev shadow buffer into
 * the client buffer's mapping @dst. @dst is advanced in place, so
 * callers must pass a scratch copy of the map.
 */
static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
                                           struct drm_clip_rect *clip,
                                           struct iosys_map *dst)
{
        struct drm_framebuffer *fb = fb_helper->fb;
        size_t offset = clip->y1 * fb->pitches[0];
        size_t len = clip->x2 - clip->x1; /* width in pixels, scaled to bytes below */
        unsigned int y;
        void *src;

        /*
         * Convert the clip's x-range into a byte-aligned span. For
         * sub-byte formats, partial bytes at the edges are rounded
         * outward so the entire pixel run is copied.
         */
        switch (drm_format_info_bpp(fb->format, 0)) {
        case 1:
                /* 1 bpp: 8 pixels per byte */
                offset += clip->x1 / 8;
                len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
                break;
        case 2:
                /* 2 bpp: 4 pixels per byte */
                offset += clip->x1 / 4;
                len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
                break;
        case 4:
                /* 4 bpp: 2 pixels per byte */
                offset += clip->x1 / 2;
                len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
                break;
        default:
                /* whole-byte formats: scale by bytes per pixel */
                offset += clip->x1 * fb->format->cpp[0];
                len *= fb->format->cpp[0];
                break;
        }

        src = fb_helper->info->screen_buffer + offset;
        iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

        /* copy the damaged rows one scanline at a time */
        for (y = clip->y1; y < clip->y2; y++) {
                iosys_map_memcpy_to(dst, 0, src, len);
                iosys_map_incr(dst, fb->pitches[0]);
                src += fb->pitches[0];
        }
}
  123. static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
  124. struct drm_clip_rect *clip)
  125. {
  126. struct drm_client_buffer *buffer = fb_helper->buffer;
  127. struct iosys_map dst;
  128. /*
  129. * For fbdev emulation, we only have to protect against fbdev modeset
  130. * operations. Nothing else will involve the client buffer's BO. So it
  131. * is sufficient to acquire struct drm_fb_helper.lock here.
  132. */
  133. mutex_lock(&fb_helper->lock);
  134. dst = buffer->map;
  135. drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
  136. mutex_unlock(&fb_helper->lock);
  137. return 0;
  138. }
/* Legacy .fb_probe entry point; forwards to the fbdev_probe implementation. */
static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
                                         struct drm_fb_helper_surface_size *sizes)
{
        return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
}
  144. static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
  145. struct drm_clip_rect *clip)
  146. {
  147. struct drm_device *dev = helper->dev;
  148. int ret;
  149. /* Call damage handlers only if necessary */
  150. if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
  151. return 0;
  152. if (helper->fb->funcs->dirty) {
  153. ret = drm_fbdev_dma_damage_blit(helper, clip);
  154. if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
  155. return ret;
  156. ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
  157. if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
  158. return ret;
  159. }
  160. return 0;
  161. }
/* fb-helper callbacks shared by both fbdev configurations */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
        .fb_probe = drm_fbdev_dma_helper_fb_probe,
        .fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
/*
 * Fbdev probe
 */
/*
 * Finish probing for the directly-mapped configuration: fbdev works
 * straight on the vmapped DMA buffer, no shadow buffer involved.
 */
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
                                                 struct drm_fb_helper_surface_size *sizes)
{
        struct drm_device *dev = fb_helper->dev;
        struct drm_client_buffer *buffer = fb_helper->buffer;
        struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
        struct drm_framebuffer *fb = fb_helper->fb;
        struct fb_info *info = fb_helper->info;
        struct iosys_map map = buffer->map;

        info->fbops = &drm_fbdev_dma_fb_ops;

        /* screen */
        info->flags |= FBINFO_VIRTFB; /* system memory */
        if (dma_obj->map_noncoherent)
                info->flags |= FBINFO_READS_FAST; /* signal caching */
        info->screen_size = sizes->surface_height * fb->pitches[0];
        info->screen_buffer = map.vaddr;
        /*
         * Only advertise smem_start when userspace is allowed to see it.
         * A vmalloc'ed mapping has no single physical address to report,
         * hence the WARN + skip in that case.
         */
        if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
                if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
                        info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
        }
        info->fix.smem_len = info->screen_size;

        return 0;
}
/*
 * Finish probing for the shadow-buffered configuration: fbdev draws
 * into a vmalloc'ed shadow buffer and deferred I/O flushes damage to
 * the real BO via the fb_dirty callback.
 */
static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
                                                          struct drm_fb_helper_surface_size *sizes)
{
        struct drm_client_buffer *buffer = fb_helper->buffer;
        struct fb_info *info = fb_helper->info;
        size_t screen_size = buffer->gem->size;
        void *screen_buffer;
        int ret;

        /*
         * Deferred I/O requires struct page for framebuffer memory,
         * which is not guaranteed for all DMA ranges. We thus create
         * a shadow buffer in system memory.
         */
        screen_buffer = vzalloc(screen_size);
        if (!screen_buffer)
                return -ENOMEM;

        info->fbops = &drm_fbdev_dma_shadowed_fb_ops;

        /* screen */
        info->flags |= FBINFO_VIRTFB; /* system memory */
        info->flags |= FBINFO_READS_FAST; /* signal caching */
        info->screen_buffer = screen_buffer;
        info->fix.smem_len = screen_size;

        /* deferred I/O: flush accumulated damage every 50 ms */
        fb_helper->fbdefio.delay = HZ / 20;
        fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

        /* fbdefio must be set before fb_deferred_io_init() runs */
        info->fbdefio = &fb_helper->fbdefio;
        ret = fb_deferred_io_init(info);
        if (ret)
                goto err_vfree;

        return 0;

err_vfree:
        vfree(screen_buffer);
        return ret;
}
/**
 * drm_fbdev_dma_driver_fbdev_probe - Create an fbdev framebuffer for GEM DMA drivers
 * @fb_helper: fbdev helper structure
 * @sizes: requested surface geometry
 *
 * Allocates a client framebuffer, vmaps it, and finishes setup either
 * with a shadow buffer (if the framebuffer has a dirty callback) or
 * directly on the DMA buffer mapping.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
                                     struct drm_fb_helper_surface_size *sizes)
{
        struct drm_client_dev *client = &fb_helper->client;
        struct drm_device *dev = fb_helper->dev;
        struct drm_client_buffer *buffer;
        struct drm_framebuffer *fb;
        struct fb_info *info;
        u32 format;
        struct iosys_map map;
        int ret;

        drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
                    sizes->surface_width, sizes->surface_height,
                    sizes->surface_bpp);

        format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
                                             sizes->surface_depth);
        buffer = drm_client_framebuffer_create(client, sizes->surface_width,
                                               sizes->surface_height, format);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        fb = buffer->fb;

        ret = drm_client_buffer_vmap(buffer, &map);
        if (ret) {
                goto err_drm_client_buffer_delete;
        } else if (drm_WARN_ON(dev, map.is_iomem)) {
                ret = -ENODEV; /* I/O memory not supported; use generic emulation */
                goto err_drm_client_buffer_delete;
        }

        fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
        fb_helper->buffer = buffer;
        fb_helper->fb = fb;

        info = drm_fb_helper_alloc_info(fb_helper);
        if (IS_ERR(info)) {
                ret = PTR_ERR(info);
                goto err_drm_client_buffer_vunmap;
        }

        drm_fb_helper_fill_info(info, fb_helper, sizes);

        /* a dirty callback implies damage tracking, which needs a shadow buffer */
        if (fb->funcs->dirty)
                ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
        else
                ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
        if (ret)
                goto err_drm_fb_helper_release_info;

        return 0;

err_drm_fb_helper_release_info:
        drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
        /* clear back-pointers so fb_destroy won't touch the freed buffer */
        fb_helper->fb = NULL;
        fb_helper->buffer = NULL;
        drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
        drm_client_framebuffer_delete(buffer);
        return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
  280. /*
  281. * struct drm_client_funcs
  282. */
static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
{
        struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

        if (fb_helper->info) {
                /*
                 * fbdev was registered; unregistering drops the last fb_info
                 * reference and full teardown happens in .fb_destroy.
                 */
                drm_fb_helper_unregister_info(fb_helper);
        } else {
                /* fbdev never came up; release the client state directly */
                drm_client_release(&fb_helper->client);
                drm_fb_helper_unprepare(fb_helper);
                kfree(fb_helper);
        }
}
/* Restore the fbdev mode configuration, e.g. after the last DRM client exits. */
static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
{
        drm_fb_helper_lastclose(client->dev);

        return 0;
}
/*
 * Hotplug handler: forwards the event if fbdev is already set up,
 * otherwise performs the initial fbdev configuration.
 */
static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
        struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
        struct drm_device *dev = client->dev;
        int ret;

        /* already initialized: just propagate the hotplug event */
        if (dev->fb_helper)
                return drm_fb_helper_hotplug_event(dev->fb_helper);

        ret = drm_fb_helper_init(dev, fb_helper);
        if (ret)
                goto err_drm_err;

        /* legacy (non-atomic) drivers need unused CRTCs/encoders shut off */
        if (!drm_drv_uses_atomic_modeset(dev))
                drm_helper_disable_unused_functions(dev);

        ret = drm_fb_helper_initial_config(fb_helper);
        if (ret)
                goto err_drm_fb_helper_fini;

        return 0;

err_drm_fb_helper_fini:
        drm_fb_helper_fini(fb_helper);
err_drm_err:
        drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
        return ret;
}
/* DRM client callbacks for the fbdev emulation client */
static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
        .owner = THIS_MODULE,
        .unregister = drm_fbdev_dma_client_unregister,
        .restore = drm_fbdev_dma_client_restore,
        .hotplug = drm_fbdev_dma_client_hotplug,
};
  327. /**
  328. * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
  329. * @dev: DRM device
  330. * @preferred_bpp: Preferred bits per pixel for the device.
  331. * 32 is used if this is zero.
  332. *
  333. * This function sets up fbdev emulation for GEM DMA drivers that support
  334. * dumb buffers with a virtual address and that can be mmap'ed.
  335. * drm_fbdev_dma_setup() shall be called after the DRM driver registered
  336. * the new DRM device with drm_dev_register().
  337. *
  338. * Restore, hotplug events and teardown are all taken care of. Drivers that do
  339. * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
  340. * Simple drivers might use drm_mode_config_helper_suspend().
  341. *
  342. * This function is safe to call even when there are no connectors present.
  343. * Setup will be retried on the next hotplug event.
  344. *
  345. * The fbdev is destroyed by drm_dev_unregister().
  346. */
  347. void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
  348. {
  349. struct drm_fb_helper *fb_helper;
  350. int ret;
  351. drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
  352. drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
  353. fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
  354. if (!fb_helper)
  355. return;
  356. drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
  357. ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
  358. if (ret) {
  359. drm_err(dev, "Failed to register client: %d\n", ret);
  360. goto err_drm_client_init;
  361. }
  362. drm_client_register(&fb_helper->client);
  363. return;
  364. err_drm_client_init:
  365. drm_fb_helper_unprepare(fb_helper);
  366. kfree(fb_helper);
  367. }
  368. EXPORT_SYMBOL(drm_fbdev_dma_setup);