  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Xilinx Video DMA
  4. *
  5. * Copyright (C) 2013-2015 Ideas on Board
  6. * Copyright (C) 2013-2015 Xilinx, Inc.
  7. *
  8. * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
  9. * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  10. */
  11. #include <linux/dma/xilinx_dma.h>
  12. #include <linux/lcm.h>
  13. #include <linux/list.h>
  14. #include <linux/module.h>
  15. #include <linux/of.h>
  16. #include <linux/slab.h>
  17. #include <media/v4l2-dev.h>
  18. #include <media/v4l2-fh.h>
  19. #include <media/v4l2-ioctl.h>
  20. #include <media/videobuf2-v4l2.h>
  21. #include <media/videobuf2-dma-contig.h>
  22. #include "xilinx-dma.h"
  23. #include "xilinx-vip.h"
  24. #include "xilinx-vipp.h"
  25. #define XVIP_DMA_DEF_WIDTH 1920
  26. #define XVIP_DMA_DEF_HEIGHT 1080
  27. /* Minimum and maximum widths are expressed in bytes */
  28. #define XVIP_DMA_MIN_WIDTH 1U
  29. #define XVIP_DMA_MAX_WIDTH 65535U
  30. #define XVIP_DMA_MIN_HEIGHT 1U
  31. #define XVIP_DMA_MAX_HEIGHT 8191U
  32. /* -----------------------------------------------------------------------------
  33. * Helper functions
  34. */
  35. static struct v4l2_subdev *
  36. xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
  37. {
  38. struct media_pad *remote;
  39. remote = media_pad_remote_pad_first(local);
  40. if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
  41. return NULL;
  42. if (pad)
  43. *pad = remote->index;
  44. return media_entity_to_v4l2_subdev(remote->entity);
  45. }
  46. static int xvip_dma_verify_format(struct xvip_dma *dma)
  47. {
  48. struct v4l2_subdev_format fmt = {
  49. .which = V4L2_SUBDEV_FORMAT_ACTIVE,
  50. };
  51. struct v4l2_subdev *subdev;
  52. int ret;
  53. subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
  54. if (subdev == NULL)
  55. return -EPIPE;
  56. ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  57. if (ret < 0)
  58. return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  59. if (dma->fmtinfo->code != fmt.format.code ||
  60. dma->format.height != fmt.format.height ||
  61. dma->format.width != fmt.format.width ||
  62. dma->format.colorspace != fmt.format.colorspace)
  63. return -EINVAL;
  64. return 0;
  65. }
  66. /* -----------------------------------------------------------------------------
  67. * Pipeline Stream Management
  68. */
  69. /**
  70. * xvip_pipeline_start_stop - Start ot stop streaming on a pipeline
  71. * @pipe: The pipeline
  72. * @start: Start (when true) or stop (when false) the pipeline
  73. *
  74. * Walk the entities chain starting at the pipeline output video node and start
  75. * or stop all of them.
  76. *
  77. * Return: 0 if successful, or the return value of the failed video::s_stream
  78. * operation otherwise.
  79. */
  80. static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
  81. {
  82. struct xvip_dma *dma = pipe->output;
  83. struct media_entity *entity;
  84. struct media_pad *pad;
  85. struct v4l2_subdev *subdev;
  86. int ret;
  87. entity = &dma->video.entity;
  88. while (1) {
  89. pad = &entity->pads[0];
  90. if (!(pad->flags & MEDIA_PAD_FL_SINK))
  91. break;
  92. pad = media_pad_remote_pad_first(pad);
  93. if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
  94. break;
  95. entity = pad->entity;
  96. subdev = media_entity_to_v4l2_subdev(entity);
  97. ret = v4l2_subdev_call(subdev, video, s_stream, start);
  98. if (start && ret < 0 && ret != -ENOIOCTLCMD)
  99. return ret;
  100. }
  101. return 0;
  102. }
  103. /**
  104. * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
  105. * @pipe: The pipeline
  106. * @on: Turn the stream on when true or off when false
  107. *
  108. * The pipeline is shared between all DMA engines connect at its input and
  109. * output. While the stream state of DMA engines can be controlled
  110. * independently, pipelines have a shared stream state that enable or disable
  111. * all entities in the pipeline. For this reason the pipeline uses a streaming
  112. * counter that tracks the number of DMA engines that have requested the stream
  113. * to be enabled.
  114. *
  115. * When called with the @on argument set to true, this function will increment
  116. * the pipeline streaming count. If the streaming count reaches the number of
  117. * DMA engines in the pipeline it will enable all entities that belong to the
  118. * pipeline.
  119. *
  120. * Similarly, when called with the @on argument set to false, this function will
  121. * decrement the pipeline streaming count and disable all entities in the
  122. * pipeline when the streaming count reaches zero.
  123. *
  124. * Return: 0 if successful, or the return value of the failed video::s_stream
  125. * operation otherwise. Stopping the pipeline never fails. The pipeline state is
  126. * not updated when the operation fails.
  127. */
  128. static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
  129. {
  130. int ret = 0;
  131. mutex_lock(&pipe->lock);
  132. if (on) {
  133. if (pipe->stream_count == pipe->num_dmas - 1) {
  134. ret = xvip_pipeline_start_stop(pipe, true);
  135. if (ret < 0)
  136. goto done;
  137. }
  138. pipe->stream_count++;
  139. } else {
  140. if (--pipe->stream_count == 0)
  141. xvip_pipeline_start_stop(pipe, false);
  142. }
  143. done:
  144. mutex_unlock(&pipe->lock);
  145. return ret;
  146. }
  147. static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
  148. struct xvip_dma *start)
  149. {
  150. struct media_pipeline_pad_iter iter;
  151. unsigned int num_inputs = 0;
  152. unsigned int num_outputs = 0;
  153. struct media_pad *pad;
  154. /* Locate the video nodes in the pipeline. */
  155. media_pipeline_for_each_pad(&pipe->pipe, &iter, pad) {
  156. struct xvip_dma *dma;
  157. if (pad->entity->function != MEDIA_ENT_F_IO_V4L)
  158. continue;
  159. dma = to_xvip_dma(media_entity_to_video_device(pad->entity));
  160. if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
  161. pipe->output = dma;
  162. num_outputs++;
  163. } else {
  164. num_inputs++;
  165. }
  166. }
  167. /* We need exactly one output and zero or one input. */
  168. if (num_outputs != 1 || num_inputs > 1)
  169. return -EPIPE;
  170. pipe->num_dmas = num_inputs + num_outputs;
  171. return 0;
  172. }
  173. static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
  174. {
  175. pipe->num_dmas = 0;
  176. pipe->output = NULL;
  177. }
  178. /**
  179. * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
  180. * @pipe: the pipeline
  181. *
  182. * Decrease the pipeline use count and clean it up if we were the last user.
  183. */
  184. static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
  185. {
  186. mutex_lock(&pipe->lock);
  187. /* If we're the last user clean up the pipeline. */
  188. if (--pipe->use_count == 0)
  189. __xvip_pipeline_cleanup(pipe);
  190. mutex_unlock(&pipe->lock);
  191. }
  192. /**
  193. * xvip_pipeline_prepare - Prepare the pipeline for streaming
  194. * @pipe: the pipeline
  195. * @dma: DMA engine at one end of the pipeline
  196. *
  197. * Validate the pipeline if no user exists yet, otherwise just increase the use
  198. * count.
  199. *
  200. * Return: 0 if successful or -EPIPE if the pipeline is not valid.
  201. */
  202. static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
  203. struct xvip_dma *dma)
  204. {
  205. int ret;
  206. mutex_lock(&pipe->lock);
  207. /* If we're the first user validate and initialize the pipeline. */
  208. if (pipe->use_count == 0) {
  209. ret = xvip_pipeline_validate(pipe, dma);
  210. if (ret < 0) {
  211. __xvip_pipeline_cleanup(pipe);
  212. goto done;
  213. }
  214. }
  215. pipe->use_count++;
  216. ret = 0;
  217. done:
  218. mutex_unlock(&pipe->lock);
  219. return ret;
  220. }
  221. /* -----------------------------------------------------------------------------
  222. * videobuf2 queue operations
  223. */
  224. /**
  225. * struct xvip_dma_buffer - Video DMA buffer
  226. * @buf: vb2 buffer base object
  227. * @queue: buffer list entry in the DMA engine queued buffers list
  228. * @dma: DMA channel that uses the buffer
  229. */
  230. struct xvip_dma_buffer {
  231. struct vb2_v4l2_buffer buf;
  232. struct list_head queue;
  233. struct xvip_dma *dma;
  234. };
  235. #define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
  236. static void xvip_dma_complete(void *param)
  237. {
  238. struct xvip_dma_buffer *buf = param;
  239. struct xvip_dma *dma = buf->dma;
  240. spin_lock(&dma->queued_lock);
  241. list_del(&buf->queue);
  242. spin_unlock(&dma->queued_lock);
  243. buf->buf.field = V4L2_FIELD_NONE;
  244. buf->buf.sequence = dma->sequence++;
  245. buf->buf.vb2_buf.timestamp = ktime_get_ns();
  246. vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
  247. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
  248. }
  249. static int
  250. xvip_dma_queue_setup(struct vb2_queue *vq,
  251. unsigned int *nbuffers, unsigned int *nplanes,
  252. unsigned int sizes[], struct device *alloc_devs[])
  253. {
  254. struct xvip_dma *dma = vb2_get_drv_priv(vq);
  255. /* Make sure the image size is large enough. */
  256. if (*nplanes)
  257. return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
  258. *nplanes = 1;
  259. sizes[0] = dma->format.sizeimage;
  260. return 0;
  261. }
  262. static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
  263. {
  264. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  265. struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
  266. struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
  267. buf->dma = dma;
  268. return 0;
  269. }
  270. static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
  271. {
  272. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  273. struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
  274. struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
  275. struct dma_async_tx_descriptor *desc;
  276. dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
  277. u32 flags;
  278. if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
  279. flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
  280. dma->xt.dir = DMA_DEV_TO_MEM;
  281. dma->xt.src_sgl = false;
  282. dma->xt.dst_sgl = true;
  283. dma->xt.dst_start = addr;
  284. } else {
  285. flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
  286. dma->xt.dir = DMA_MEM_TO_DEV;
  287. dma->xt.src_sgl = true;
  288. dma->xt.dst_sgl = false;
  289. dma->xt.src_start = addr;
  290. }
  291. dma->xt.frame_size = 1;
  292. dma->sgl.size = dma->format.width * dma->fmtinfo->bpp;
  293. dma->sgl.icg = dma->format.bytesperline - dma->sgl.size;
  294. dma->xt.numf = dma->format.height;
  295. desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
  296. if (!desc) {
  297. dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
  298. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
  299. return;
  300. }
  301. desc->callback = xvip_dma_complete;
  302. desc->callback_param = buf;
  303. spin_lock_irq(&dma->queued_lock);
  304. list_add_tail(&buf->queue, &dma->queued_bufs);
  305. spin_unlock_irq(&dma->queued_lock);
  306. dmaengine_submit(desc);
  307. if (vb2_is_streaming(&dma->queue))
  308. dma_async_issue_pending(dma->dma);
  309. }
  310. static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
  311. {
  312. struct xvip_dma *dma = vb2_get_drv_priv(vq);
  313. struct xvip_dma_buffer *buf, *nbuf;
  314. struct xvip_pipeline *pipe;
  315. int ret;
  316. dma->sequence = 0;
  317. /*
  318. * Start streaming on the pipeline. No link touching an entity in the
  319. * pipeline can be activated or deactivated once streaming is started.
  320. *
  321. * Use the pipeline object embedded in the first DMA object that starts
  322. * streaming.
  323. */
  324. pipe = to_xvip_pipeline(&dma->video) ? : &dma->pipe;
  325. ret = video_device_pipeline_start(&dma->video, &pipe->pipe);
  326. if (ret < 0)
  327. goto error;
  328. /* Verify that the configured format matches the output of the
  329. * connected subdev.
  330. */
  331. ret = xvip_dma_verify_format(dma);
  332. if (ret < 0)
  333. goto error_stop;
  334. ret = xvip_pipeline_prepare(pipe, dma);
  335. if (ret < 0)
  336. goto error_stop;
  337. /* Start the DMA engine. This must be done before starting the blocks
  338. * in the pipeline to avoid DMA synchronization issues.
  339. */
  340. dma_async_issue_pending(dma->dma);
  341. /* Start the pipeline. */
  342. xvip_pipeline_set_stream(pipe, true);
  343. return 0;
  344. error_stop:
  345. video_device_pipeline_stop(&dma->video);
  346. error:
  347. /* Give back all queued buffers to videobuf2. */
  348. spin_lock_irq(&dma->queued_lock);
  349. list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
  350. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
  351. list_del(&buf->queue);
  352. }
  353. spin_unlock_irq(&dma->queued_lock);
  354. return ret;
  355. }
  356. static void xvip_dma_stop_streaming(struct vb2_queue *vq)
  357. {
  358. struct xvip_dma *dma = vb2_get_drv_priv(vq);
  359. struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video);
  360. struct xvip_dma_buffer *buf, *nbuf;
  361. /* Stop the pipeline. */
  362. xvip_pipeline_set_stream(pipe, false);
  363. /* Stop and reset the DMA engine. */
  364. dmaengine_terminate_all(dma->dma);
  365. /* Cleanup the pipeline and mark it as being stopped. */
  366. xvip_pipeline_cleanup(pipe);
  367. video_device_pipeline_stop(&dma->video);
  368. /* Give back all queued buffers to videobuf2. */
  369. spin_lock_irq(&dma->queued_lock);
  370. list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
  371. vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
  372. list_del(&buf->queue);
  373. }
  374. spin_unlock_irq(&dma->queued_lock);
  375. }
  376. static const struct vb2_ops xvip_dma_queue_qops = {
  377. .queue_setup = xvip_dma_queue_setup,
  378. .buf_prepare = xvip_dma_buffer_prepare,
  379. .buf_queue = xvip_dma_buffer_queue,
  380. .wait_prepare = vb2_ops_wait_prepare,
  381. .wait_finish = vb2_ops_wait_finish,
  382. .start_streaming = xvip_dma_start_streaming,
  383. .stop_streaming = xvip_dma_stop_streaming,
  384. };
  385. /* -----------------------------------------------------------------------------
  386. * V4L2 ioctls
  387. */
  388. static int
  389. xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
  390. {
  391. struct v4l2_fh *vfh = file->private_data;
  392. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  393. cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
  394. V4L2_CAP_DEVICE_CAPS;
  395. strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
  396. strscpy(cap->card, dma->video.name, sizeof(cap->card));
  397. snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%pOFn:%u",
  398. dma->xdev->dev->of_node, dma->port);
  399. return 0;
  400. }
  401. /* FIXME: without this callback function, some applications are not configured
  402. * with correct formats, and it results in frames in wrong format. Whether this
  403. * callback needs to be required is not clearly defined, so it should be
  404. * clarified through the mailing list.
  405. */
  406. static int
  407. xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
  408. {
  409. struct v4l2_fh *vfh = file->private_data;
  410. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  411. if (f->index > 0)
  412. return -EINVAL;
  413. f->pixelformat = dma->format.pixelformat;
  414. return 0;
  415. }
  416. static int
  417. xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
  418. {
  419. struct v4l2_fh *vfh = file->private_data;
  420. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  421. format->fmt.pix = dma->format;
  422. return 0;
  423. }
  424. static void
  425. __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
  426. const struct xvip_video_format **fmtinfo)
  427. {
  428. const struct xvip_video_format *info;
  429. unsigned int min_width;
  430. unsigned int max_width;
  431. unsigned int min_bpl;
  432. unsigned int max_bpl;
  433. unsigned int width;
  434. unsigned int align;
  435. unsigned int bpl;
  436. /* Retrieve format information and select the default format if the
  437. * requested format isn't supported.
  438. */
  439. info = xvip_get_format_by_fourcc(pix->pixelformat);
  440. pix->pixelformat = info->fourcc;
  441. pix->field = V4L2_FIELD_NONE;
  442. /* The transfer alignment requirements are expressed in bytes. Compute
  443. * the minimum and maximum values, clamp the requested width and convert
  444. * it back to pixels.
  445. */
  446. align = lcm(dma->align, info->bpp);
  447. min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
  448. max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
  449. width = rounddown(pix->width * info->bpp, align);
  450. pix->width = clamp(width, min_width, max_width) / info->bpp;
  451. pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
  452. XVIP_DMA_MAX_HEIGHT);
  453. /* Clamp the requested bytes per line value. If the maximum bytes per
  454. * line value is zero, the module doesn't support user configurable line
  455. * sizes. Override the requested value with the minimum in that case.
  456. */
  457. min_bpl = pix->width * info->bpp;
  458. max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
  459. bpl = rounddown(pix->bytesperline, dma->align);
  460. pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
  461. pix->sizeimage = pix->bytesperline * pix->height;
  462. if (fmtinfo)
  463. *fmtinfo = info;
  464. }
  465. static int
  466. xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
  467. {
  468. struct v4l2_fh *vfh = file->private_data;
  469. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  470. __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
  471. return 0;
  472. }
  473. static int
  474. xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
  475. {
  476. struct v4l2_fh *vfh = file->private_data;
  477. struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
  478. const struct xvip_video_format *info;
  479. __xvip_dma_try_format(dma, &format->fmt.pix, &info);
  480. if (vb2_is_busy(&dma->queue))
  481. return -EBUSY;
  482. dma->format = format->fmt.pix;
  483. dma->fmtinfo = info;
  484. return 0;
  485. }
  486. static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
  487. .vidioc_querycap = xvip_dma_querycap,
  488. .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
  489. .vidioc_g_fmt_vid_cap = xvip_dma_get_format,
  490. .vidioc_g_fmt_vid_out = xvip_dma_get_format,
  491. .vidioc_s_fmt_vid_cap = xvip_dma_set_format,
  492. .vidioc_s_fmt_vid_out = xvip_dma_set_format,
  493. .vidioc_try_fmt_vid_cap = xvip_dma_try_format,
  494. .vidioc_try_fmt_vid_out = xvip_dma_try_format,
  495. .vidioc_reqbufs = vb2_ioctl_reqbufs,
  496. .vidioc_querybuf = vb2_ioctl_querybuf,
  497. .vidioc_qbuf = vb2_ioctl_qbuf,
  498. .vidioc_dqbuf = vb2_ioctl_dqbuf,
  499. .vidioc_create_bufs = vb2_ioctl_create_bufs,
  500. .vidioc_expbuf = vb2_ioctl_expbuf,
  501. .vidioc_streamon = vb2_ioctl_streamon,
  502. .vidioc_streamoff = vb2_ioctl_streamoff,
  503. };
  504. /* -----------------------------------------------------------------------------
  505. * V4L2 file operations
  506. */
  507. static const struct v4l2_file_operations xvip_dma_fops = {
  508. .owner = THIS_MODULE,
  509. .unlocked_ioctl = video_ioctl2,
  510. .open = v4l2_fh_open,
  511. .release = vb2_fop_release,
  512. .poll = vb2_fop_poll,
  513. .mmap = vb2_fop_mmap,
  514. };
  515. /* -----------------------------------------------------------------------------
  516. * Xilinx Video DMA Core
  517. */
  518. int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
  519. enum v4l2_buf_type type, unsigned int port)
  520. {
  521. char name[16];
  522. int ret;
  523. dma->xdev = xdev;
  524. dma->port = port;
  525. mutex_init(&dma->lock);
  526. mutex_init(&dma->pipe.lock);
  527. INIT_LIST_HEAD(&dma->queued_bufs);
  528. spin_lock_init(&dma->queued_lock);
  529. dma->fmtinfo = xvip_get_format_by_fourcc(V4L2_PIX_FMT_YUYV);
  530. dma->format.pixelformat = dma->fmtinfo->fourcc;
  531. dma->format.colorspace = V4L2_COLORSPACE_SRGB;
  532. dma->format.field = V4L2_FIELD_NONE;
  533. dma->format.width = XVIP_DMA_DEF_WIDTH;
  534. dma->format.height = XVIP_DMA_DEF_HEIGHT;
  535. dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
  536. dma->format.sizeimage = dma->format.bytesperline * dma->format.height;
  537. /* Initialize the media entity... */
  538. dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
  539. ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
  540. ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
  541. if (ret < 0)
  542. goto error;
  543. /* ... and the video node... */
  544. dma->video.fops = &xvip_dma_fops;
  545. dma->video.v4l2_dev = &xdev->v4l2_dev;
  546. dma->video.queue = &dma->queue;
  547. snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
  548. xdev->dev->of_node,
  549. type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
  550. port);
  551. dma->video.vfl_type = VFL_TYPE_VIDEO;
  552. dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
  553. ? VFL_DIR_RX : VFL_DIR_TX;
  554. dma->video.release = video_device_release_empty;
  555. dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
  556. dma->video.lock = &dma->lock;
  557. dma->video.device_caps = V4L2_CAP_STREAMING;
  558. if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
  559. dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
  560. else
  561. dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;
  562. video_set_drvdata(&dma->video, dma);
  563. /* ... and the buffers queue... */
  564. /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
  565. * V4L2 APIs would be inefficient. Testing on the command line with a
  566. * 'cat /dev/video?' thus won't be possible, but given that the driver
  567. * anyway requires a test tool to setup the pipeline before any video
  568. * stream can be started, requiring a specific V4L2 test tool as well
  569. * instead of 'cat' isn't really a drawback.
  570. */
  571. dma->queue.type = type;
  572. dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
  573. dma->queue.lock = &dma->lock;
  574. dma->queue.drv_priv = dma;
  575. dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
  576. dma->queue.ops = &xvip_dma_queue_qops;
  577. dma->queue.mem_ops = &vb2_dma_contig_memops;
  578. dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
  579. | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
  580. dma->queue.dev = dma->xdev->dev;
  581. ret = vb2_queue_init(&dma->queue);
  582. if (ret < 0) {
  583. dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
  584. goto error;
  585. }
  586. /* ... and the DMA channel. */
  587. snprintf(name, sizeof(name), "port%u", port);
  588. dma->dma = dma_request_chan(dma->xdev->dev, name);
  589. if (IS_ERR(dma->dma)) {
  590. ret = dev_err_probe(dma->xdev->dev, PTR_ERR(dma->dma),
  591. "no VDMA channel found\n");
  592. goto error;
  593. }
  594. dma->align = 1 << dma->dma->device->copy_align;
  595. ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
  596. if (ret < 0) {
  597. dev_err(dma->xdev->dev, "failed to register video device\n");
  598. goto error;
  599. }
  600. return 0;
  601. error:
  602. xvip_dma_cleanup(dma);
  603. return ret;
  604. }
  605. void xvip_dma_cleanup(struct xvip_dma *dma)
  606. {
  607. if (video_is_registered(&dma->video))
  608. video_unregister_device(&dma->video);
  609. if (!IS_ERR_OR_NULL(dma->dma))
  610. dma_release_channel(dma->dma);
  611. media_entity_cleanup(&dma->video.entity);
  612. mutex_destroy(&dma->lock);
  613. mutex_destroy(&dma->pipe.lock);
  614. }