// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_video.c -- R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"

#define VSP1_VIDEO_DEF_FORMAT	V4L2_PIX_FMT_YUYV
#define VSP1_VIDEO_DEF_WIDTH	1024
#define VSP1_VIDEO_DEF_HEIGHT	768

#define VSP1_VIDEO_MIN_WIDTH	2U
#define VSP1_VIDEO_MAX_WIDTH	8190U
#define VSP1_VIDEO_MIN_HEIGHT	2U
#define VSP1_VIDEO_MAX_HEIGHT	8190U

/* -----------------------------------------------------------------------------
 * Helper functions
 */
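/*
 * Return the subdev connected to the video node's pad, and optionally the
 * index of the remote pad, or NULL if the remote entity is not a subdev.
 */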
static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
        struct media_pad *remote;

        remote = media_entity_remote_pad(local);
        if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
                return NULL;

        if (pad)
                *pad = remote->index;

        return media_entity_to_v4l2_subdev(remote->entity);
}
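/*
 * Check that the active format on the connected subdev pad matches the media
 * bus code and frame size configured on the video node's [RW]PF.
 */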
static int vsp1_video_verify_format(struct vsp1_video *video)
{
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        int ret;

        subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
        if (subdev == NULL)
                return -EINVAL;

        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret < 0)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
            video->rwpf->format.height != fmt.format.height ||
            video->rwpf->format.width != fmt.format.width)
                return -EINVAL;

        return 0;
}
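/*
 * Adjust the requested pixel format to the closest hardware-supported
 * configuration: map deprecated RGB formats to their XRGB equivalents, fall
 * back to the default format when the request isn't supported, and align,
 * clamp and compute the per-plane strides and image sizes.
 */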
static int __vsp1_video_try_format(struct vsp1_video *video,
                                   struct v4l2_pix_format_mplane *pix,
                                   const struct vsp1_format_info **fmtinfo)
{
        static const u32 xrgb_formats[][2] = {
                { V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
                { V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
                { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
                { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
        };

        const struct vsp1_format_info *info;
        unsigned int width = pix->width;
        unsigned int height = pix->height;
        unsigned int i;

        /*
         * Backward compatibility: replace deprecated RGB formats by their XRGB
         * equivalent. This selects the format older userspace applications want
         * while still exposing the new format.
         */
        for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
                if (xrgb_formats[i][0] == pix->pixelformat) {
                        pix->pixelformat = xrgb_formats[i][1];
                        break;
                }
        }

        /*
         * Retrieve format information and select the default format if the
         * requested format isn't supported.
         */
        info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
        if (info == NULL)
                info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

        pix->pixelformat = info->fourcc;
        pix->colorspace = V4L2_COLORSPACE_SRGB;
        pix->field = V4L2_FIELD_NONE;

        if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
            info->fourcc == V4L2_PIX_FMT_HSV32)
                pix->hsv_enc = V4L2_HSV_ENC_256;

        memset(pix->reserved, 0, sizeof(pix->reserved));

        /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
        width = round_down(width, info->hsub);
        height = round_down(height, info->vsub);

        /* Clamp the width and height. */
        pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
        pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
                            VSP1_VIDEO_MAX_HEIGHT);

        /*
         * Compute and clamp the stride and image size. While not documented in
         * the datasheet, strides not aligned to a multiple of 128 bytes result
         * in image corruption.
         */
        for (i = 0; i < min(info->planes, 2U); ++i) {
                unsigned int hsub = i > 0 ? info->hsub : 1;
                unsigned int vsub = i > 0 ? info->vsub : 1;
                unsigned int align = 128;
                unsigned int bpl;

                bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
                              pix->width / hsub * info->bpp[i] / 8,
                              round_down(65535U, align));

                pix->plane_fmt[i].bytesperline = round_up(bpl, align);
                pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
                                            * pix->height / vsub;
        }

        if (info->planes == 3) {
                /* The second and third planes must have the same stride. */
                pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
                pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
        }

        pix->num_planes = info->planes;

        if (fmtinfo)
                *fmtinfo = info;

        return 0;
}
/* -----------------------------------------------------------------------------
 * VSP1 Partition Algorithm support
 */

/**
 * vsp1_video_calculate_partition - Calculate the active partition output window
 *
 * @pipe: the pipeline
 * @partition: partition that will hold the calculated values
 * @div_size: pre-determined maximum partition division size
 * @index: partition index
 */
static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
                                           struct vsp1_partition *partition,
                                           unsigned int div_size,
                                           unsigned int index)
{
        const struct v4l2_mbus_framefmt *format;
        struct vsp1_partition_window window;
        unsigned int modulus;

        /*
         * Partitions are computed on the size before rotation, use the format
         * at the WPF sink.
         */
        format = vsp1_entity_get_pad_format(&pipe->output->entity,
                                            pipe->output->entity.config,
                                            RWPF_PAD_SINK);

        /* A single partition simply processes the output size in full. */
        if (pipe->partitions <= 1) {
                window.left = 0;
                window.width = format->width;

                vsp1_pipeline_propagate_partition(pipe, partition, index,
                                                  &window);
                return;
        }

        /* Initialise the partition with sane starting conditions. */
        window.left = index * div_size;
        window.width = div_size;

        modulus = format->width % div_size;

        /*
         * We need to prevent the last partition from being smaller than the
         * *minimum* width of the hardware capabilities.
         *
         * If the modulus is less than half of the partition size, the
         * penultimate partition is reduced to half, which is added to the
         * final partition: |1234|1234|1234|12|341|
         * to prevent this: |1234|1234|1234|1234|1|.
         */
        if (modulus) {
                /*
                 * pipe->partitions is 1 based, whilst index is a 0 based index.
                 * Normalise this locally.
                 */
                unsigned int partitions = pipe->partitions - 1;

                if (modulus < div_size / 2) {
                        if (index == partitions - 1) {
                                /* Halve the penultimate partition. */
                                window.width = div_size / 2;
                        } else if (index == partitions) {
                                /* Increase the final partition. */
                                window.width = (div_size / 2) + modulus;
                                window.left -= div_size / 2;
                        }
                } else if (index == partitions) {
                        window.width = modulus;
                }
        }

        vsp1_pipeline_propagate_partition(pipe, partition, index, &window);
}
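/*
 * Compute the number of horizontal partitions from the narrowest maximum
 * width supported by the entities in the pipeline (Gen3 only) and fill the
 * partition table.
 */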
static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
{
        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
        const struct v4l2_mbus_framefmt *format;
        struct vsp1_entity *entity;
        unsigned int div_size;
        unsigned int i;

        /*
         * Partitions are computed on the size before rotation, use the format
         * at the WPF sink.
         */
        format = vsp1_entity_get_pad_format(&pipe->output->entity,
                                            pipe->output->entity.config,
                                            RWPF_PAD_SINK);
        div_size = format->width;

        /*
         * Only Gen3 hardware requires image partitioning, Gen2 will operate
         * with a single partition that covers the whole output.
         */
        if (vsp1->info->gen == 3) {
                list_for_each_entry(entity, &pipe->entities, list_pipe) {
                        unsigned int entity_max;

                        if (!entity->ops->max_width)
                                continue;

                        entity_max = entity->ops->max_width(entity, pipe);
                        if (entity_max)
                                div_size = min(div_size, entity_max);
                }
        }

        pipe->partitions = DIV_ROUND_UP(format->width, div_size);
        pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table),
                                   GFP_KERNEL);
        if (!pipe->part_table)
                return -ENOMEM;

        for (i = 0; i < pipe->partitions; ++i)
                vsp1_video_calculate_partition(pipe, &pipe->part_table[i],
                                               div_size, i);

        return 0;
}
/* -----------------------------------------------------------------------------
 * Pipeline Management
 */

/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, and hands it back to the videobuf core.
 *
 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 * the VSP1 needs to constantly supply frames to the display. In that case, if
 * no other buffer is queued, reuse the one that has just been processed instead
 * of handing it back to the videobuf core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
        struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
        struct vsp1_vb2_buffer *next = NULL;
        struct vsp1_vb2_buffer *done;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&video->irqlock, flags);

        if (list_empty(&video->irqqueue)) {
                spin_unlock_irqrestore(&video->irqlock, flags);
                return NULL;
        }

        done = list_first_entry(&video->irqqueue,
                                struct vsp1_vb2_buffer, queue);

        /* In DU output mode reuse the buffer if the list is singular. */
        if (pipe->lif && list_is_singular(&video->irqqueue)) {
                spin_unlock_irqrestore(&video->irqlock, flags);
                return done;
        }

        list_del(&done->queue);

        if (!list_empty(&video->irqqueue))
                next = list_first_entry(&video->irqqueue,
                                        struct vsp1_vb2_buffer, queue);

        spin_unlock_irqrestore(&video->irqlock, flags);

        done->buf.sequence = pipe->sequence;
        done->buf.vb2_buf.timestamp = ktime_get_ns();
        for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
                vb2_set_plane_payload(&done->buf.vb2_buf, i,
                                      vb2_plane_size(&done->buf.vb2_buf, i));
        vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

        return next;
}
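/*
 * Complete the buffer currently being processed on the video node attached
 * to @rwpf. If another buffer is available (or the completed buffer is
 * reused in DU mode), store its memory addresses in the [RW]PF and mark the
 * video node as ready in the pipeline's buffers_ready mask.
 */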
static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
                                 struct vsp1_rwpf *rwpf)
{
        struct vsp1_video *video = rwpf->video;
        struct vsp1_vb2_buffer *buf;

        buf = vsp1_video_complete_buffer(video);
        if (buf == NULL)
                return;

        video->rwpf->mem = buf->mem;
        pipe->buffers_ready |= 1 << video->pipe_index;
}
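/*
 * Point the pipeline at the given partition and let every entity program its
 * per-partition registers into the display list body.
 */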
static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
                                              struct vsp1_dl_list *dl,
                                              unsigned int partition)
{
        struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
        struct vsp1_entity *entity;

        pipe->partition = &pipe->part_table[partition];

        list_for_each_entry(entity, &pipe->entities, list_pipe)
                vsp1_entity_configure_partition(entity, pipe, dl, dlb);
}
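/*
 * Build the display lists for one frame: add the cached stream configuration
 * when the hardware needs a full setup, program the per-frame parameters,
 * chain one display list per partition, commit the head of the chain and
 * start the pipeline.
 */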
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
        struct vsp1_entity *entity;
        struct vsp1_dl_body *dlb;
        struct vsp1_dl_list *dl;
        unsigned int partition;

        dl = vsp1_dl_list_get(pipe->output->dlm);

        /*
         * If the VSP hardware isn't configured yet (which occurs either when
         * processing the first frame or after a system suspend/resume), add the
         * cached stream configuration to the display list to perform a full
         * initialisation.
         */
        if (!pipe->configured)
                vsp1_dl_list_add_body(dl, pipe->stream_config);

        dlb = vsp1_dl_list_get_body0(dl);

        list_for_each_entry(entity, &pipe->entities, list_pipe)
                vsp1_entity_configure_frame(entity, pipe, dl, dlb);

        /* Run the first partition. */
        vsp1_video_pipeline_run_partition(pipe, dl, 0);

        /* Process consecutive partitions as necessary. */
        for (partition = 1; partition < pipe->partitions; ++partition) {
                struct vsp1_dl_list *dl_next;

                dl_next = vsp1_dl_list_get(pipe->output->dlm);

                /*
                 * An incomplete chain will still function, but output only
                 * the partitions that had a dl available. The frame end
                 * interrupt will be marked on the last dl in the chain.
                 */
                if (!dl_next) {
                        dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
                        break;
                }

                vsp1_video_pipeline_run_partition(pipe, dl_next, partition);
                vsp1_dl_list_add_chain(dl, dl_next);
        }

        /* Complete, and commit the head display list. */
        vsp1_dl_list_commit(dl, false);
        pipe->configured = true;

        vsp1_pipeline_run(pipe);
}
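/*
 * Frame end handler for memory-to-memory pipelines: complete the buffers on
 * all video nodes, then either wake up a pending stop request or restart the
 * pipeline if new buffers are ready on every node.
 */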
static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
                                          unsigned int completion)
{
        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
        enum vsp1_pipeline_state state;
        unsigned long flags;
        unsigned int i;

        /* M2M Pipelines should never call here with an incomplete frame. */
        WARN_ON_ONCE(!(completion & VSP1_DL_FRAME_END_COMPLETED));

        spin_lock_irqsave(&pipe->irqlock, flags);

        /* Complete buffers on all video nodes. */
        for (i = 0; i < vsp1->info->rpf_count; ++i) {
                if (!pipe->inputs[i])
                        continue;

                vsp1_video_frame_end(pipe, pipe->inputs[i]);
        }

        vsp1_video_frame_end(pipe, pipe->output);

        state = pipe->state;
        pipe->state = VSP1_PIPELINE_STOPPED;

        /*
         * If a stop has been requested, mark the pipeline as stopped and
         * return. Otherwise restart the pipeline if ready.
         */
        if (state == VSP1_PIPELINE_STOPPING)
                wake_up(&pipe->wq);
        else if (vsp1_pipeline_ready(pipe))
                vsp1_video_pipeline_run(pipe);

        spin_unlock_irqrestore(&pipe->irqlock, flags);
}
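/*
 * Walk one branch of the pipeline from the @input RPF to the @output WPF,
 * recording the BRU/BRS and UDS entities found on the way, and rejecting
 * loops, chained BRx or UDS instances and branches that don't terminate at
 * the output WPF.
 */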
static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
                                            struct vsp1_rwpf *input,
                                            struct vsp1_rwpf *output)
{
        struct media_entity_enum ent_enum;
        struct vsp1_entity *entity;
        struct media_pad *pad;
        struct vsp1_brx *brx = NULL;
        int ret;

        ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
        if (ret < 0)
                return ret;

        /*
         * The main data path doesn't include the HGO or HGT, use
         * vsp1_entity_remote_pad() to traverse the graph.
         */
        pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

        while (1) {
                if (pad == NULL) {
                        ret = -EPIPE;
                        goto out;
                }

                /* We've reached a video node, that shouldn't have happened. */
                if (!is_media_entity_v4l2_subdev(pad->entity)) {
                        ret = -EPIPE;
                        goto out;
                }

                entity = to_vsp1_entity(
                        media_entity_to_v4l2_subdev(pad->entity));

                /*
                 * A BRU or BRS is present in the pipeline, store its input pad
                 * number in the input RPF for use when configuring the RPF.
                 */
                if (entity->type == VSP1_ENTITY_BRU ||
                    entity->type == VSP1_ENTITY_BRS) {
                        /* BRU and BRS can't be chained. */
                        if (brx) {
                                ret = -EPIPE;
                                goto out;
                        }

                        brx = to_brx(&entity->subdev);
                        brx->inputs[pad->index].rpf = input;
                        input->brx_input = pad->index;
                }

                /* We've reached the WPF, we're done. */
                if (entity->type == VSP1_ENTITY_WPF)
                        break;

                /* Ensure the branch has no loop. */
                if (media_entity_enum_test_and_set(&ent_enum,
                                                   &entity->subdev.entity)) {
                        ret = -EPIPE;
                        goto out;
                }

                /* UDS can't be chained. */
                if (entity->type == VSP1_ENTITY_UDS) {
                        if (pipe->uds) {
                                ret = -EPIPE;
                                goto out;
                        }

                        pipe->uds = entity;
                        pipe->uds_input = brx ? &brx->entity : &input->entity;
                }

                /* Follow the source link, ignoring any HGO or HGT. */
                pad = &entity->pads[entity->source_pad];
                pad = vsp1_entity_remote_pad(pad);
        }

        /* The last entity must be the output WPF. */
        if (entity != &output->entity)
                ret = -EPIPE;

out:
        media_entity_enum_cleanup(&ent_enum);

        return ret;
}
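/*
 * Walk the media graph starting at the video node to collect all connected
 * entities into the pipeline, then validate every branch from the RPF inputs
 * to the output WPF.
 */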
static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
                                     struct vsp1_video *video)
{
        struct media_graph graph;
        struct media_entity *entity = &video->video.entity;
        struct media_device *mdev = entity->graph_obj.mdev;
        unsigned int i;
        int ret;

        /* Walk the graph to locate the entities and video nodes. */
        ret = media_graph_walk_init(&graph, mdev);
        if (ret)
                return ret;

        media_graph_walk_start(&graph, entity);

        while ((entity = media_graph_walk_next(&graph))) {
                struct v4l2_subdev *subdev;
                struct vsp1_rwpf *rwpf;
                struct vsp1_entity *e;

                if (!is_media_entity_v4l2_subdev(entity))
                        continue;

                subdev = media_entity_to_v4l2_subdev(entity);
                e = to_vsp1_entity(subdev);
                list_add_tail(&e->list_pipe, &pipe->entities);
                e->pipe = pipe;

                switch (e->type) {
                case VSP1_ENTITY_RPF:
                        rwpf = to_rwpf(subdev);
                        pipe->inputs[rwpf->entity.index] = rwpf;
                        rwpf->video->pipe_index = ++pipe->num_inputs;
                        break;

                case VSP1_ENTITY_WPF:
                        rwpf = to_rwpf(subdev);
                        pipe->output = rwpf;
                        rwpf->video->pipe_index = 0;
                        break;

                case VSP1_ENTITY_LIF:
                        pipe->lif = e;
                        break;

                case VSP1_ENTITY_BRU:
                case VSP1_ENTITY_BRS:
                        pipe->brx = e;
                        break;

                case VSP1_ENTITY_HGO:
                        pipe->hgo = e;
                        break;

                case VSP1_ENTITY_HGT:
                        pipe->hgt = e;
                        break;

                default:
                        break;
                }
        }

        media_graph_walk_cleanup(&graph);

        /* We need one output and at least one input. */
        if (pipe->num_inputs == 0 || !pipe->output)
                return -EPIPE;

        /*
         * Follow links downstream for each input and make sure the graph
         * contains no loop and that all branches end at the output WPF.
         */
        for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
                if (!pipe->inputs[i])
                        continue;

                ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
                                                       pipe->output);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
                                    struct vsp1_video *video)
{
        vsp1_pipeline_init(pipe);

        pipe->frame_end = vsp1_video_pipeline_frame_end;

        return vsp1_video_pipeline_build(pipe, video);
}

static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
        struct vsp1_pipeline *pipe;
        int ret;

        /*
         * Get a pipeline object for the video node. If a pipeline has already
         * been allocated just increment its reference count and return it.
         * Otherwise allocate a new pipeline and initialize it, it will be freed
         * when the last reference is released.
         */
        if (!video->rwpf->entity.pipe) {
                pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
                if (!pipe)
                        return ERR_PTR(-ENOMEM);

                ret = vsp1_video_pipeline_init(pipe, video);
                if (ret < 0) {
                        vsp1_pipeline_reset(pipe);
                        kfree(pipe);
                        return ERR_PTR(ret);
                }
        } else {
                pipe = video->rwpf->entity.pipe;
                kref_get(&pipe->kref);
        }

        return pipe;
}

static void vsp1_video_pipeline_release(struct kref *kref)
{
        struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);

        vsp1_pipeline_reset(pipe);
        kfree(pipe);
}

static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
        struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;

        mutex_lock(&mdev->graph_mutex);
        kref_put(&pipe->kref, vsp1_video_pipeline_release);
        mutex_unlock(&mdev->graph_mutex);
}

/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */

static int
vsp1_video_queue_setup(struct vb2_queue *vq,
                       unsigned int *nbuffers, unsigned int *nplanes,
                       unsigned int sizes[], struct device *alloc_devs[])
{
        struct vsp1_video *video = vb2_get_drv_priv(vq);
        const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
        unsigned int i;

        if (*nplanes) {
                if (*nplanes != format->num_planes)
                        return -EINVAL;

                for (i = 0; i < *nplanes; i++)
                        if (sizes[i] < format->plane_fmt[i].sizeimage)
                                return -EINVAL;
                return 0;
        }

        *nplanes = format->num_planes;

        for (i = 0; i < format->num_planes; ++i)
                sizes[i] = format->plane_fmt[i].sizeimage;

        return 0;
}

static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
        struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
        const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
        unsigned int i;

        if (vb->num_planes < format->num_planes)
                return -EINVAL;

        for (i = 0; i < vb->num_planes; ++i) {
                buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

                if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
                        return -EINVAL;
        }

        for ( ; i < 3; ++i)
                buf->mem.addr[i] = 0;

        return 0;
}
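/*
 * Queue a buffer on the video node. If it is the first buffer queued, store
 * its memory addresses in the [RW]PF, mark the node as ready and start the
 * pipeline when streaming has started and all other nodes are ready too.
 */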
static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
        struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
        struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
        unsigned long flags;
        bool empty;

        spin_lock_irqsave(&video->irqlock, flags);
        empty = list_empty(&video->irqqueue);
        list_add_tail(&buf->queue, &video->irqqueue);
        spin_unlock_irqrestore(&video->irqlock, flags);

        if (!empty)
                return;

        spin_lock_irqsave(&pipe->irqlock, flags);

        video->rwpf->mem = buf->mem;
        pipe->buffers_ready |= 1 << video->pipe_index;

        if (vb2_is_streaming(&video->queue) &&
            vsp1_pipeline_ready(pipe))
                vsp1_video_pipeline_run(pipe);

        spin_unlock_irqrestore(&pipe->irqlock, flags);
}
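/*
 * One-time pipeline setup performed when the last video node starts
 * streaming: compute the partition table, configure alpha scaling for the
 * UDS if present, and cache the stream configuration in a display list body.
 */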
static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
        struct vsp1_entity *entity;
        int ret;

        /* Determine this pipeline's sizes for image partitioning support. */
        ret = vsp1_video_pipeline_setup_partitions(pipe);
        if (ret < 0)
                return ret;

        if (pipe->uds) {
                struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

                /*
                 * If a BRU or BRS is present in the pipeline before the UDS,
                 * the alpha component doesn't need to be scaled as the BRU and
                 * BRS output alpha value is fixed to 255. Otherwise we need to
                 * scale the alpha component only when available at the input
                 * RPF.
                 */
                if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
                    pipe->uds_input->type == VSP1_ENTITY_BRS) {
                        uds->scale_alpha = false;
                } else {
                        struct vsp1_rwpf *rpf =
                                to_rwpf(&pipe->uds_input->subdev);

                        uds->scale_alpha = rpf->fmtinfo->alpha;
                }
        }

        /*
         * Compute and cache the stream configuration into a body. The cached
         * body will be added to the display list by vsp1_video_pipeline_run()
         * whenever the pipeline needs to be fully reconfigured.
         */
        pipe->stream_config = vsp1_dlm_dl_body_get(pipe->output->dlm);
        if (!pipe->stream_config)
                return -ENOMEM;

        list_for_each_entry(entity, &pipe->entities, list_pipe) {
                vsp1_entity_route_setup(entity, pipe, pipe->stream_config);
                vsp1_entity_configure_stream(entity, pipe, pipe->stream_config);
        }

        return 0;
}

static void vsp1_video_release_buffers(struct vsp1_video *video)
{
        struct vsp1_vb2_buffer *buffer;
        unsigned long flags;

        /* Remove all buffers from the IRQ queue. */
        spin_lock_irqsave(&video->irqlock, flags);
        list_for_each_entry(buffer, &video->irqqueue, queue)
                vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
        INIT_LIST_HEAD(&video->irqqueue);
        spin_unlock_irqrestore(&video->irqlock, flags);
}

static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
{
        lockdep_assert_held(&pipe->lock);

        /* Release any cached configuration from our output video. */
        vsp1_dl_body_put(pipe->stream_config);
        pipe->stream_config = NULL;
        pipe->configured = false;

        /* Release our partition table allocation. */
        kfree(pipe->part_table);
        pipe->part_table = NULL;
}
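/*
 * vb2 start_streaming handler: perform the one-time pipeline setup when the
 * last video node of the pipeline starts streaming, then start the hardware
 * if buffers are ready on every node.
 */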
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct vsp1_video *video = vb2_get_drv_priv(vq);
        struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
        bool start_pipeline = false;
        unsigned long flags;
        int ret;

        mutex_lock(&pipe->lock);
        if (pipe->stream_count == pipe->num_inputs) {
                ret = vsp1_video_setup_pipeline(pipe);
                if (ret < 0) {
                        vsp1_video_release_buffers(video);
                        vsp1_video_cleanup_pipeline(pipe);
                        mutex_unlock(&pipe->lock);
                        return ret;
                }

                start_pipeline = true;
        }

        pipe->stream_count++;
        mutex_unlock(&pipe->lock);

        /*
         * vsp1_pipeline_ready() is not sufficient to establish that all streams
         * are prepared and the pipeline is configured, as multiple streams can
         * race through streamon with buffers already queued. Therefore we don't
         * even attempt to start the pipeline until the last stream has called
         * through here.
         */
        if (!start_pipeline)
                return 0;

        spin_lock_irqsave(&pipe->irqlock, flags);

        if (vsp1_pipeline_ready(pipe))
                vsp1_video_pipeline_run(pipe);

        spin_unlock_irqrestore(&pipe->irqlock, flags);

        return 0;
}
static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
        struct vsp1_video *video = vb2_get_drv_priv(vq);
        struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
        unsigned long flags;
        int ret;

        /*
         * Clear the buffers ready flag to make sure the device won't be started
         * by a QBUF on the video node on the other side of the pipeline.
         */
        spin_lock_irqsave(&video->irqlock, flags);
        pipe->buffers_ready &= ~(1 << video->pipe_index);
        spin_unlock_irqrestore(&video->irqlock, flags);

        mutex_lock(&pipe->lock);
        if (--pipe->stream_count == pipe->num_inputs) {
                /* Stop the pipeline. */
                ret = vsp1_pipeline_stop(pipe);
                if (ret == -ETIMEDOUT)
                        dev_err(video->vsp1->dev, "pipeline stop timeout\n");

                vsp1_video_cleanup_pipeline(pipe);
        }
        mutex_unlock(&pipe->lock);

        media_pipeline_stop(&video->video.entity);
        vsp1_video_release_buffers(video);
        vsp1_video_pipeline_put(pipe);
}

static const struct vb2_ops vsp1_video_queue_qops = {
        .queue_setup = vsp1_video_queue_setup,
        .buf_prepare = vsp1_video_buffer_prepare,
        .buf_queue = vsp1_video_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = vsp1_video_start_streaming,
        .stop_streaming = vsp1_video_stop_streaming,
};
/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);

        cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
                          | V4L2_CAP_VIDEO_CAPTURE_MPLANE
                          | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
                                 | V4L2_CAP_STREAMING;
        else
                cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
                                 | V4L2_CAP_STREAMING;

        strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
        strlcpy(cap->card, video->video.name, sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
                 dev_name(video->vsp1->dev));

        return 0;
}

static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);

        if (format->type != video->queue.type)
                return -EINVAL;

        mutex_lock(&video->lock);
        format->fmt.pix_mp = video->rwpf->format;
        mutex_unlock(&video->lock);

        return 0;
}

static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);

        if (format->type != video->queue.type)
                return -EINVAL;

        return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}

static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
        const struct vsp1_format_info *info;
        int ret;

        if (format->type != video->queue.type)
                return -EINVAL;

        ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
        if (ret < 0)
                return ret;

        mutex_lock(&video->lock);

        if (vb2_is_busy(&video->queue)) {
                ret = -EBUSY;
                goto done;
        }

        video->rwpf->format = format->fmt.pix_mp;
        video->rwpf->fmtinfo = info;

done:
        mutex_unlock(&video->lock);
        return ret;
}
static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
        struct media_device *mdev = &video->vsp1->media_dev;
        struct vsp1_pipeline *pipe;
        int ret;

        if (video->queue.owner && video->queue.owner != file->private_data)
                return -EBUSY;

        /*
         * Get a pipeline for the video node and start streaming on it. No link
         * touching an entity in the pipeline can be activated or deactivated
         * once streaming is started.
         */
        mutex_lock(&mdev->graph_mutex);

        pipe = vsp1_video_pipeline_get(video);
        if (IS_ERR(pipe)) {
                mutex_unlock(&mdev->graph_mutex);
                return PTR_ERR(pipe);
        }

        ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
        if (ret < 0) {
                mutex_unlock(&mdev->graph_mutex);
                goto err_pipe;
        }

        mutex_unlock(&mdev->graph_mutex);

        /*
         * Verify that the configured format matches the output of the connected
         * subdev.
         */
        ret = vsp1_video_verify_format(video);
        if (ret < 0)
                goto err_stop;

        /* Start the queue. */
        ret = vb2_streamon(&video->queue, type);
        if (ret < 0)
                goto err_stop;

        return 0;

err_stop:
        media_pipeline_stop(&video->video.entity);
err_pipe:
        vsp1_video_pipeline_put(pipe);
        return ret;
}

static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
        .vidioc_querycap = vsp1_video_querycap,
        .vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
        .vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
        .vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
        .vidioc_g_fmt_vid_out_mplane = vsp1_video_get_format,
        .vidioc_s_fmt_vid_out_mplane = vsp1_video_set_format,
        .vidioc_try_fmt_vid_out_mplane = vsp1_video_try_format,
        .vidioc_reqbufs = vb2_ioctl_reqbufs,
        .vidioc_querybuf = vb2_ioctl_querybuf,
        .vidioc_qbuf = vb2_ioctl_qbuf,
        .vidioc_dqbuf = vb2_ioctl_dqbuf,
        .vidioc_expbuf = vb2_ioctl_expbuf,
        .vidioc_create_bufs = vb2_ioctl_create_bufs,
        .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
        .vidioc_streamon = vsp1_video_streamon,
        .vidioc_streamoff = vb2_ioctl_streamoff,
};
/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */

static int vsp1_video_open(struct file *file)
{
        struct vsp1_video *video = video_drvdata(file);
        struct v4l2_fh *vfh;
        int ret = 0;

        vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
        if (vfh == NULL)
                return -ENOMEM;

        v4l2_fh_init(vfh, &video->video);
        v4l2_fh_add(vfh);

        file->private_data = vfh;

        ret = vsp1_device_get(video->vsp1);
        if (ret < 0) {
                v4l2_fh_del(vfh);
                v4l2_fh_exit(vfh);
                kfree(vfh);
        }

        return ret;
}

static int vsp1_video_release(struct file *file)
{
        struct vsp1_video *video = video_drvdata(file);
        struct v4l2_fh *vfh = file->private_data;

        mutex_lock(&video->lock);
        if (video->queue.owner == vfh) {
                vb2_queue_release(&video->queue);
                video->queue.owner = NULL;
        }
        mutex_unlock(&video->lock);

        vsp1_device_put(video->vsp1);

        v4l2_fh_release(file);

        file->private_data = NULL;

        return 0;
}

static const struct v4l2_file_operations vsp1_video_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open = vsp1_video_open,
        .release = vsp1_video_release,
        .poll = vb2_fop_poll,
        .mmap = vb2_fop_mmap,
};
/* -----------------------------------------------------------------------------
 * Suspend and Resume
 */

void vsp1_video_suspend(struct vsp1_device *vsp1)
{
        unsigned long flags;
        unsigned int i;
        int ret;

        /*
         * To avoid increasing the system suspend time needlessly, loop over the
         * pipelines twice, first to set them all to the stopping state, and
         * then to wait for the stop to complete.
         */
        for (i = 0; i < vsp1->info->wpf_count; ++i) {
                struct vsp1_rwpf *wpf = vsp1->wpf[i];
                struct vsp1_pipeline *pipe;

                if (wpf == NULL)
                        continue;

                pipe = wpf->entity.pipe;
                if (pipe == NULL)
                        continue;

                spin_lock_irqsave(&pipe->irqlock, flags);
                if (pipe->state == VSP1_PIPELINE_RUNNING)
                        pipe->state = VSP1_PIPELINE_STOPPING;
                spin_unlock_irqrestore(&pipe->irqlock, flags);
        }

        for (i = 0; i < vsp1->info->wpf_count; ++i) {
                struct vsp1_rwpf *wpf = vsp1->wpf[i];
                struct vsp1_pipeline *pipe;

                if (wpf == NULL)
                        continue;

                pipe = wpf->entity.pipe;
                if (pipe == NULL)
                        continue;

                ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
                                         msecs_to_jiffies(500));
                if (ret == 0)
                        dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
                                 wpf->entity.index);
        }
}

void vsp1_video_resume(struct vsp1_device *vsp1)
{
        unsigned long flags;
        unsigned int i;

        /* Resume all running pipelines. */
        for (i = 0; i < vsp1->info->wpf_count; ++i) {
                struct vsp1_rwpf *wpf = vsp1->wpf[i];
                struct vsp1_pipeline *pipe;

                if (wpf == NULL)
                        continue;

                pipe = wpf->entity.pipe;
                if (pipe == NULL)
                        continue;

                /*
                 * The hardware may have been reset during a suspend and will
                 * need a full reconfiguration.
                 */
                pipe->configured = false;

                spin_lock_irqsave(&pipe->irqlock, flags);
                if (vsp1_pipeline_ready(pipe))
                        vsp1_video_pipeline_run(pipe);
                spin_unlock_irqrestore(&pipe->irqlock, flags);
        }
}

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */
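/*
 * Create the video node for the given [RW]PF: RPFs get a V4L2 output
 * (memory-to-VSP) node and WPFs a capture (VSP-to-memory) node. Initialise
 * the media pad, the default format, the vb2 queue and register the video
 * device.
 */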
struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
                                     struct vsp1_rwpf *rwpf)
{
        struct vsp1_video *video;
        const char *direction;
        int ret;

        video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
        if (!video)
                return ERR_PTR(-ENOMEM);

        rwpf->video = video;

        video->vsp1 = vsp1;
        video->rwpf = rwpf;

        if (rwpf->entity.type == VSP1_ENTITY_RPF) {
                direction = "input";
                video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
                video->pad.flags = MEDIA_PAD_FL_SOURCE;
                video->video.vfl_dir = VFL_DIR_TX;
        } else {
                direction = "output";
                video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
                video->pad.flags = MEDIA_PAD_FL_SINK;
                video->video.vfl_dir = VFL_DIR_RX;
        }

        mutex_init(&video->lock);
        spin_lock_init(&video->irqlock);
        INIT_LIST_HEAD(&video->irqqueue);

        /* Initialize the media entity... */
        ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
        if (ret < 0)
                return ERR_PTR(ret);

        /* ... and the format ... */
        rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
        rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
        rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;

        __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

        /* ... and the video node... */
        video->video.v4l2_dev = &video->vsp1->v4l2_dev;
        video->video.fops = &vsp1_video_fops;
        snprintf(video->video.name, sizeof(video->video.name), "%s %s",
                 rwpf->entity.subdev.name, direction);
        video->video.vfl_type = VFL_TYPE_GRABBER;
        video->video.release = video_device_release_empty;
        video->video.ioctl_ops = &vsp1_video_ioctl_ops;

        video_set_drvdata(&video->video, video);

        video->queue.type = video->type;
        video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        video->queue.lock = &video->lock;
        video->queue.drv_priv = video;
        video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
        video->queue.ops = &vsp1_video_queue_qops;
        video->queue.mem_ops = &vb2_dma_contig_memops;
        video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
        video->queue.dev = video->vsp1->bus_master;
        ret = vb2_queue_init(&video->queue);
        if (ret < 0) {
                dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
                goto error;
        }

        /* ... and register the video device. */
        video->video.queue = &video->queue;
        ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
        if (ret < 0) {
                dev_err(video->vsp1->dev, "failed to register video device\n");
                goto error;
        }

        return video;

error:
        vsp1_video_cleanup(video);
        return ERR_PTR(ret);
}
void vsp1_video_cleanup(struct vsp1_video *video)
{
        if (video_is_registered(&video->video))
                video_unregister_device(&video->video);

        media_entity_cleanup(&video->video.entity);
}