v4l2-common.c 29 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Video for Linux Two
  4. *
  5. * A generic video device interface for the LINUX operating system
  6. * using a set of device structures/vectors for low level operations.
  7. *
  8. * This file replaces the videodev.c file that comes with the
  9. * regular kernel distribution.
  10. *
  11. * Author: Bill Dirks <bill@thedirks.org>
  12. * based on code by Alan Cox, <alan@cymru.net>
  13. */
  14. /*
  15. * Video capture interface for Linux
  16. *
  17. * A generic video device interface for the LINUX operating system
  18. * using a set of device structures/vectors for low level operations.
  19. *
  20. * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
  21. *
  22. * Fixes:
  23. */
  24. /*
  25. * Video4linux 1/2 integration by Justin Schoeman
  26. * <justin@suntiger.ee.up.ac.za>
  27. * 2.4 PROCFS support ported from 2.4 kernels by
  28. * Iñaki García Etxebarria <garetxe@euskalnet.net>
  29. * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
  30. * 2.4 devfs support ported from 2.4 kernels by
  31. * Dan Merillat <dan@merillat.org>
  32. * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
  33. */
  34. #include <linux/module.h>
  35. #include <linux/types.h>
  36. #include <linux/kernel.h>
  37. #include <linux/mm.h>
  38. #include <linux/string.h>
  39. #include <linux/errno.h>
  40. #include <linux/uaccess.h>
  41. #include <asm/io.h>
  42. #include <asm/div64.h>
  43. #include <media/v4l2-common.h>
  44. #include <media/v4l2-device.h>
  45. #include <media/v4l2-ctrls.h>
  46. #include <linux/videodev2.h>
  47. /*
  48. *
  49. * V 4 L 2 D R I V E R H E L P E R A P I
  50. *
  51. */
  52. /*
  53. * Video Standard Operations (contributed by Michael Schimek)
  54. */
  55. /* Helper functions for control handling */
  56. /* Fill in a struct v4l2_queryctrl */
  57. int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
  58. {
  59. const char *name;
  60. s64 min = _min;
  61. s64 max = _max;
  62. u64 step = _step;
  63. s64 def = _def;
  64. v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
  65. &min, &max, &step, &def, &qctrl->flags);
  66. if (name == NULL)
  67. return -EINVAL;
  68. qctrl->minimum = min;
  69. qctrl->maximum = max;
  70. qctrl->step = step;
  71. qctrl->default_value = def;
  72. qctrl->reserved[0] = qctrl->reserved[1] = 0;
  73. strscpy(qctrl->name, name, sizeof(qctrl->name));
  74. return 0;
  75. }
  76. EXPORT_SYMBOL(v4l2_ctrl_query_fill);
  77. /* Clamp x to be between min and max, aligned to a multiple of 2^align. min
  78. * and max don't have to be aligned, but there must be at least one valid
  79. * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
  80. * of 16 between 17 and 31. */
  81. static unsigned int clamp_align(unsigned int x, unsigned int min,
  82. unsigned int max, unsigned int align)
  83. {
  84. /* Bits that must be zero to be aligned */
  85. unsigned int mask = ~((1 << align) - 1);
  86. /* Clamp to aligned min and max */
  87. x = clamp(x, (min + ~mask) & mask, max & mask);
  88. /* Round to nearest aligned value */
  89. if (align)
  90. x = (x + (1 << (align - 1))) & mask;
  91. return x;
  92. }
  93. static unsigned int clamp_roundup(unsigned int x, unsigned int min,
  94. unsigned int max, unsigned int alignment)
  95. {
  96. x = clamp(x, min, max);
  97. if (alignment)
  98. x = round_up(x, alignment);
  99. return x;
  100. }
/*
 * v4l_bound_align_image - clamp and align an image's width and height
 * @w:      width, updated in place
 * @wmin/@wmax: valid width range
 * @walign: width must be a multiple of 2^walign
 * @h:      height, updated in place
 * @hmin/@hmax: valid height range
 * @halign: height must be a multiple of 2^halign
 * @salign: width * height must be a multiple of 2^salign
 *
 * Each dimension is first clamped/aligned on its own; if the combined
 * power-of-two alignment (trailing zero bits of *w plus those of *h) is
 * still below @salign, the loop below raises the alignment of one
 * dimension at a time until the product is sufficiently aligned.
 */
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
			   unsigned int walign,
			   u32 *h, unsigned int hmin, unsigned int hmax,
			   unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have? (__ffs == count of trailing zeros) */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/* Max walign where there is still a valid width */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			/*
			 * Prefer widening the width alignment while it is the
			 * smaller of the two and can still grow; otherwise
			 * grow the height alignment.
			 */
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
/*
 * __v4l2_find_nearest_size - find the table entry nearest to width x height
 * @array:         table of entries, each containing u32 width/height members
 * @array_size:    number of entries in @array
 * @entry_size:    size in bytes of one entry
 * @width_offset:  byte offset of the width member within an entry
 * @height_offset: byte offset of the height member within an entry
 * @width/@height: requested size
 *
 * Returns the entry minimizing |entry.width - width| +
 * |entry.height - height| (Manhattan distance), or NULL if @array is NULL.
 * Among equally-near entries the last one wins; an exact match ends the
 * scan early. Normally called through the v4l2_find_nearest_size() macro,
 * which computes the offsets from the entry type.
 */
const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
			 size_t entry_size, size_t width_offset,
			 size_t height_offset, s32 width, s32 height)
{
	u32 error, min_error = U32_MAX;
	const void *best = NULL;
	unsigned int i;

	if (!array)
		return NULL;

	/* Note: void-pointer arithmetic is a GNU C extension. */
	for (i = 0; i < array_size; i++, array += entry_size) {
		const u32 *entry_width = array + width_offset;
		const u32 *entry_height = array + height_offset;

		error = abs(*entry_width - width) + abs(*entry_height - height);
		if (error > min_error)
			continue;

		min_error = error;
		best = array;
		if (!error)
			break;
	}

	return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
/*
 * v4l2_g_parm_cap - VIDIOC_G_PARM helper for video capture devices
 * @vdev: the video device
 * @sd:   the subdev providing the frame interval
 * @a:    streaming parameters to fill in
 *
 * Fills in the capture readbuffers count, the TIMEPERFRAME capability
 * (when the subdev implements get_frame_interval) and the current frame
 * interval. Returns 0 on success, -EINVAL for non-capture buffer types,
 * or the subdev call's error code.
 */
int v4l2_g_parm_cap(struct video_device *vdev,
		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
	struct v4l2_subdev_frame_interval ival = { 0 };
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	/* Advertise a default read() buffer count when read/write I/O works. */
	if (vdev->device_caps & V4L2_CAP_READWRITE)
		a->parm.capture.readbuffers = 2;
	if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	ret = v4l2_subdev_call_state_active(sd, pad, get_frame_interval, &ival);
	if (!ret)
		a->parm.capture.timeperframe = ival.interval;
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);
/*
 * v4l2_s_parm_cap - VIDIOC_S_PARM helper for video capture devices
 * @vdev: the video device
 * @sd:   the subdev that applies the frame interval
 * @a:    requested streaming parameters; updated with the applied values
 *
 * Returns 0 on success, -EINVAL for non-capture buffer types, or the
 * subdev call's error code.
 */
int v4l2_s_parm_cap(struct video_device *vdev,
		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
	/*
	 * Capture the requested interval here, BEFORE the memset below
	 * clears a->parm.
	 */
	struct v4l2_subdev_frame_interval ival = {
		.interval = a->parm.capture.timeperframe
	};
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	/* Reset all output fields; only the supported ones are set below. */
	memset(&a->parm, 0, sizeof(a->parm));
	if (vdev->device_caps & V4L2_CAP_READWRITE)
		a->parm.capture.readbuffers = 2;
	else
		a->parm.capture.readbuffers = 0;

	if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	/* Ask the subdev to apply the interval, then report what it chose. */
	ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival);
	if (!ret)
		a->parm.capture.timeperframe = ival.interval;
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
/*
 * v4l2_format_info - look up the static description of a pixel format
 * @format: V4L2_PIX_FMT_* fourcc code
 *
 * Returns the table entry describing the format's encoding, memory and
 * component plane counts, bytes per pixel (bpp/bpp_div per plane),
 * chroma subsampling factors (hdiv/vdiv) and, for tiled formats, the
 * block dimensions — or NULL if the format is not in the table.
 */
const struct v4l2_format_info *v4l2_format_info(u32 format)
{
	static const struct v4l2_format_info formats[] = {
		/* RGB formats */
		{ .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* YUV packed formats */
		{ .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y210, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y216, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
		{ .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},

		/* YUV planar formats */
		{ .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		{ .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* Tiled YUV formats */
		{ .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
		{ .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* YUV planar formats, non contiguous variant */
		{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		{ .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* Bayer RGB formats */
		{ .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
	};
	unsigned int i;

	/* Linear scan; the table is small and lookups are not hot-path. */
	for (i = 0; i < ARRAY_SIZE(formats); ++i)
		if (formats[i].format == format)
			return &formats[i];
	return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
  302. static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
  303. {
  304. if (!info->block_w[plane])
  305. return 1;
  306. return info->block_w[plane];
  307. }
  308. static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
  309. {
  310. if (!info->block_h[plane])
  311. return 1;
  312. return info->block_h[plane];
  313. }
  314. void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
  315. const struct v4l2_frmsize_stepwise *frmsize)
  316. {
  317. if (!frmsize)
  318. return;
  319. /*
  320. * Clamp width/height to meet min/max constraints and round it up to
  321. * macroblock alignment.
  322. */
  323. *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
  324. frmsize->step_width);
  325. *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
  326. frmsize->step_height);
  327. }
  328. EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
/*
 * v4l2_fill_pixfmt_mp - fill a multi-planar pixel format description
 * @pixfmt:      structure to fill (per-plane bytesperline and sizeimage)
 * @pixelformat: V4L2_PIX_FMT_* fourcc code
 * @width/@height: frame size in pixels
 *
 * Returns 0 on success or -EINVAL if @pixelformat is not in the
 * v4l2_format_info() table.
 */
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
			u32 pixelformat, u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	struct v4l2_plane_pix_format *plane;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->num_planes = info->mem_planes;

	if (info->mem_planes == 1) {
		/*
		 * Contiguous variant: a single memory plane carries all
		 * component planes. bytesperline describes the first
		 * (luma) component; sizeimage accumulates all of them.
		 */
		plane = &pixfmt->plane_fmt[0];
		plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
		plane->sizeimage = 0;

		for (i = 0; i < info->comp_planes; i++) {
			/* Chroma subsampling only applies to planes > 0. */
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane->sizeimage += info->bpp[i] *
				DIV_ROUND_UP(aligned_width, hdiv) *
				DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
		}
	} else {
		/* Non-contiguous: one memory plane per component plane. */
		for (i = 0; i < info->comp_planes; i++) {
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane = &pixfmt->plane_fmt[i];
			plane->bytesperline =
				info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i];
			plane->sizeimage =
				plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
/*
 * v4l2_fill_pixfmt - fill a single-planar pixel format description
 * @pixfmt:      structure to fill (bytesperline and sizeimage)
 * @pixelformat: V4L2_PIX_FMT_* fourcc code
 * @width/@height: frame size in pixels
 *
 * Returns 0 on success or -EINVAL if @pixelformat is unknown or needs
 * more than one memory plane (use v4l2_fill_pixfmt_mp() for those).
 */
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
		     u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	/* Single planar API cannot be used for multi plane formats. */
	if (info->mem_planes > 1)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	/* bytesperline covers the first (luma) component plane only. */
	pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
	pixfmt->sizeimage = 0;

	for (i = 0; i < info->comp_planes; i++) {
		/* Chroma subsampling only applies to planes > 0. */
		unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
		unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
		unsigned int aligned_width;
		unsigned int aligned_height;

		aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
		aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

		pixfmt->sizeimage += info->bpp[i] *
			DIV_ROUND_UP(aligned_width, hdiv) *
			DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
/*
 * v4l2_get_link_freq - obtain the transmitter's link frequency in Hz
 * @handler: the transmitter's control handler
 * @mul:     multiplier (e.g. bits per sample) for the pixel-rate fallback
 * @div:     divisor (e.g. number of lanes * 2) for the pixel-rate fallback
 *
 * Prefers the V4L2_CID_LINK_FREQ menu control; when absent, estimates the
 * frequency as PIXEL_RATE * @mul / @div. Returns the frequency, -ENOENT
 * when neither control is usable (or @mul/@div are zero for the fallback),
 * or -EINVAL for a non-positive result.
 */
s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
		       unsigned int div)
{
	struct v4l2_ctrl *ctrl;
	s64 freq;

	ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
	if (ctrl) {
		struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
		int ret;

		/* LINK_FREQ is a menu: map the current index to its value. */
		qm.index = v4l2_ctrl_g_ctrl(ctrl);

		ret = v4l2_querymenu(handler, &qm);
		if (ret)
			return -ENOENT;

		freq = qm.value;
	} else {
		if (!mul || !div)
			return -ENOENT;

		/* Fall back to an estimate derived from the pixel rate. */
		ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
		if (!ctrl)
			return -ENOENT;

		freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);

		pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
			__func__);
		pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
			__func__);
	}

	return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
/*
 * v4l2_simplify_fraction - reduce a fraction using continued fractions
 * @numerator/@denominator: fraction to simplify, updated in place
 * @n_terms:   maximum number of continued-fraction terms to keep
 * @threshold: a term >= this value ends the decomposition
 *
 * Converts fractions such as 333333/10000000 to 1/30 using 32-bit
 * arithmetic only. The algorithm is not perfect and relies upon the two
 * parameters to drop non-significant terms from the simple continued
 * fraction decomposition; 8 and 333 for @n_terms and @threshold seem
 * to give nice results. On allocation failure the fraction is left
 * unchanged (best effort).
 */
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
			    unsigned int n_terms, unsigned int threshold)
{
	u32 *an;
	u32 x, y, r;
	unsigned int i, n;

	an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
	if (an == NULL)
		return;

	/*
	 * Convert the fraction to a simple continued fraction. See
	 * https://en.wikipedia.org/wiki/Continued_fraction
	 * Stop if the current term is bigger than or equal to the given
	 * threshold.
	 */
	x = *numerator;
	y = *denominator;

	for (n = 0; n < n_terms && y != 0; ++n) {
		an[n] = x / y;
		if (an[n] >= threshold) {
			/* Keep at least two terms so small inputs survive. */
			if (n < 2)
				n++;
			break;
		}

		/* Euclid step: (x, y) <- (y, x mod y). */
		r = x - an[n] * y;
		x = y;
		y = r;
	}

	/* Expand the simple continued fraction back to an integer fraction. */
	x = 0;
	y = 1;

	for (i = n; i > 0; --i) {
		r = y;
		y = an[i-1] * y + x;
		x = r;
	}

	*numerator = y;
	*denominator = x;
	kfree(an);
}
EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
  483. /*
  484. * Convert a fraction to a frame interval in 100ns multiples. The idea here is
  485. * to compute numerator / denominator * 10000000 using 32 bit fixed point
  486. * arithmetic only.
  487. */
  488. u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
  489. {
  490. u32 multiplier;
  491. /* Saturate the result if the operation would overflow. */
  492. if (denominator == 0 ||
  493. numerator/denominator >= ((u32)-1)/10000000)
  494. return (u32)-1;
  495. /*
  496. * Divide both the denominator and the multiplier by two until
  497. * numerator * multiplier doesn't overflow. If anyone knows a better
  498. * algorithm please let me know.
  499. */
  500. multiplier = 10000000;
  501. while (numerator > ((u32)-1)/multiplier) {
  502. multiplier /= 2;
  503. denominator /= 2;
  504. }
  505. return denominator ? numerator * multiplier / denominator : 0;
  506. }
  507. EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
/*
 * v4l2_link_freq_to_bitmap - intersect firmware and driver link frequencies
 * @dev:    device used for diagnostic messages
 * @fw_link_freqs: link frequencies listed by firmware
 * @num_of_fw_link_freqs: number of entries in @fw_link_freqs
 * @driver_link_freqs: link frequencies the driver supports
 * @num_of_driver_link_freqs: number of entries in @driver_link_freqs
 * @bitmap: output; bit j set when driver_link_freqs[j] is also in firmware
 *
 * Returns 0 when at least one frequency matched, -ENODATA when the
 * firmware lists no frequencies, or -ENOENT when the two lists have no
 * frequency in common.
 */
int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
			     unsigned int num_of_fw_link_freqs,
			     const s64 *driver_link_freqs,
			     unsigned int num_of_driver_link_freqs,
			     unsigned long *bitmap)
{
	unsigned int i;

	*bitmap = 0;

	if (!num_of_fw_link_freqs) {
		dev_err(dev, "no link frequencies in firmware\n");
		return -ENODATA;
	}

	for (i = 0; i < num_of_fw_link_freqs; i++) {
		unsigned int j;

		for (j = 0; j < num_of_driver_link_freqs; j++) {
			if (fw_link_freqs[i] != driver_link_freqs[j])
				continue;

			dev_dbg(dev, "enabling link frequency %lld Hz\n",
				driver_link_freqs[j]);
			*bitmap |= BIT(j);
			break;
		}
	}

	if (!*bitmap) {
		dev_err(dev, "no matching link frequencies found\n");

		/* Dump both lists to ease debugging the mismatch. */
		dev_dbg(dev, "specified in firmware:\n");
		for (i = 0; i < num_of_fw_link_freqs; i++)
			dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]);

		dev_dbg(dev, "driver supported:\n");
		for (i = 0; i < num_of_driver_link_freqs; i++)
			dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]);

		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);