ipu-image-convert.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2012-2016 Mentor Graphics Inc.
  4. *
  5. * Queued image conversion support, with tiling and rotation.
  6. */
  7. #include <linux/interrupt.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/math.h>
  10. #include <video/imx-ipu-image-convert.h>
  11. #include "ipu-prv.h"
  12. /*
  13. * The IC Resizer has a restriction that the output frame from the
  14. * resizer must be 1024 or less in both width (pixels) and height
  15. * (lines).
  16. *
  17. * The image converter attempts to split up a conversion when
  18. * the desired output (converted) frame resolution exceeds the
  19. * IC resizer limit of 1024 in either dimension.
  20. *
  21. * If either dimension of the output frame exceeds the limit, the
  22. * dimension is split into 1, 2, or 4 equal stripes, for a maximum
  23. * of 4*4 or 16 tiles. A conversion is then carried out for each
  24. * tile (but taking care to pass the full frame stride length to
25. * the DMA channel's parameter memory!). IDMAC double-buffering is used
  26. * to convert each tile back-to-back when possible (see note below
  27. * when double_buffering boolean is set).
  28. *
  29. * Note that the input frame must be split up into the same number
  30. * of tiles as the output frame:
  31. *
32. *                   +---------+-----+
33. *   +-----+---+     |    A    |  B  |
34. *   |  A  | B |     |         |     |
35. *   +-----+---+ --> +---------+-----+
36. *   |  C  | D |     |    C    |  D  |
37. *   +-----+---+     |         |     |
38. *                   +---------+-----+
39. *
40. * Clockwise 90° rotations are handled by first rescaling into a
41. * reusable temporary tile buffer and then rotating with the 8x8
42. * block rotator, writing to the correct destination:
43. *
44. *                                   +-----+-----+
45. *                                   |     |     |
46. *   +-----+---+     +---------+     |  C  |  A  |
47. *   |  A  | B |     | A,B, |  |     |     |     |
48. *   +-----+---+ --> | C,D  |  | --> |     |     |
49. *   |  C  | D |     +---------+     +-----+-----+
50. *   +-----+---+                     |  D  |  B  |
51. *                                   |     |     |
52. *                                   +-----+-----+
  53. *
  54. * If the 8x8 block rotator is used, horizontal or vertical flipping
  55. * is done during the rotation step, otherwise flipping is done
  56. * during the scaling step.
  57. * With rotation or flipping, tile order changes between input and
  58. * output image. Tiles are numbered row major from top left to bottom
  59. * right for both input and output image.
  60. */
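/*
 * Worked example (illustrative, numbers assumed): a 1920x1080 output
 * frame exceeds the 1024-pixel resizer limit in both dimensions, so it
 * is split into 2 vertical and 2 horizontal stripes, i.e. 2x2 = 4
 * tiles, whereas a 1024x768 output fits in a single tile.
 */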
  61. #define MAX_STRIPES_W 4
  62. #define MAX_STRIPES_H 4
  63. #define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
  64. #define MIN_W 16
  65. #define MIN_H 8
  66. #define MAX_W 4096
  67. #define MAX_H 4096
  68. enum ipu_image_convert_type {
  69. IMAGE_CONVERT_IN = 0,
  70. IMAGE_CONVERT_OUT,
  71. };
  72. struct ipu_image_convert_dma_buf {
  73. void *virt;
  74. dma_addr_t phys;
  75. unsigned long len;
  76. };
  77. struct ipu_image_convert_dma_chan {
  78. int in;
  79. int out;
  80. int rot_in;
  81. int rot_out;
  82. int vdi_in_p;
  83. int vdi_in;
  84. int vdi_in_n;
  85. };
  86. /* dimensions of one tile */
  87. struct ipu_image_tile {
  88. u32 width;
  89. u32 height;
  90. u32 left;
  91. u32 top;
  92. /* size and strides are in bytes */
  93. u32 size;
  94. u32 stride;
  95. u32 rot_stride;
  96. /* start Y or packed offset of this tile */
  97. u32 offset;
  98. /* offset from start to tile in U plane, for planar formats */
  99. u32 u_off;
  100. /* offset from start to tile in V plane, for planar formats */
  101. u32 v_off;
  102. };
  103. struct ipu_image_convert_image {
  104. struct ipu_image base;
  105. enum ipu_image_convert_type type;
  106. const struct ipu_image_pixfmt *fmt;
  107. unsigned int stride;
  108. /* # of rows (horizontal stripes) if dest height is > 1024 */
  109. unsigned int num_rows;
  110. /* # of columns (vertical stripes) if dest width is > 1024 */
  111. unsigned int num_cols;
  112. struct ipu_image_tile tile[MAX_TILES];
  113. };
  114. struct ipu_image_pixfmt {
  115. u32 fourcc; /* V4L2 fourcc */
  116. int bpp; /* total bpp */
  117. int uv_width_dec; /* decimation in width for U/V planes */
  118. int uv_height_dec; /* decimation in height for U/V planes */
  119. bool planar; /* planar format */
  120. bool uv_swapped; /* U and V planes are swapped */
  121. bool uv_packed; /* partial planar (U and V in same plane) */
  122. };
  123. struct ipu_image_convert_ctx;
  124. struct ipu_image_convert_chan;
  125. struct ipu_image_convert_priv;
  126. enum eof_irq_mask {
  127. EOF_IRQ_IN = BIT(0),
  128. EOF_IRQ_ROT_IN = BIT(1),
  129. EOF_IRQ_OUT = BIT(2),
  130. EOF_IRQ_ROT_OUT = BIT(3),
  131. };
  132. #define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
  133. #define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
  134. EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
  135. struct ipu_image_convert_ctx {
  136. struct ipu_image_convert_chan *chan;
  137. ipu_image_convert_cb_t complete;
  138. void *complete_context;
  139. /* Source/destination image data and rotation mode */
  140. struct ipu_image_convert_image in;
  141. struct ipu_image_convert_image out;
  142. struct ipu_ic_csc csc;
  143. enum ipu_rotate_mode rot_mode;
  144. u32 downsize_coeff_h;
  145. u32 downsize_coeff_v;
  146. u32 image_resize_coeff_h;
  147. u32 image_resize_coeff_v;
  148. u32 resize_coeffs_h[MAX_STRIPES_W];
  149. u32 resize_coeffs_v[MAX_STRIPES_H];
  150. /* intermediate buffer for rotation */
  151. struct ipu_image_convert_dma_buf rot_intermediate[2];
  152. /* current buffer number for double buffering */
  153. int cur_buf_num;
  154. bool aborting;
  155. struct completion aborted;
  156. /* can we use double-buffering for this conversion operation? */
  157. bool double_buffering;
  158. /* num_rows * num_cols */
  159. unsigned int num_tiles;
  160. /* next tile to process */
  161. unsigned int next_tile;
  162. /* where to place converted tile in dest image */
  163. unsigned int out_tile_map[MAX_TILES];
  164. /* mask of completed EOF irqs at every tile conversion */
  165. enum eof_irq_mask eof_mask;
  166. struct list_head list;
  167. };
  168. struct ipu_image_convert_chan {
  169. struct ipu_image_convert_priv *priv;
  170. enum ipu_ic_task ic_task;
  171. const struct ipu_image_convert_dma_chan *dma_ch;
  172. struct ipu_ic *ic;
  173. struct ipuv3_channel *in_chan;
  174. struct ipuv3_channel *out_chan;
  175. struct ipuv3_channel *rotation_in_chan;
  176. struct ipuv3_channel *rotation_out_chan;
  177. /* the IPU end-of-frame irqs */
  178. int in_eof_irq;
  179. int rot_in_eof_irq;
  180. int out_eof_irq;
  181. int rot_out_eof_irq;
  182. spinlock_t irqlock;
  183. /* list of convert contexts */
  184. struct list_head ctx_list;
  185. /* queue of conversion runs */
  186. struct list_head pending_q;
  187. /* queue of completed runs */
  188. struct list_head done_q;
  189. /* the current conversion run */
  190. struct ipu_image_convert_run *current_run;
  191. };
  192. struct ipu_image_convert_priv {
  193. struct ipu_image_convert_chan chan[IC_NUM_TASKS];
  194. struct ipu_soc *ipu;
  195. };
  196. static const struct ipu_image_convert_dma_chan
  197. image_convert_dma_chan[IC_NUM_TASKS] = {
  198. [IC_TASK_VIEWFINDER] = {
  199. .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
  200. .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
  201. .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
  202. .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
  203. .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
  204. .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
  205. .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
  206. },
  207. [IC_TASK_POST_PROCESSOR] = {
  208. .in = IPUV3_CHANNEL_MEM_IC_PP,
  209. .out = IPUV3_CHANNEL_IC_PP_MEM,
  210. .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
  211. .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
  212. },
  213. };
  214. static const struct ipu_image_pixfmt image_convert_formats[] = {
  215. {
  216. .fourcc = V4L2_PIX_FMT_RGB565,
  217. .bpp = 16,
  218. }, {
  219. .fourcc = V4L2_PIX_FMT_RGB24,
  220. .bpp = 24,
  221. }, {
  222. .fourcc = V4L2_PIX_FMT_BGR24,
  223. .bpp = 24,
  224. }, {
  225. .fourcc = V4L2_PIX_FMT_RGB32,
  226. .bpp = 32,
  227. }, {
  228. .fourcc = V4L2_PIX_FMT_BGR32,
  229. .bpp = 32,
  230. }, {
  231. .fourcc = V4L2_PIX_FMT_XRGB32,
  232. .bpp = 32,
  233. }, {
  234. .fourcc = V4L2_PIX_FMT_XBGR32,
  235. .bpp = 32,
  236. }, {
  237. .fourcc = V4L2_PIX_FMT_BGRX32,
  238. .bpp = 32,
  239. }, {
  240. .fourcc = V4L2_PIX_FMT_RGBX32,
  241. .bpp = 32,
  242. }, {
  243. .fourcc = V4L2_PIX_FMT_YUYV,
  244. .bpp = 16,
  245. .uv_width_dec = 2,
  246. .uv_height_dec = 1,
  247. }, {
  248. .fourcc = V4L2_PIX_FMT_UYVY,
  249. .bpp = 16,
  250. .uv_width_dec = 2,
  251. .uv_height_dec = 1,
  252. }, {
  253. .fourcc = V4L2_PIX_FMT_YUV420,
  254. .bpp = 12,
  255. .planar = true,
  256. .uv_width_dec = 2,
  257. .uv_height_dec = 2,
  258. }, {
  259. .fourcc = V4L2_PIX_FMT_YVU420,
  260. .bpp = 12,
  261. .planar = true,
  262. .uv_width_dec = 2,
  263. .uv_height_dec = 2,
  264. .uv_swapped = true,
  265. }, {
  266. .fourcc = V4L2_PIX_FMT_NV12,
  267. .bpp = 12,
  268. .planar = true,
  269. .uv_width_dec = 2,
  270. .uv_height_dec = 2,
  271. .uv_packed = true,
  272. }, {
  273. .fourcc = V4L2_PIX_FMT_YUV422P,
  274. .bpp = 16,
  275. .planar = true,
  276. .uv_width_dec = 2,
  277. .uv_height_dec = 1,
  278. }, {
  279. .fourcc = V4L2_PIX_FMT_NV16,
  280. .bpp = 16,
  281. .planar = true,
  282. .uv_width_dec = 2,
  283. .uv_height_dec = 1,
  284. .uv_packed = true,
  285. },
  286. };
  287. static const struct ipu_image_pixfmt *get_format(u32 fourcc)
  288. {
  289. const struct ipu_image_pixfmt *ret = NULL;
  290. unsigned int i;
  291. for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
  292. if (image_convert_formats[i].fourcc == fourcc) {
  293. ret = &image_convert_formats[i];
  294. break;
  295. }
  296. }
  297. return ret;
  298. }
  299. static void dump_format(struct ipu_image_convert_ctx *ctx,
  300. struct ipu_image_convert_image *ic_image)
  301. {
  302. struct ipu_image_convert_chan *chan = ctx->chan;
  303. struct ipu_image_convert_priv *priv = chan->priv;
  304. dev_dbg(priv->ipu->dev,
  305. "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
  306. chan->ic_task, ctx,
  307. ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
  308. ic_image->base.pix.width, ic_image->base.pix.height,
  309. ic_image->num_cols, ic_image->num_rows,
  310. ic_image->fmt->fourcc & 0xff,
  311. (ic_image->fmt->fourcc >> 8) & 0xff,
  312. (ic_image->fmt->fourcc >> 16) & 0xff,
  313. (ic_image->fmt->fourcc >> 24) & 0xff);
  314. }
  315. int ipu_image_convert_enum_format(int index, u32 *fourcc)
  316. {
  317. const struct ipu_image_pixfmt *fmt;
  318. if (index >= (int)ARRAY_SIZE(image_convert_formats))
  319. return -EINVAL;
  320. /* Format found */
  321. fmt = &image_convert_formats[index];
  322. *fourcc = fmt->fourcc;
  323. return 0;
  324. }
  325. EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
  326. static void free_dma_buf(struct ipu_image_convert_priv *priv,
  327. struct ipu_image_convert_dma_buf *buf)
  328. {
  329. if (buf->virt)
  330. dma_free_coherent(priv->ipu->dev,
  331. buf->len, buf->virt, buf->phys);
  332. buf->virt = NULL;
  333. buf->phys = 0;
  334. }
  335. static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
  336. struct ipu_image_convert_dma_buf *buf,
  337. int size)
  338. {
  339. buf->len = PAGE_ALIGN(size);
  340. buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
  341. GFP_DMA | GFP_KERNEL);
  342. if (!buf->virt) {
  343. dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
  344. return -ENOMEM;
  345. }
  346. return 0;
  347. }
  348. static inline int num_stripes(int dim)
  349. {
  350. return (dim - 1) / 1024 + 1;
  351. }
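/*
 * Illustrative values: num_stripes(1024) = 1, num_stripes(1025) = 2,
 * num_stripes(2048) = 2, num_stripes(2049) = 3.
 */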
  352. /*
  353. * Calculate downsizing coefficients, which are the same for all tiles,
  354. * and initial bilinear resizing coefficients, which are used to find the
  355. * best seam positions.
  356. * Also determine the number of tiles necessary to guarantee that no tile
  357. * is larger than 1024 pixels in either dimension at the output and between
  358. * IC downsizing and main processing sections.
  359. */
  360. static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
  361. struct ipu_image *in,
  362. struct ipu_image *out)
  363. {
  364. u32 downsized_width = in->rect.width;
  365. u32 downsized_height = in->rect.height;
  366. u32 downsize_coeff_v = 0;
  367. u32 downsize_coeff_h = 0;
  368. u32 resized_width = out->rect.width;
  369. u32 resized_height = out->rect.height;
  370. u32 resize_coeff_h;
  371. u32 resize_coeff_v;
  372. u32 cols;
  373. u32 rows;
  374. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  375. resized_width = out->rect.height;
  376. resized_height = out->rect.width;
  377. }
  378. /* Do not let invalid input lead to an endless loop below */
  379. if (WARN_ON(resized_width == 0 || resized_height == 0))
  380. return -EINVAL;
  381. while (downsized_width >= resized_width * 2) {
  382. downsized_width >>= 1;
  383. downsize_coeff_h++;
  384. }
  385. while (downsized_height >= resized_height * 2) {
  386. downsized_height >>= 1;
  387. downsize_coeff_v++;
  388. }
  389. /*
  390. * Calculate the bilinear resizing coefficients that could be used if
  391. * we were converting with a single tile. The bottom right output pixel
  392. * should sample as close as possible to the bottom right input pixel
  393. * out of the decimator, but not overshoot it:
  394. */
  395. resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
  396. resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
  397. /*
  398. * Both the output of the IC downsizing section before being passed to
  399. * the IC main processing section and the final output of the IC main
  400. * processing section must be <= 1024 pixels in both dimensions.
  401. */
  402. cols = num_stripes(max_t(u32, downsized_width, resized_width));
  403. rows = num_stripes(max_t(u32, downsized_height, resized_height));
  404. dev_dbg(ctx->chan->priv->ipu->dev,
  405. "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
  406. __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
  407. resize_coeff_v, cols, rows);
  408. if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
  409. resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
  410. return -EINVAL;
  411. ctx->downsize_coeff_h = downsize_coeff_h;
  412. ctx->downsize_coeff_v = downsize_coeff_v;
  413. ctx->image_resize_coeff_h = resize_coeff_h;
  414. ctx->image_resize_coeff_v = resize_coeff_v;
  415. ctx->in.num_cols = cols;
  416. ctx->in.num_rows = rows;
  417. return 0;
  418. }
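/*
 * Worked example (illustrative, assumed 3840-pixel-wide input scaled to
 * a 1280-pixel-wide output): the downsizer halves 3840 once to 1920
 * (downsize_coeff_h = 1, and 1920 < 2 * 1280 stops the loop), the
 * bilinear coefficient becomes 8192 * (1920 - 1) / (1280 - 1) = 12291,
 * and num_stripes(max(1920, 1280)) = 2 columns of tiles are needed.
 */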
  419. #define round_closest(x, y) round_down((x) + (y)/2, (y))
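/* E.g. round_closest(10, 8) = round_down(14, 8) = 8, round_closest(12, 8) = 16. */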
  420. /*
  421. * Find the best aligned seam position for the given column / row index.
  422. * Rotation and image offsets are out of scope.
  423. *
  424. * @index: column / row index, used to calculate valid interval
  425. * @in_edge: input right / bottom edge
  426. * @out_edge: output right / bottom edge
  427. * @in_align: input alignment, either horizontal 8-byte line start address
  428. * alignment, or pixel alignment due to image format
  429. * @out_align: output alignment, either horizontal 8-byte line start address
  430. * alignment, or pixel alignment due to image format or rotator
  431. * block size
  432. * @in_burst: horizontal input burst size in case of horizontal flip
  433. * @out_burst: horizontal output burst size or rotator block size
  434. * @downsize_coeff: downsizing section coefficient
  435. * @resize_coeff: main processing section resizing coefficient
  436. * @_in_seam: aligned input seam position return value
  437. * @_out_seam: aligned output seam position return value
  438. */
  439. static void find_best_seam(struct ipu_image_convert_ctx *ctx,
  440. unsigned int index,
  441. unsigned int in_edge,
  442. unsigned int out_edge,
  443. unsigned int in_align,
  444. unsigned int out_align,
  445. unsigned int in_burst,
  446. unsigned int out_burst,
  447. unsigned int downsize_coeff,
  448. unsigned int resize_coeff,
  449. u32 *_in_seam,
  450. u32 *_out_seam)
  451. {
  452. struct device *dev = ctx->chan->priv->ipu->dev;
  453. unsigned int out_pos;
  454. /* Input / output seam position candidates */
  455. unsigned int out_seam = 0;
  456. unsigned int in_seam = 0;
  457. unsigned int min_diff = UINT_MAX;
  458. unsigned int out_start;
  459. unsigned int out_end;
  460. unsigned int in_start;
  461. unsigned int in_end;
  462. /* Start within 1024 pixels of the right / bottom edge */
  463. out_start = max_t(int, index * out_align, out_edge - 1024);
  464. /* End before having to add more columns to the left / rows above */
  465. out_end = min_t(unsigned int, out_edge, index * 1024 + 1);
  466. /*
  467. * Limit input seam position to make sure that the downsized input tile
  468. * to the right or bottom does not exceed 1024 pixels.
  469. */
  470. in_start = max_t(int, index * in_align,
  471. in_edge - (1024 << downsize_coeff));
  472. in_end = min_t(unsigned int, in_edge,
  473. index * (1024 << downsize_coeff) + 1);
  474. /*
  475. * Output tiles must start at a multiple of 8 bytes horizontally and
  476. * possibly at an even line horizontally depending on the pixel format.
  477. * Only consider output aligned positions for the seam.
  478. */
  479. out_start = round_up(out_start, out_align);
  480. for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
  481. unsigned int in_pos;
  482. unsigned int in_pos_aligned;
  483. unsigned int in_pos_rounded;
  484. unsigned int diff;
  485. /*
  486. * Tiles in the right row / bottom column may not be allowed to
  487. * overshoot horizontally / vertically. out_burst may be the
  488. * actual DMA burst size, or the rotator block size.
  489. */
  490. if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
  491. continue;
  492. /*
  493. * Input sample position, corresponding to out_pos, 19.13 fixed
  494. * point.
  495. */
  496. in_pos = (out_pos * resize_coeff) << downsize_coeff;
  497. /*
  498. * The closest input sample position that we could actually
  499. * start the input tile at, 19.13 fixed point.
  500. */
  501. in_pos_aligned = round_closest(in_pos, 8192U * in_align);
  502. /* Convert 19.13 fixed point to integer */
  503. in_pos_rounded = in_pos_aligned / 8192U;
  504. if (in_pos_rounded < in_start)
  505. continue;
  506. if (in_pos_rounded >= in_end)
  507. break;
  508. if ((in_burst > 1) &&
  509. (in_edge - in_pos_rounded) % in_burst)
  510. continue;
  511. diff = abs_diff(in_pos, in_pos_aligned);
  512. if (diff < min_diff) {
  513. in_seam = in_pos_rounded;
  514. out_seam = out_pos;
  515. min_diff = diff;
  516. }
  517. }
  518. *_out_seam = out_seam;
  519. *_in_seam = in_seam;
  520. dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
  521. __func__, out_seam, out_align, out_start, out_end,
  522. in_seam, in_align, in_start, in_end, min_diff / 8192,
  523. DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
  524. }
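/*
 * Worked example (illustrative, assumed resize_coeff = 6553,
 * downsize_coeff = 0, in_align = 8): for candidate out_pos = 40 the
 * ideal input position is 40 * 6553 = 262120 (31.997 in 19.13 fixed
 * point); the closest 8-pixel-aligned tile start is
 * round_closest(262120, 8192 * 8) = 262144 (pixel 32), so diff = 24,
 * i.e. this seam misplaces the sampling by only 24/8192 of an input
 * pixel.
 */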
  525. /*
  526. * Tile left edges are required to be aligned to multiples of 8 bytes
  527. * by the IDMAC.
  528. */
  529. static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
  530. {
  531. if (fmt->planar)
  532. return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
  533. else
  534. return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
  535. }
  536. /*
  537. * Tile top edge alignment is only limited by chroma subsampling.
  538. */
  539. static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
  540. {
  541. return fmt->uv_height_dec > 1 ? 2 : 1;
  542. }
  543. static inline u32 tile_width_align(enum ipu_image_convert_type type,
  544. const struct ipu_image_pixfmt *fmt,
  545. enum ipu_rotate_mode rot_mode)
  546. {
  547. if (type == IMAGE_CONVERT_IN) {
  548. /*
  549. * The IC burst reads 8 pixels at a time. Reading beyond the
  550. * end of the line is usually acceptable. Those pixels are
  551. * ignored, unless the IC has to write the scaled line in
  552. * reverse.
  553. */
  554. return (!ipu_rot_mode_is_irt(rot_mode) &&
  555. (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
  556. }
  557. /*
  558. * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
  559. * formats to guarantee 8-byte aligned line start addresses in the
  560. * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
  561. * for all other formats.
  562. */
  563. return (ipu_rot_mode_is_irt(rot_mode) &&
  564. fmt->planar && !fmt->uv_packed) ?
  565. 8 * fmt->uv_width_dec : 8;
  566. }
  567. static inline u32 tile_height_align(enum ipu_image_convert_type type,
  568. const struct ipu_image_pixfmt *fmt,
  569. enum ipu_rotate_mode rot_mode)
  570. {
  571. if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
  572. return 2;
  573. /*
  574. * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
  575. * formats to guarantee 8-byte aligned line start addresses in the
  576. * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
  577. * for all other formats.
  578. */
  579. return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
  580. }
  581. /*
582. * Fill in left position and width for all tiles in an input column, and
  583. * for all corresponding output tiles. If the 90° rotator is used, the output
  584. * tiles are in a row, and output tile top position and height are set.
  585. */
  586. static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
  587. unsigned int col,
  588. struct ipu_image_convert_image *in,
  589. unsigned int in_left, unsigned int in_width,
  590. struct ipu_image_convert_image *out,
  591. unsigned int out_left, unsigned int out_width)
  592. {
  593. unsigned int row, tile_idx;
  594. struct ipu_image_tile *in_tile, *out_tile;
  595. for (row = 0; row < in->num_rows; row++) {
  596. tile_idx = in->num_cols * row + col;
  597. in_tile = &in->tile[tile_idx];
  598. out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
  599. in_tile->left = in_left;
  600. in_tile->width = in_width;
  601. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  602. out_tile->top = out_left;
  603. out_tile->height = out_width;
  604. } else {
  605. out_tile->left = out_left;
  606. out_tile->width = out_width;
  607. }
  608. }
  609. }
  610. /*
611. * Fill in top position and height for all tiles in an input row, and
  612. * for all corresponding output tiles. If the 90° rotator is used, the output
  613. * tiles are in a column, and output tile left position and width are set.
  614. */
  615. static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
  616. struct ipu_image_convert_image *in,
  617. unsigned int in_top, unsigned int in_height,
  618. struct ipu_image_convert_image *out,
  619. unsigned int out_top, unsigned int out_height)
  620. {
  621. unsigned int col, tile_idx;
  622. struct ipu_image_tile *in_tile, *out_tile;
  623. for (col = 0; col < in->num_cols; col++) {
  624. tile_idx = in->num_cols * row + col;
  625. in_tile = &in->tile[tile_idx];
  626. out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
  627. in_tile->top = in_top;
  628. in_tile->height = in_height;
  629. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  630. out_tile->left = out_top;
  631. out_tile->width = out_height;
  632. } else {
  633. out_tile->top = out_top;
  634. out_tile->height = out_height;
  635. }
  636. }
  637. }
  638. /*
  639. * Find the best horizontal and vertical seam positions to split into tiles.
  640. * Minimize the fractional part of the input sampling position for the
  641. * top / left pixels of each tile.
  642. */
  643. static void find_seams(struct ipu_image_convert_ctx *ctx,
  644. struct ipu_image_convert_image *in,
  645. struct ipu_image_convert_image *out)
  646. {
  647. struct device *dev = ctx->chan->priv->ipu->dev;
  648. unsigned int resized_width = out->base.rect.width;
  649. unsigned int resized_height = out->base.rect.height;
  650. unsigned int col;
  651. unsigned int row;
  652. unsigned int in_left_align = tile_left_align(in->fmt);
  653. unsigned int in_top_align = tile_top_align(in->fmt);
  654. unsigned int out_left_align = tile_left_align(out->fmt);
  655. unsigned int out_top_align = tile_top_align(out->fmt);
  656. unsigned int out_width_align = tile_width_align(out->type, out->fmt,
  657. ctx->rot_mode);
  658. unsigned int out_height_align = tile_height_align(out->type, out->fmt,
  659. ctx->rot_mode);
  660. unsigned int in_right = in->base.rect.width;
  661. unsigned int in_bottom = in->base.rect.height;
  662. unsigned int out_right = out->base.rect.width;
  663. unsigned int out_bottom = out->base.rect.height;
  664. unsigned int flipped_out_left;
  665. unsigned int flipped_out_top;
  666. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  667. /* Switch width/height and align top left to IRT block size */
  668. resized_width = out->base.rect.height;
  669. resized_height = out->base.rect.width;
  670. out_left_align = out_height_align;
  671. out_top_align = out_width_align;
  672. out_width_align = out_left_align;
  673. out_height_align = out_top_align;
  674. out_right = out->base.rect.height;
  675. out_bottom = out->base.rect.width;
  676. }
  677. for (col = in->num_cols - 1; col > 0; col--) {
  678. bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
  679. !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
  680. bool allow_out_overshoot = (col < in->num_cols - 1) &&
  681. !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
  682. unsigned int in_left;
  683. unsigned int out_left;
  684. /*
  685. * Align input width to burst length if the scaling step flips
  686. * horizontally.
  687. */
  688. find_best_seam(ctx, col,
  689. in_right, out_right,
  690. in_left_align, out_left_align,
  691. allow_in_overshoot ? 1 : 8 /* burst length */,
  692. allow_out_overshoot ? 1 : out_width_align,
  693. ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
  694. &in_left, &out_left);
  695. if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
  696. flipped_out_left = resized_width - out_right;
  697. else
  698. flipped_out_left = out_left;
  699. fill_tile_column(ctx, col, in, in_left, in_right - in_left,
  700. out, flipped_out_left, out_right - out_left);
  701. dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
  702. in_left, in_right - in_left,
  703. flipped_out_left, out_right - out_left);
  704. in_right = in_left;
  705. out_right = out_left;
  706. }
  707. flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
  708. resized_width - out_right : 0;
  709. fill_tile_column(ctx, 0, in, 0, in_right,
  710. out, flipped_out_left, out_right);
  711. dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
  712. in_right, flipped_out_left, out_right);
  713. for (row = in->num_rows - 1; row > 0; row--) {
  714. bool allow_overshoot = row < in->num_rows - 1;
  715. unsigned int in_top;
  716. unsigned int out_top;
  717. find_best_seam(ctx, row,
  718. in_bottom, out_bottom,
  719. in_top_align, out_top_align,
  720. 1, allow_overshoot ? 1 : out_height_align,
  721. ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
  722. &in_top, &out_top);
  723. if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
  724. ipu_rot_mode_is_irt(ctx->rot_mode))
  725. flipped_out_top = resized_height - out_bottom;
  726. else
  727. flipped_out_top = out_top;
  728. fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
  729. out, flipped_out_top, out_bottom - out_top);
  730. dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
  731. in_top, in_bottom - in_top,
  732. flipped_out_top, out_bottom - out_top);
  733. in_bottom = in_top;
  734. out_bottom = out_top;
  735. }
  736. if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
  737. ipu_rot_mode_is_irt(ctx->rot_mode))
  738. flipped_out_top = resized_height - out_bottom;
  739. else
  740. flipped_out_top = 0;
  741. fill_tile_row(ctx, 0, in, 0, in_bottom,
  742. out, flipped_out_top, out_bottom);
  743. dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
  744. in_bottom, flipped_out_top, out_bottom);
  745. }
  746. static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
  747. struct ipu_image_convert_image *image)
  748. {
  749. struct ipu_image_convert_chan *chan = ctx->chan;
  750. struct ipu_image_convert_priv *priv = chan->priv;
  751. unsigned int max_width = 1024;
  752. unsigned int max_height = 1024;
  753. unsigned int i;
  754. if (image->type == IMAGE_CONVERT_IN) {
  755. /* Up to 4096x4096 input tile size */
  756. max_width <<= ctx->downsize_coeff_h;
  757. max_height <<= ctx->downsize_coeff_v;
  758. }
  759. for (i = 0; i < ctx->num_tiles; i++) {
  760. struct ipu_image_tile *tile;
  761. const unsigned int row = i / image->num_cols;
  762. const unsigned int col = i % image->num_cols;
  763. if (image->type == IMAGE_CONVERT_OUT)
  764. tile = &image->tile[ctx->out_tile_map[i]];
  765. else
  766. tile = &image->tile[i];
  767. tile->size = ((tile->height * image->fmt->bpp) >> 3) *
  768. tile->width;
  769. if (image->fmt->planar) {
  770. tile->stride = tile->width;
  771. tile->rot_stride = tile->height;
  772. } else {
  773. tile->stride =
  774. (image->fmt->bpp * tile->width) >> 3;
  775. tile->rot_stride =
  776. (image->fmt->bpp * tile->height) >> 3;
  777. }
  778. dev_dbg(priv->ipu->dev,
  779. "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
  780. chan->ic_task, ctx,
  781. image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
  782. row, col,
  783. tile->width, tile->height, tile->left, tile->top);
  784. if (!tile->width || tile->width > max_width ||
  785. !tile->height || tile->height > max_height) {
  786. dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
  787. image->type == IMAGE_CONVERT_IN ? "input" :
  788. "output", tile->width, tile->height);
  789. return -EINVAL;
  790. }
  791. }
  792. return 0;
  793. }
  794. /*
  795. * Use the rotation transformation to find the tile coordinates
  796. * (row, col) of a tile in the destination frame that corresponds
  797. * to the given tile coordinates of a source frame. The destination
  798. * coordinate is then converted to a tile index.
  799. */
  800. static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
  801. int src_row, int src_col)
  802. {
  803. struct ipu_image_convert_chan *chan = ctx->chan;
  804. struct ipu_image_convert_priv *priv = chan->priv;
  805. struct ipu_image_convert_image *s_image = &ctx->in;
  806. struct ipu_image_convert_image *d_image = &ctx->out;
  807. int dst_row, dst_col;
  808. /* with no rotation it's a 1:1 mapping */
  809. if (ctx->rot_mode == IPU_ROTATE_NONE)
  810. return src_row * s_image->num_cols + src_col;
  811. /*
  812. * before doing the transform, first we have to translate
  813. * source row,col for an origin in the center of s_image
  814. */
  815. src_row = src_row * 2 - (s_image->num_rows - 1);
  816. src_col = src_col * 2 - (s_image->num_cols - 1);
  817. /* do the rotation transform */
  818. if (ctx->rot_mode & IPU_ROT_BIT_90) {
  819. dst_col = -src_row;
  820. dst_row = src_col;
  821. } else {
  822. dst_col = src_col;
  823. dst_row = src_row;
  824. }
  825. /* apply flip */
  826. if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
  827. dst_col = -dst_col;
  828. if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
  829. dst_row = -dst_row;
  830. dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
  831. chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
  832. /*
  833. * finally translate dest row,col using an origin in upper
  834. * left of d_image
  835. */
  836. dst_row += d_image->num_rows - 1;
  837. dst_col += d_image->num_cols - 1;
  838. dst_row /= 2;
  839. dst_col /= 2;
  840. return dst_row * d_image->num_cols + dst_col;
  841. }
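/*
 * Worked example (illustrative): with 2x2 tiles and a plain 90°
 * rotation (IPU_ROT_BIT_90 set, no flips), source tile A at
 * (row 0, col 0) becomes (-1, -1) around the center, rotates to
 * (dst_row, dst_col) = (-1, 1), and translates back to (0, 1), i.e.
 * destination index 1. The full map is A->1, B->3, C->0, D->2, which
 * matches the rotation diagram at the top of this file.
 */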
  842. /*
843. * Fill the out_tile_map[] with transformed destination tile indices.
  844. */
  845. static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
  846. {
  847. struct ipu_image_convert_image *s_image = &ctx->in;
  848. unsigned int row, col, tile = 0;
  849. for (row = 0; row < s_image->num_rows; row++) {
  850. for (col = 0; col < s_image->num_cols; col++) {
  851. ctx->out_tile_map[tile] =
  852. transform_tile_index(ctx, row, col);
  853. tile++;
  854. }
  855. }
  856. }
  857. static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
  858. struct ipu_image_convert_image *image)
  859. {
  860. struct ipu_image_convert_chan *chan = ctx->chan;
  861. struct ipu_image_convert_priv *priv = chan->priv;
  862. const struct ipu_image_pixfmt *fmt = image->fmt;
  863. unsigned int row, col, tile = 0;
  864. u32 H, top, y_stride, uv_stride;
  865. u32 uv_row_off, uv_col_off, uv_off, u_off, v_off;
  866. u32 y_row_off, y_col_off, y_off;
  867. u32 y_size, uv_size;
  868. /* setup some convenience vars */
  869. H = image->base.pix.height;
  870. y_stride = image->stride;
  871. uv_stride = y_stride / fmt->uv_width_dec;
  872. if (fmt->uv_packed)
  873. uv_stride *= 2;
  874. y_size = H * y_stride;
  875. uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
  876. for (row = 0; row < image->num_rows; row++) {
  877. top = image->tile[tile].top;
  878. y_row_off = top * y_stride;
  879. uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
  880. for (col = 0; col < image->num_cols; col++) {
  881. y_col_off = image->tile[tile].left;
  882. uv_col_off = y_col_off / fmt->uv_width_dec;
  883. if (fmt->uv_packed)
  884. uv_col_off *= 2;
  885. y_off = y_row_off + y_col_off;
  886. uv_off = uv_row_off + uv_col_off;
  887. u_off = y_size - y_off + uv_off;
  888. v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
  889. if (fmt->uv_swapped)
  890. swap(u_off, v_off);
  891. image->tile[tile].offset = y_off;
  892. image->tile[tile].u_off = u_off;
  893. image->tile[tile++].v_off = v_off;
  894. if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
  895. dev_err(priv->ipu->dev,
  896. "task %u: ctx %p: %s@[%d,%d]: "
  897. "y_off %08x, u_off %08x, v_off %08x\n",
  898. chan->ic_task, ctx,
  899. image->type == IMAGE_CONVERT_IN ?
  900. "Input" : "Output", row, col,
  901. y_off, u_off, v_off);
  902. return -EINVAL;
  903. }
  904. }
  905. }
  906. return 0;
  907. }
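/*
 * Worked example (illustrative, assumed 640x480 YUV420 with stride 640):
 * for the tile starting at left = 320, top = 240, y_off = 240 * 640 +
 * 320 = 153920 and uv_off = 240 * 320 / 2 + 160 = 38560, so
 * u_off = y_size - y_off + uv_off = 307200 - 153920 + 38560 = 191840
 * and v_off = u_off + uv_size = 191840 + 76800 = 268640. The U/V
 * offsets are relative to the tile's own start address (base + y_off),
 * which is why y_off is subtracted out.
 */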
  908. static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
  909. struct ipu_image_convert_image *image)
  910. {
  911. struct ipu_image_convert_chan *chan = ctx->chan;
  912. struct ipu_image_convert_priv *priv = chan->priv;
  913. const struct ipu_image_pixfmt *fmt = image->fmt;
  914. unsigned int row, col, tile = 0;
  915. u32 bpp, stride, offset;
  916. u32 row_off, col_off;
  917. /* setup some convenience vars */
  918. stride = image->stride;
  919. bpp = fmt->bpp;
  920. for (row = 0; row < image->num_rows; row++) {
  921. row_off = image->tile[tile].top * stride;
  922. for (col = 0; col < image->num_cols; col++) {
  923. col_off = (image->tile[tile].left * bpp) >> 3;
  924. offset = row_off + col_off;
  925. image->tile[tile].offset = offset;
  926. image->tile[tile].u_off = 0;
  927. image->tile[tile++].v_off = 0;
  928. if (offset & 0x7) {
  929. dev_err(priv->ipu->dev,
  930. "task %u: ctx %p: %s@[%d,%d]: "
  931. "phys %08x\n",
  932. chan->ic_task, ctx,
  933. image->type == IMAGE_CONVERT_IN ?
  934. "Input" : "Output", row, col,
  935. row_off + col_off);
  936. return -EINVAL;
  937. }
  938. }
  939. }
  940. return 0;
  941. }
  942. static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
  943. struct ipu_image_convert_image *image)
  944. {
  945. if (image->fmt->planar)
  946. return calc_tile_offsets_planar(ctx, image);
  947. return calc_tile_offsets_packed(ctx, image);
  948. }
  949. /*
  950. * Calculate the resizing ratio for the IC main processing section given input
  951. * size, fixed downsizing coefficient, and output size.
  952. * Either round to closest for the next tile's first pixel to minimize seams
  953. * and distortion (for all but right column / bottom row), or round down to
  954. * avoid sampling beyond the edges of the input image for this tile's last
  955. * pixel.
  956. * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
  957. */
  958. static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
  959. u32 output_size, bool allow_overshoot)
  960. {
  961. u32 downsized = input_size >> downsize_coeff;
  962. if (allow_overshoot)
  963. return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
  964. else
  965. return 8192 * (downsized - 1) / (output_size - 1);
  966. }
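/*
 * Worked example (illustrative, assumed downsized tile of 512 pixels
 * scaled to 1024): with allow_overshoot the coefficient is
 * DIV_ROUND_CLOSEST(8192 * 512, 1024) = 4096, so the last output pixel
 * samples at 1023 * 4096 / 8192 = 511.5, just past input pixel 511;
 * without overshoot it is 8192 * 511 / 1023 = 4091, which keeps the
 * last sample at 510.9, inside the tile.
 */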
  967. /*
  968. * Slightly modify resize coefficients per tile to hide the bilinear
  969. * interpolator reset at tile borders, shifting the right / bottom edge
  970. * by up to a half input pixel. This removes noticeable seams between
  971. * tiles at higher upscaling factors.
  972. */
  973. static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
  974. {
  975. struct ipu_image_convert_chan *chan = ctx->chan;
  976. struct ipu_image_convert_priv *priv = chan->priv;
  977. struct ipu_image_tile *in_tile, *out_tile;
  978. unsigned int col, row, tile_idx;
  979. unsigned int last_output;
  980. for (col = 0; col < ctx->in.num_cols; col++) {
  981. bool closest = (col < ctx->in.num_cols - 1) &&
  982. !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
  983. u32 resized_width;
  984. u32 resize_coeff_h;
  985. u32 in_width;
  986. tile_idx = col;
  987. in_tile = &ctx->in.tile[tile_idx];
  988. out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
  989. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  990. resized_width = out_tile->height;
  991. else
  992. resized_width = out_tile->width;
  993. resize_coeff_h = calc_resize_coeff(in_tile->width,
  994. ctx->downsize_coeff_h,
  995. resized_width, closest);
  996. dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
  997. __func__, col, resize_coeff_h);
  998. /*
  999. * With the horizontal scaling factor known, round up resized
  1000. * width (output width or height) to burst size.
  1001. */
  1002. resized_width = round_up(resized_width, 8);
  1003. /*
  1004. * Calculate input width from the last accessed input pixel
  1005. * given resized width and scaling coefficients. Round up to
  1006. * burst size.
  1007. */
  1008. last_output = resized_width - 1;
  1009. if (closest && ((last_output * resize_coeff_h) % 8192))
  1010. last_output++;
  1011. in_width = round_up(
  1012. (DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
  1013. << ctx->downsize_coeff_h, 8);
  1014. for (row = 0; row < ctx->in.num_rows; row++) {
  1015. tile_idx = row * ctx->in.num_cols + col;
  1016. in_tile = &ctx->in.tile[tile_idx];
  1017. out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
  1018. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  1019. out_tile->height = resized_width;
  1020. else
  1021. out_tile->width = resized_width;
  1022. in_tile->width = in_width;
  1023. }
  1024. ctx->resize_coeffs_h[col] = resize_coeff_h;
  1025. }
  1026. for (row = 0; row < ctx->in.num_rows; row++) {
  1027. bool closest = (row < ctx->in.num_rows - 1) &&
  1028. !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
  1029. u32 resized_height;
  1030. u32 resize_coeff_v;
  1031. u32 in_height;
  1032. tile_idx = row * ctx->in.num_cols;
  1033. in_tile = &ctx->in.tile[tile_idx];
  1034. out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
  1035. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  1036. resized_height = out_tile->width;
  1037. else
  1038. resized_height = out_tile->height;
  1039. resize_coeff_v = calc_resize_coeff(in_tile->height,
  1040. ctx->downsize_coeff_v,
  1041. resized_height, closest);
  1042. dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
  1043. __func__, row, resize_coeff_v);
  1044. /*
  1045. * With the vertical scaling factor known, round up resized
  1046. * height (output width or height) to IDMAC limitations.
  1047. */
  1048. resized_height = round_up(resized_height, 2);
  1049. /*
  1050. * Calculate input width from the last accessed input pixel
  1051. * given resized height and scaling coefficients. Align to
  1052. * IDMAC restrictions.
  1053. */
  1054. last_output = resized_height - 1;
  1055. if (closest && ((last_output * resize_coeff_v) % 8192))
  1056. last_output++;
  1057. in_height = round_up(
  1058. (DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
  1059. << ctx->downsize_coeff_v, 2);
  1060. for (col = 0; col < ctx->in.num_cols; col++) {
  1061. tile_idx = row * ctx->in.num_cols + col;
  1062. in_tile = &ctx->in.tile[tile_idx];
  1063. out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
  1064. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  1065. out_tile->width = resized_height;
  1066. else
  1067. out_tile->height = resized_height;
  1068. in_tile->height = in_height;
  1069. }
  1070. ctx->resize_coeffs_v[row] = resize_coeff_v;
  1071. }
  1072. }
  1073. /*
  1074. * return the number of runs in given queue (pending_q or done_q)
  1075. * for this context. hold irqlock when calling.
  1076. */
  1077. static int get_run_count(struct ipu_image_convert_ctx *ctx,
  1078. struct list_head *q)
  1079. {
  1080. struct ipu_image_convert_run *run;
  1081. int count = 0;
  1082. lockdep_assert_held(&ctx->chan->irqlock);
  1083. list_for_each_entry(run, q, list) {
  1084. if (run->ctx == ctx)
  1085. count++;
  1086. }
  1087. return count;
  1088. }
  1089. static void convert_stop(struct ipu_image_convert_run *run)
  1090. {
  1091. struct ipu_image_convert_ctx *ctx = run->ctx;
  1092. struct ipu_image_convert_chan *chan = ctx->chan;
  1093. struct ipu_image_convert_priv *priv = chan->priv;
  1094. dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
  1095. __func__, chan->ic_task, ctx, run);
  1096. /* disable IC tasks and the channels */
  1097. ipu_ic_task_disable(chan->ic);
  1098. ipu_idmac_disable_channel(chan->in_chan);
  1099. ipu_idmac_disable_channel(chan->out_chan);
  1100. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1101. ipu_idmac_disable_channel(chan->rotation_in_chan);
  1102. ipu_idmac_disable_channel(chan->rotation_out_chan);
  1103. ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
  1104. }
  1105. ipu_ic_disable(chan->ic);
  1106. }
  1107. static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
  1108. struct ipuv3_channel *channel,
  1109. struct ipu_image_convert_image *image,
  1110. enum ipu_rotate_mode rot_mode,
  1111. bool rot_swap_width_height,
  1112. unsigned int tile)
  1113. {
  1114. struct ipu_image_convert_chan *chan = ctx->chan;
  1115. unsigned int burst_size;
  1116. u32 width, height, stride;
  1117. dma_addr_t addr0, addr1 = 0;
  1118. struct ipu_image tile_image;
  1119. unsigned int tile_idx[2];
  1120. if (image->type == IMAGE_CONVERT_OUT) {
  1121. tile_idx[0] = ctx->out_tile_map[tile];
  1122. tile_idx[1] = ctx->out_tile_map[1];
  1123. } else {
  1124. tile_idx[0] = tile;
  1125. tile_idx[1] = 1;
  1126. }
  1127. if (rot_swap_width_height) {
  1128. width = image->tile[tile_idx[0]].height;
  1129. height = image->tile[tile_idx[0]].width;
  1130. stride = image->tile[tile_idx[0]].rot_stride;
  1131. addr0 = ctx->rot_intermediate[0].phys;
  1132. if (ctx->double_buffering)
  1133. addr1 = ctx->rot_intermediate[1].phys;
  1134. } else {
  1135. width = image->tile[tile_idx[0]].width;
  1136. height = image->tile[tile_idx[0]].height;
  1137. stride = image->stride;
  1138. addr0 = image->base.phys0 +
  1139. image->tile[tile_idx[0]].offset;
  1140. if (ctx->double_buffering)
  1141. addr1 = image->base.phys0 +
  1142. image->tile[tile_idx[1]].offset;
  1143. }
  1144. ipu_cpmem_zero(channel);
  1145. memset(&tile_image, 0, sizeof(tile_image));
  1146. tile_image.pix.width = tile_image.rect.width = width;
  1147. tile_image.pix.height = tile_image.rect.height = height;
  1148. tile_image.pix.bytesperline = stride;
  1149. tile_image.pix.pixelformat = image->fmt->fourcc;
  1150. tile_image.phys0 = addr0;
  1151. tile_image.phys1 = addr1;
  1152. if (image->fmt->planar && !rot_swap_width_height) {
  1153. tile_image.u_offset = image->tile[tile_idx[0]].u_off;
  1154. tile_image.v_offset = image->tile[tile_idx[0]].v_off;
  1155. }
  1156. ipu_cpmem_set_image(channel, &tile_image);
  1157. if (rot_mode)
  1158. ipu_cpmem_set_rotation(channel, rot_mode);
  1159. /*
  1160. * Skip writing U and V components to odd rows in the output
  1161. * channels for planar 4:2:0.
  1162. */
  1163. if ((channel == chan->out_chan ||
  1164. channel == chan->rotation_out_chan) &&
  1165. image->fmt->planar && image->fmt->uv_height_dec == 2)
  1166. ipu_cpmem_skip_odd_chroma_rows(channel);
  1167. if (channel == chan->rotation_in_chan ||
  1168. channel == chan->rotation_out_chan) {
  1169. burst_size = 8;
  1170. ipu_cpmem_set_block_mode(channel);
  1171. } else
  1172. burst_size = (width % 16) ? 8 : 16;
  1173. ipu_cpmem_set_burstsize(channel, burst_size);
  1174. ipu_ic_task_idma_init(chan->ic, channel, width, height,
  1175. burst_size, rot_mode);
  1176. /*
  1177. * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
  1178. * only do this when there is no PRG present.
  1179. */
  1180. if (!channel->ipu->prg_priv)
  1181. ipu_cpmem_set_axi_id(channel, 1);
  1182. ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
  1183. }
  1184. static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
  1185. {
  1186. struct ipu_image_convert_ctx *ctx = run->ctx;
  1187. struct ipu_image_convert_chan *chan = ctx->chan;
  1188. struct ipu_image_convert_priv *priv = chan->priv;
  1189. struct ipu_image_convert_image *s_image = &ctx->in;
  1190. struct ipu_image_convert_image *d_image = &ctx->out;
  1191. unsigned int dst_tile = ctx->out_tile_map[tile];
  1192. unsigned int dest_width, dest_height;
  1193. unsigned int col, row;
  1194. u32 rsc;
  1195. int ret;
  1196. dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
  1197. __func__, chan->ic_task, ctx, run, tile, dst_tile);
  1198. /* clear EOF irq mask */
  1199. ctx->eof_mask = 0;
  1200. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1201. /* swap width/height for resizer */
  1202. dest_width = d_image->tile[dst_tile].height;
  1203. dest_height = d_image->tile[dst_tile].width;
  1204. } else {
  1205. dest_width = d_image->tile[dst_tile].width;
  1206. dest_height = d_image->tile[dst_tile].height;
  1207. }
  1208. row = tile / s_image->num_cols;
  1209. col = tile % s_image->num_cols;
  1210. rsc = (ctx->downsize_coeff_v << 30) |
  1211. (ctx->resize_coeffs_v[row] << 16) |
  1212. (ctx->downsize_coeff_h << 14) |
  1213. (ctx->resize_coeffs_h[col]);
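/*
 * Illustrative rsc value (assumed coefficients): with
 * downsize_coeff_v/h = 1 and resize_coeffs_v/h = 8192 (1:1 in the main
 * processing section after a single /2 decimation),
 * rsc = (1 << 30) | (8192 << 16) | (1 << 14) | 8192 = 0x60006000.
 */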
  1214. dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
  1215. __func__, s_image->tile[tile].width,
  1216. s_image->tile[tile].height, dest_width, dest_height, rsc);
  1217. /* setup the IC resizer and CSC */
  1218. ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
  1219. s_image->tile[tile].width,
  1220. s_image->tile[tile].height,
  1221. dest_width,
  1222. dest_height,
  1223. rsc);
  1224. if (ret) {
  1225. dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
  1226. return ret;
  1227. }
  1228. /* init the source MEM-->IC PP IDMAC channel */
  1229. init_idmac_channel(ctx, chan->in_chan, s_image,
  1230. IPU_ROTATE_NONE, false, tile);
  1231. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1232. /* init the IC PP-->MEM IDMAC channel */
  1233. init_idmac_channel(ctx, chan->out_chan, d_image,
  1234. IPU_ROTATE_NONE, true, tile);
  1235. /* init the MEM-->IC PP ROT IDMAC channel */
  1236. init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
  1237. ctx->rot_mode, true, tile);
  1238. /* init the destination IC PP ROT-->MEM IDMAC channel */
  1239. init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
  1240. IPU_ROTATE_NONE, false, tile);
  1241. /* now link IC PP-->MEM to MEM-->IC PP ROT */
  1242. ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
  1243. } else {
  1244. /* init the destination IC PP-->MEM IDMAC channel */
  1245. init_idmac_channel(ctx, chan->out_chan, d_image,
  1246. ctx->rot_mode, false, tile);
  1247. }
  1248. /* enable the IC */
  1249. ipu_ic_enable(chan->ic);
  1250. /* set buffers ready */
  1251. ipu_idmac_select_buffer(chan->in_chan, 0);
  1252. ipu_idmac_select_buffer(chan->out_chan, 0);
  1253. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  1254. ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
  1255. if (ctx->double_buffering) {
  1256. ipu_idmac_select_buffer(chan->in_chan, 1);
  1257. ipu_idmac_select_buffer(chan->out_chan, 1);
  1258. if (ipu_rot_mode_is_irt(ctx->rot_mode))
  1259. ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
  1260. }
  1261. /* enable the channels! */
  1262. ipu_idmac_enable_channel(chan->in_chan);
  1263. ipu_idmac_enable_channel(chan->out_chan);
  1264. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1265. ipu_idmac_enable_channel(chan->rotation_in_chan);
  1266. ipu_idmac_enable_channel(chan->rotation_out_chan);
  1267. }
  1268. ipu_ic_task_enable(chan->ic);
  1269. ipu_cpmem_dump(chan->in_chan);
  1270. ipu_cpmem_dump(chan->out_chan);
  1271. if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
  1272. ipu_cpmem_dump(chan->rotation_in_chan);
  1273. ipu_cpmem_dump(chan->rotation_out_chan);
  1274. }
  1275. ipu_dump(priv->ipu);
  1276. return 0;
  1277. }
/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	return convert_start(run, 0);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}
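/*
 * Drain the done_q: pop each finished run, drop the irqlock while its
 * completion callback runs, then re-take the lock and continue until the
 * queue is empty.
 */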
static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}
/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete_all(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}
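/*
 * Return true if the resize coefficients or tile dimensions of the next
 * tile differ from the current one, in which case the IC task has to be
 * stopped and reprogrammed before the next tile is processed.
 */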
static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
{
	unsigned int cur_tile = ctx->next_tile - 1;
	unsigned int next_tile = ctx->next_tile;

	if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
	    ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
	    ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
	    ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
	    ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
	    ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
	    ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
	    ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
		return true;

	return false;
}
/* hold irqlock when calling */
static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {
		if (ic_settings_changed(ctx)) {
			convert_stop(run);
			convert_start(run, ctx->next_tile);
		} else {
			src_tile = &s_image->tile[ctx->next_tile];
			dst_idx = ctx->out_tile_map[ctx->next_tile];
			dst_tile = &d_image->tile[dst_idx];

			ipu_cpmem_set_buffer(chan->in_chan, 0,
					     s_image->base.phys0 +
					     src_tile->offset);
			ipu_cpmem_set_buffer(outch, 0,
					     d_image->base.phys0 +
					     dst_tile->offset);
			if (s_image->fmt->planar)
				ipu_cpmem_set_uv_offset(chan->in_chan,
							src_tile->u_off,
							src_tile->v_off);
			if (d_image->fmt->planar)
				ipu_cpmem_set_uv_offset(outch,
							dst_tile->u_off,
							dst_tile->v_off);

			ipu_idmac_select_buffer(chan->in_chan, 0);
			ipu_idmac_select_buffer(outch, 0);
		}
	} else if (ctx->next_tile < ctx->num_tiles - 1) {
		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->eof_mask = 0;	/* clear EOF irq mask for next tile */
	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	return IRQ_WAKE_THREAD;
}
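/*
 * EOF interrupt handler. Records which channel signaled EOF in the
 * context's eof_mask and, once the full set expected for the current tile
 * has been seen (EOF_IRQ_COMPLETE, or EOF_IRQ_ROT_COMPLETE for rotation),
 * advances the run via do_tile_complete().
 */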
static irqreturn_t eof_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	irqreturn_t ret = IRQ_HANDLED;
	bool tile_complete = false;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (irq == chan->in_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_IN;
	} else if (irq == chan->out_eof_irq) {
		ctx->eof_mask |= EOF_IRQ_OUT;
	} else if (irq == chan->rot_in_eof_irq ||
		   irq == chan->rot_out_eof_irq) {
		if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
			/* this was NOT a rotation op, shouldn't happen */
			dev_err(priv->ipu->dev,
				"Unexpected rotation interrupt\n");
			goto out;
		}
		ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
			EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
	} else {
		dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
		ret = IRQ_NONE;
		goto out;
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
	else
		tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);

	if (tile_complete)
		ret = do_tile_complete(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}
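/*
 * Release everything acquired by get_ipu_resources(): the four EOF
 * interrupts, the IDMAC channels and the IC task, then reset the handles
 * so the channel is back in its unacquired state.
 */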
static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->in_eof_irq >= 0)
		free_irq(chan->in_eof_irq, chan);
	if (chan->rot_in_eof_irq >= 0)
		free_irq(chan->rot_in_eof_irq, chan);
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->in_eof_irq = -1;
	chan->rot_in_eof_irq = -1;
	chan->out_eof_irq = -1;
	chan->rot_out_eof_irq = -1;
}
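/*
 * Look up the EOF interrupt of an IDMAC channel and request it as a
 * threaded irq (eof_irq as the hard handler, do_bh as the thread).
 * Returns the irq number on success or a negative error code.
 */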
static int get_eof_irq(struct ipu_image_convert_chan *chan,
		       struct ipuv3_channel *channel)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret, irq;

	irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);

	ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
		return ret;
	}

	return irq;
}
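/*
 * Acquire the IPU resources needed by a conversion channel: the IC task,
 * the four IDMAC channels (in/out plus the rotation pair) and their EOF
 * interrupts. On any failure everything acquired so far is released again.
 */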
static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	ret = get_eof_irq(chan, chan->in_chan);
	if (ret < 0) {
		chan->in_eof_irq = -1;
		goto err;
	}
	chan->in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_in_chan);
	if (ret < 0) {
		chan->rot_in_eof_irq = -1;
		goto err;
	}
	chan->rot_in_eof_irq = ret;

	ret = get_eof_irq(chan, chan->out_chan);
	if (ret < 0) {
		chan->out_eof_irq = -1;
		goto err;
	}
	chan->out_eof_irq = ret;

	ret = get_eof_irq(chan, chan->rotation_out_chan);
	if (ret < 0) {
		chan->rot_out_eof_irq = -1;
		goto err;
	}
	chan->rot_out_eof_irq = ret;

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}
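/*
 * Fill in an ipu_image_convert_image from the caller-supplied ipu_image:
 * copy the base image, look up the pixel format descriptor and derive the
 * line stride (in pixels for planar formats, in bytes otherwise).
 */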
static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride = ic_image->base.pix.bytesperline;

	return 0;
}
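/*
 * Clamp x to the aligned [min, max] range and round it to the nearest
 * multiple of 1 << align.
 */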
/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}
/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
			      enum ipu_rotate_mode rot_mode)
{
	const struct ipu_image_pixfmt *infmt, *outfmt;
	u32 w_align_out, h_align_out;
	u32 w_align_in, h_align_in;

	infmt = get_format(in->pix.pixelformat);
	outfmt = get_format(out->pix.pixelformat);

	/* set some default pixel formats if needed */
	if (!infmt) {
		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		infmt = get_format(V4L2_PIX_FMT_RGB24);
	}
	if (!outfmt) {
		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		outfmt = get_format(V4L2_PIX_FMT_RGB24);
	}

	/* image converter does not handle fields */
	in->pix.field = out->pix.field = V4L2_FIELD_NONE;

	/* resizer cannot downsize more than 4:1 */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.width / 4);
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.height / 4);
	} else {
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.width / 4);
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.height / 4);
	}

	/* align input width/height */
	w_align_in = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt,
					    rot_mode));
	h_align_in = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt,
					     rot_mode));
	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W,
				    w_align_in);
	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H,
				     h_align_in);

	/* align output width/height */
	w_align_out = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt,
					     rot_mode));
	h_align_out = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt,
					      rot_mode));
	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W,
				     w_align_out);
	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H,
				      h_align_out);

	/* set input/output strides and image sizes */
	in->pix.bytesperline = infmt->planar ?
		clamp_align(in->pix.width, 2 << w_align_in, MAX_W,
			    w_align_in) :
		clamp_align((in->pix.width * infmt->bpp) >> 3,
			    ((2 << w_align_in) * infmt->bpp) >> 3,
			    (MAX_W * infmt->bpp) >> 3,
			    w_align_in);
	in->pix.sizeimage = infmt->planar ?
		(in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
		in->pix.height * in->pix.bytesperline;
	out->pix.bytesperline = outfmt->planar ? out->pix.width :
		(out->pix.width * outfmt->bpp) >> 3;
	out->pix.sizeimage = outfmt->planar ?
		(out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
		out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
/*
 * this is used by ipu_image_convert_prepare() to verify that the input
 * and output images set by the client are valid before starting the
 * conversion. Clients can also call it before calling
 * ipu_image_convert_prepare().
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
			     enum ipu_rotate_mode rot_mode)
{
	struct ipu_image testin, testout;

	testin = *in;
	testout = *out;

	ipu_image_convert_adjust(&testin, &testout, rot_mode);

	if (testin.pix.width != in->pix.width ||
	    testin.pix.height != in->pix.height ||
	    testout.pix.width != out->pix.width ||
	    testout.pix.height != out->pix.height)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	unsigned int i;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	ctx->rot_mode = rot_mode;

	/* Sets ctx->in.num_rows/cols as well */
	ret = calc_image_resize_coefficients(ctx, in, out);
	if (ret)
		goto out_free;

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		d_image->num_rows = s_image->num_cols;
		d_image->num_cols = s_image->num_rows;
	} else {
		d_image->num_rows = s_image->num_rows;
		d_image->num_cols = s_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);

	find_seams(ctx, s_image, d_image);

	ret = calc_tile_dimensions(ctx, s_image);
	if (ret)
		goto out_free;

	ret = calc_tile_offsets(ctx, s_image);
	if (ret)
		goto out_free;

	calc_tile_dimensions(ctx, d_image);

	ret = calc_tile_offsets(ctx, d_image);
	if (ret)
		goto out_free;

	calc_tile_resize_coefficients(ctx);

	ret = ipu_ic_calc_csc(&ctx->csc,
			      s_image->base.pix.ycbcr_enc,
			      s_image->base.pix.quantization,
			      ipu_pixelformat_to_colorspace(s_image->fmt->fourcc),
			      d_image->base.pix.ycbcr_enc,
			      d_image->base.pix.quantization,
			      ipu_pixelformat_to_colorspace(d_image->fmt->fourcc));
	if (ret)
		goto out_free;

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer which is not possible. So double-buffering is
	 * impossible when either the source or destination images are
	 * a planar format (YUV420, YUV422P, etc.). Further, differently
	 * sized tiles or different resizing coefficients per tile
	 * prevent double-buffering as well.
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);
	for (i = 1; i < ctx->num_tiles; i++) {
		if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
		    ctx->in.tile[i].height != ctx->in.tile[0].height ||
		    ctx->out.tile[i].width != ctx->out.tile[0].width ||
		    ctx->out.tile[i].height != ctx->out.tile[0].height) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_cols; i++) {
		if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
			ctx->double_buffering = false;
			break;
		}
	}
	for (i = 1; i < ctx->in.num_rows; i++) {
		if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
			ctx->double_buffering = false;
			break;
		}
	}

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		unsigned long intermediate_size = d_image->tile[0].size;

		for (i = 1; i < ctx->num_tiles; i++) {
			if (d_image->tile[i].size > intermediate_size)
				intermediate_size = d_image->tile[i].size;
		}

		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    intermediate_size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    intermediate_size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
/*
 * Queue a single image conversion run. Only the physaddrs of the input
 * and output image buffers are needed. The conversion context must have
 * been created previously with ipu_image_convert_prepare(). The run is
 * started immediately if the channel is idle.
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
/* Abort any active or pending conversions for this context */
static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *active_run, *tmp;
	unsigned long flags;
	int run_count, ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* move all remaining pending runs in this context to done_q */
	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		if (run->ctx != ctx)
			continue;
		run->status = -EIO;
		list_move_tail(&run->list, &chan->done_q);
	}

	run_count = get_run_count(ctx, &chan->done_q);
	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
			chan->current_run : NULL;

	if (active_run)
		reinit_completion(&ctx->aborted);

	ctx->aborting = true;

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (!run_count && !active_run) {
		dev_dbg(priv->ipu->dev,
			"%s: task %u: no abort needed for ctx %p\n",
			__func__, chan->ic_task, ctx);
		return;
	}

	if (!active_run) {
		empty_done_q(chan);
		return;
	}

	dev_dbg(priv->ipu->dev,
		"%s: task %u: wait for completion: %d runs\n",
		__func__, chan->ic_task, run_count);

	ret = wait_for_completion_timeout(&ctx->aborted,
					  msecs_to_jiffies(10000));
	if (ret == 0) {
		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
		force_abort(ctx);
	}
}

void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	__ipu_image_convert_abort(ctx);
	ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned long flags;
	bool put_res;

	/* make sure no runs are hanging around */
	__ipu_image_convert_abort(ctx);

	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
		chan->ic_task, ctx);

	spin_lock_irqsave(&chan->irqlock, flags);

	list_del(&ctx->list);

	put_res = list_empty(&chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (put_res)
		release_ipu_resources(chan);

	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	free_dma_buf(priv, &ctx->rot_intermediate[0]);

	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
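/*
 * Typical asynchronous usage of the API above. This is an illustrative
 * sketch only, not taken from this file: my_in, my_out, my_complete,
 * my_priv, in_dma_addr and out_dma_addr are hypothetical, run is a
 * caller-allocated struct ipu_image_convert_run, and error handling is
 * abbreviated.
 *
 *	ipu_image_convert_adjust(&my_in, &my_out, IPU_ROTATE_NONE);
 *
 *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
 *					&my_in, &my_out, IPU_ROTATE_NONE,
 *					my_complete, my_priv);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	run->ctx = ctx;
 *	run->in_phys = in_dma_addr;
 *	run->out_phys = out_dma_addr;
 *	ret = ipu_image_convert_queue(run);
 *
 * The completion callback is invoked when the run finishes; call
 * ipu_image_convert_unprepare(ctx) once all runs are done. The "canned"
 * helpers below wrap this sequence for the single-run case.
 */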
/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
		  struct ipu_image *in, struct ipu_image *out,
		  enum ipu_rotate_mode rot_mode,
		  ipu_image_convert_cb_t complete,
		  void *complete_context)
{
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	int ret;

	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
					complete, complete_context);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run) {
		ipu_image_convert_unprepare(ctx);
		return ERR_PTR(-ENOMEM);
	}

	run->ctx = ctx;
	run->in_phys = in->phys0;
	run->out_phys = out->phys0;

	ret = ipu_image_convert_queue(run);
	if (ret) {
		ipu_image_convert_unprepare(ctx);
		kfree(run);
		return ERR_PTR(ret);
	}

	return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);
/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
					void *data)
{
	struct completion *comp = data;

	complete(comp);
}

int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			   struct ipu_image *in, struct ipu_image *out,
			   enum ipu_rotate_mode rot_mode)
{
	struct ipu_image_convert_run *run;
	struct completion comp;
	int ret;

	init_completion(&comp);

	run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
				image_convert_sync_complete, &comp);
	if (IS_ERR(run))
		return PTR_ERR(run);

	ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
	ret = (ret == 0) ? -ETIMEDOUT : 0;

	ipu_image_convert_unprepare(run->ctx);
	kfree(run);

	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
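/*
 * Allocate the image conversion private data and initialize one
 * conversion channel per IC task (irq numbers cleared, locks and run
 * queues initialized). Hardware resources are acquired lazily in
 * ipu_image_convert_prepare().
 */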
int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
	struct ipu_image_convert_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ipu->image_convert_priv = priv;
	priv->ipu = ipu;

	for (i = 0; i < IC_NUM_TASKS; i++) {
		struct ipu_image_convert_chan *chan = &priv->chan[i];

		chan->ic_task = i;
		chan->priv = priv;
		chan->dma_ch = &image_convert_dma_chan[i];
		chan->in_eof_irq = -1;
		chan->rot_in_eof_irq = -1;
		chan->out_eof_irq = -1;
		chan->rot_out_eof_irq = -1;

		spin_lock_init(&chan->irqlock);
		INIT_LIST_HEAD(&chan->ctx_list);
		INIT_LIST_HEAD(&chan->pending_q);
		INIT_LIST_HEAD(&chan->done_q);
	}

	return 0;
}
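/*
 * Nothing to tear down here: the private data is devm-allocated and the
 * per-channel hardware resources are released in
 * ipu_image_convert_unprepare() when the last context is removed.
 */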
void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}