ipu-common.c

/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>

#include <drm/drm_fourcc.h>

#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"

static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
	return readl(ipu->cm_reg + offset);
}

static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
	writel(value, ipu->cm_reg + offset);
}

int ipu_get_num(struct ipu_soc *ipu)
{
	return ipu->id;
}
EXPORT_SYMBOL_GPL(ipu_get_num);

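/*
 * Select when DP shadow register updates take effect: synchronized to
 * the next frame or immediately, via the DP_S_SRM_MODE field in
 * IPU_SRM_PRI2.
 */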
void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
{
	u32 val;

	val = ipu_cm_read(ipu, IPU_SRM_PRI2);
	val &= ~DP_S_SRM_MODE_MASK;
	val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
		      DP_S_SRM_MODE_NOW;
	ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_update);

enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_RGB565_A8:
	case DRM_FORMAT_BGR565_A8:
	case DRM_FORMAT_RGB888_A8:
	case DRM_FORMAT_BGR888_A8:
	case DRM_FORMAT_RGBX8888_A8:
	case DRM_FORMAT_BGRX8888_A8:
		return IPUV3_COLORSPACE_RGB;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
		return IPUV3_COLORSPACE_YUV;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);

enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		return IPUV3_COLORSPACE_YUV;
	case V4L2_PIX_FMT_XRGB32:
	case V4L2_PIX_FMT_XBGR32:
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB565:
		return IPUV3_COLORSPACE_RGB;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

bool ipu_pixelformat_is_planar(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);

enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
{
	switch (mbus_code & 0xf000) {
	case 0x1000:
		return IPUV3_COLORSPACE_RGB;
	case 0x2000:
		return IPUV3_COLORSPACE_YUV;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);

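/*
 * Convert a line stride given in pixels to the stride in bytes expected
 * by the CPMEM, based on the bits per pixel of the given format.
 */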
int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/*
		 * for the planar YUV formats, the stride passed to
		 * cpmem must be the stride in bytes of the Y plane.
		 * And all the planar YUV formats have an 8-bit
		 * Y component.
		 */
		return (8 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
		return (16 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB24:
		return (24 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_XBGR32:
	case V4L2_PIX_FMT_XRGB32:
		return (32 * pixel_stride) >> 3;
	default:
		break;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);

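/*
 * The rotate mode is a three-bit encoding: bit 2 selects a 90 degree
 * rotation, bit 1 a horizontal flip and bit 0 a vertical flip. The
 * hflip/vflip arguments are XORed on top of the flips implied by the
 * requested rotation angle.
 */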
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
			    bool hflip, bool vflip)
{
	u32 r90, vf, hf;

	switch (degrees) {
	case 0:
		vf = hf = r90 = 0;
		break;
	case 90:
		vf = hf = 0;
		r90 = 1;
		break;
	case 180:
		vf = hf = 1;
		r90 = 0;
		break;
	case 270:
		vf = hf = r90 = 1;
		break;
	default:
		return -EINVAL;
	}

	hf ^= (u32)hflip;
	vf ^= (u32)vflip;

	*mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);

int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
			    bool hflip, bool vflip)
{
	u32 r90, vf, hf;

	r90 = ((u32)mode >> 2) & 0x1;
	hf = ((u32)mode >> 1) & 0x1;
	vf = ((u32)mode >> 0) & 0x1;
	hf ^= (u32)hflip;
	vf ^= (u32)vflip;

	switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
	case IPU_ROTATE_NONE:
		*degrees = 0;
		break;
	case IPU_ROTATE_90_RIGHT:
		*degrees = 90;
		break;
	case IPU_ROTATE_180:
		*degrees = 180;
		break;
	case IPU_ROTATE_90_LEFT:
		*degrees = 270;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);

struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
	struct ipuv3_channel *channel;

	dev_dbg(ipu->dev, "%s %d\n", __func__, num);

	if (num > 63)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ipu->channel_lock);

	list_for_each_entry(channel, &ipu->channels, list) {
		if (channel->num == num) {
			channel = ERR_PTR(-EBUSY);
			goto out;
		}
	}

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		channel = ERR_PTR(-ENOMEM);
		goto out;
	}

	channel->num = num;
	channel->ipu = ipu;
	list_add(&channel->list, &ipu->channels);

out:
	mutex_unlock(&ipu->channel_lock);

	return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);

void ipu_idmac_put(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;

	dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

	mutex_lock(&ipu->channel_lock);

	list_del(&channel->list);
	kfree(channel);

	mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);

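/* Mask for a channel's bit within its 32-bit register bank */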
#define idma_mask(ch)			(1 << ((ch) & 0x1f))

/*
 * This is an undocumented feature: writing a one to a channel bit in
 * IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF resets the channel's
 * internal current buffer pointer so that transfers start from buffer
 * 0 on the next channel enable (that's the theory anyway, the imx6 TRM
 * only says these are read-only registers). This operation is required
 * for channel linking to work correctly; for instance, video capture
 * pipelines that carry out image rotations will fail after the first
 * streaming run unless this function is called for each channel before
 * re-enabling the channels.
 */
static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;

	ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
}

void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
		bool doublebuffer)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&ipu->lock, flags);

	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	if (doublebuffer)
		reg |= idma_mask(channel->num);
	else
		reg &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);

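/*
 * Only the channels listed here support burst locking; each entry maps
 * a channel number to its 2-bit control field in IDMAC_CH_LOCK_EN_1/2.
 */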
static const struct {
	int chnum;
	u32 reg;
	int shift;
} idmac_lock_en_info[] = {
	{ .chnum =  5, .reg = IDMAC_CH_LOCK_EN_1, .shift =  0, },
	{ .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift =  2, },
	{ .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift =  4, },
	{ .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift =  6, },
	{ .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift =  8, },
	{ .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
	{ .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
	{ .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
	{ .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
	{ .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
	{ .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
	{ .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift =  0, },
	{ .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift =  2, },
	{ .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift =  4, },
	{ .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift =  6, },
	{ .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift =  8, },
	{ .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
};

int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 bursts, regval;
	int i;

	switch (num_bursts) {
	case 0:
	case 1:
		bursts = 0x00; /* locking disabled */
		break;
	case 2:
		bursts = 0x01;
		break;
	case 4:
		bursts = 0x02;
		break;
	case 8:
		bursts = 0x03;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
	 * i.MX53 channel arbitration locking doesn't seem to work properly.
	 * Allow enabling the lock feature on IPUv3H / i.MX6 only.
	 */
	if (bursts && ipu->ipu_type != IPUV3H)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
		if (channel->num == idmac_lock_en_info[i].chnum)
			break;
	}
	if (i >= ARRAY_SIZE(idmac_lock_en_info))
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
	regval &= ~(0x03 << idmac_lock_en_info[i].shift);
	regval |= (bursts << idmac_lock_en_info[i].shift);
	ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);

int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val |= IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val |= IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	val = ipu_cm_read(ipu, IPU_CONF);
	val |= mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);

int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val &= ~IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val &= ~IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);

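/* Return the channel's current buffer number (0 or 1) from IPU_CHA_CUR_BUF */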
int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;

	return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);

bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 reg = 0;

	spin_lock_irqsave(&ipu->lock, flags);
	switch (buf_num) {
	case 0:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
		break;
	case 1:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
		break;
	case 2:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
		break;
	}
	spin_unlock_irqrestore(&ipu->lock, flags);

	return ((reg & idma_mask(channel->num)) != 0);
}
EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);

void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Mark buffer as ready. */
	if (buf_num == 0)
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
	else
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);

void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
	switch (buf_num) {
	case 0:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
		break;
	case 1:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
		break;
	case 2:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
		break;
	default:
		break;
	}
	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);

int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val |= idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);

bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
{
	return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);

int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(ms);
	while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
	       idma_mask(channel->num)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);

int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable DMA channel(s) */
	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	/* Set channel buffers NOT to be ready */
	ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

	if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF0_RDY(channel->num));
	}

	if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF1_RDY(channel->num));
	}

	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	/* Reset the double buffer */
	val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);

/*
 * The imx6 rev. D TRM says that enabling the WM feature will increase
 * a channel's priority. Refer to Table 36-8 Calculated priority value.
 * The sub-module that is the sink or source for the channel must enable
 * watermark signal for this to take effect (SMFC_WM for instance).
 */
void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
	if (enable)
		val |= 1 << (channel->num % 32);
	else
		val &= ~(1 << (channel->num % 32));
	ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);

static int ipu_memory_reset(struct ipu_soc *ipu)
{
	unsigned long timeout;

	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

/*
 * Set the source mux for the given CSI. Selects either parallel or
 * MIPI CSI2 sources.
 */
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
{
	unsigned long flags;
	u32 val, mask;

	mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
			       IPU_CONF_CSI0_DATA_SOURCE;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (mipi_csi2)
		val |= mask;
	else
		val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);

/*
 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
 */
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (vdi)
		val |= IPU_CONF_IC_INPUT;
	else
		val &= ~IPU_CONF_IC_INPUT;

	if (csi_id == 1)
		val |= IPU_CONF_CSI_SEL;
	else
		val &= ~IPU_CONF_CSI_SEL;

	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);

/* Frame Synchronization Unit Channel Linking */

struct fsu_link_reg_info {
	int chno;
	u32 reg;
	u32 mask;
	u32 val;
};

struct fsu_link_info {
	struct fsu_link_reg_info src;
	struct fsu_link_reg_info sink;
};

static const struct fsu_link_info fsu_link_info[] = {
	{
		.src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
			  FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
		.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
			  FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
	}, {
		.src  = { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
			  FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
		.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
			  FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
	}, {
		.src  = { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
			  FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
		.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
			  FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
	}, {
		.src  = { IPUV3_CHANNEL_CSI_DIRECT, 0 },
		.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
			  FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
	},
};

static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
		if (src == fsu_link_info[i].src.chno &&
		    sink == fsu_link_info[i].sink.chno)
			return &fsu_link_info[i];
	}

	return NULL;
}

/*
 * Links a source channel to a sink channel in the FSU.
 */
int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		src_reg |= link->src.val;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		sink_reg |= link->sink.val;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_link);

/*
 * Unlinks source and sink channels in the FSU.
 */
int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_unlink);

/* Link IDMAC channels in the FSU */
int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_link(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_link);

/* Unlink IDMAC channels in the FSU */
int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_unlink(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_unlink);

struct ipu_devtype {
	const char *name;
	unsigned long cm_ofs;
	unsigned long cpmem_ofs;
	unsigned long srm_ofs;
	unsigned long tpm_ofs;
	unsigned long csi0_ofs;
	unsigned long csi1_ofs;
	unsigned long ic_ofs;
	unsigned long disp0_ofs;
	unsigned long disp1_ofs;
	unsigned long dc_tmpl_ofs;
	unsigned long vdi_ofs;
	enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
	.name = "IPUv3EX",
	.cm_ofs = 0x1e000000,
	.cpmem_ofs = 0x1f000000,
	.srm_ofs = 0x1f040000,
	.tpm_ofs = 0x1f060000,
	.csi0_ofs = 0x1e030000,
	.csi1_ofs = 0x1e038000,
	.ic_ofs = 0x1e020000,
	.disp0_ofs = 0x1e040000,
	.disp1_ofs = 0x1e048000,
	.dc_tmpl_ofs = 0x1f080000,
	.vdi_ofs = 0x1e068000,
	.type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
	.name = "IPUv3M",
	.cm_ofs = 0x06000000,
	.cpmem_ofs = 0x07000000,
	.srm_ofs = 0x07040000,
	.tpm_ofs = 0x07060000,
	.csi0_ofs = 0x06030000,
	.csi1_ofs = 0x06038000,
	.ic_ofs = 0x06020000,
	.disp0_ofs = 0x06040000,
	.disp1_ofs = 0x06048000,
	.dc_tmpl_ofs = 0x07080000,
	.vdi_ofs = 0x06068000,
	.type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
	.name = "IPUv3H",
	.cm_ofs = 0x00200000,
	.cpmem_ofs = 0x00300000,
	.srm_ofs = 0x00340000,
	.tpm_ofs = 0x00360000,
	.csi0_ofs = 0x00230000,
	.csi1_ofs = 0x00238000,
	.ic_ofs = 0x00220000,
	.disp0_ofs = 0x00240000,
	.disp1_ofs = 0x00248000,
	.dc_tmpl_ofs = 0x00380000,
	.vdi_ofs = 0x00268000,
	.type = IPUV3H,
};

static const struct of_device_id imx_ipu_dt_ids[] = {
	{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
	{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
	{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
	{ .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);

static int ipu_submodules_init(struct ipu_soc *ipu,
		struct platform_device *pdev, unsigned long ipu_base,
		struct clk *ipu_clk)
{
	char *unit;
	int ret;
	struct device *dev = &pdev->dev;
	const struct ipu_devtype *devtype = ipu->devtype;

	ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
	if (ret) {
		unit = "cpmem";
		goto err_cpmem;
	}

	ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
			   IPU_CONF_CSI0_EN, ipu_clk);
	if (ret) {
		unit = "csi0";
		goto err_csi_0;
	}

	ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
			   IPU_CONF_CSI1_EN, ipu_clk);
	if (ret) {
		unit = "csi1";
		goto err_csi_1;
	}

	ret = ipu_ic_init(ipu, dev,
			  ipu_base + devtype->ic_ofs,
			  ipu_base + devtype->tpm_ofs);
	if (ret) {
		unit = "ic";
		goto err_ic;
	}

	ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
			   IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
			   IPU_CONF_IC_INPUT);
	if (ret) {
		unit = "vdi";
		goto err_vdi;
	}

	ret = ipu_image_convert_init(ipu, dev);
	if (ret) {
		unit = "image_convert";
		goto err_image_convert;
	}

	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
			  IPU_CONF_DI0_EN, ipu_clk);
	if (ret) {
		unit = "di0";
		goto err_di_0;
	}

	ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
			  IPU_CONF_DI1_EN, ipu_clk);
	if (ret) {
		unit = "di1";
		goto err_di_1;
	}

	ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
			  IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
	if (ret) {
		unit = "dc_template";
		goto err_dc;
	}

	ret = ipu_dmfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
	if (ret) {
		unit = "dmfc";
		goto err_dmfc;
	}

	ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
	if (ret) {
		unit = "dp";
		goto err_dp;
	}

	ret = ipu_smfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
	if (ret) {
		unit = "smfc";
		goto err_smfc;
	}

	return 0;

err_smfc:
	ipu_dp_exit(ipu);
err_dp:
	ipu_dmfc_exit(ipu);
err_dmfc:
	ipu_dc_exit(ipu);
err_dc:
	ipu_di_exit(ipu, 1);
err_di_1:
	ipu_di_exit(ipu, 0);
err_di_0:
	ipu_image_convert_exit(ipu);
err_image_convert:
	ipu_vdi_exit(ipu);
err_vdi:
	ipu_ic_exit(ipu);
err_ic:
	ipu_csi_exit(ipu, 1);
err_csi_1:
	ipu_csi_exit(ipu, 0);
err_csi_0:
	ipu_cpmem_exit(ipu);
err_cpmem:
	dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
	return ret;
}

static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
	unsigned long status;
	int i, bit, irq;

	for (i = 0; i < num_regs; i++) {

		status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
		status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));

		for_each_set_bit(bit, &status, 32) {
			irq = irq_linear_revmap(ipu->domain,
						regs[i] * 32 + bit);
			if (irq)
				generic_handle_irq(irq);
		}
	}
}

static void ipu_irq_handler(struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14 };

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

static void ipu_err_irq_handler(struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	static const int int_reg[] = { 4, 5, 8, 9 };

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
	int virq;

	virq = irq_linear_revmap(ipu->domain, irq);
	if (!virq)
		virq = irq_create_mapping(ipu->domain, irq);

	return virq;
}
EXPORT_SYMBOL_GPL(ipu_map_irq);

int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
		enum ipu_channel_irq irq_type)
{
	return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);

static void ipu_submodules_exit(struct ipu_soc *ipu)
{
	ipu_smfc_exit(ipu);
	ipu_dp_exit(ipu);
	ipu_dmfc_exit(ipu);
	ipu_dc_exit(ipu);
	ipu_di_exit(ipu, 1);
	ipu_di_exit(ipu, 0);
	ipu_image_convert_exit(ipu);
	ipu_vdi_exit(ipu);
	ipu_ic_exit(ipu);
	ipu_csi_exit(ipu, 1);
	ipu_csi_exit(ipu, 0);
	ipu_cpmem_exit(ipu);
}

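/* device_for_each_child() callback: unregister one child platform device */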
static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}

static void platform_device_unregister_children(struct platform_device *pdev)
{
	device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}

struct ipu_platform_reg {
	struct ipu_client_platformdata pdata;
	const char *name;
};

/* These must be in the order of the corresponding device tree port nodes */
static struct ipu_platform_reg client_reg[] = {
	{
		.pdata = {
			.csi = 0,
			.dma[0] = IPUV3_CHANNEL_CSI0,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-csi",
	}, {
		.pdata = {
			.csi = 1,
			.dma[0] = IPUV3_CHANNEL_CSI1,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-csi",
	}, {
		.pdata = {
			.di = 0,
			.dc = 5,
			.dp = IPU_DP_FLOW_SYNC_BG,
			.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
			.dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
		},
		.name = "imx-ipuv3-crtc",
	}, {
		.pdata = {
			.di = 1,
			.dc = 1,
			.dp = -EINVAL,
			.dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-crtc",
	},
};

static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;

static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
	struct device *dev = ipu->dev;
	unsigned i;
	int id, ret;

	mutex_lock(&ipu_client_id_mutex);
	id = ipu_client_id;
	ipu_client_id += ARRAY_SIZE(client_reg);
	mutex_unlock(&ipu_client_id_mutex);

	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
		struct ipu_platform_reg *reg = &client_reg[i];
		struct platform_device *pdev;
		struct device_node *of_node;

		/* Associate subdevice with the corresponding port node */
		of_node = of_graph_get_port_by_id(dev->of_node, i);
		if (!of_node) {
			dev_info(dev,
				 "no port@%d node in %pOF, not using %s%d\n",
				 i, dev->of_node,
				 (i / 2) ? "DI" : "CSI", i % 2);
			continue;
		}

		pdev = platform_device_alloc(reg->name, id++);
		if (!pdev) {
			ret = -ENOMEM;
			goto err_register;
		}

		pdev->dev.parent = dev;

		reg->pdata.of_node = of_node;
		ret = platform_device_add_data(pdev, &reg->pdata,
					       sizeof(reg->pdata));
		if (!ret)
			ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			goto err_register;
		}
	}

	return 0;

err_register:
	platform_device_unregister_children(to_platform_device(dev));

	return ret;
}

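/*
 * Set up a linear IRQ domain with one generic chip per bank of 32
 * interrupts, mask and clear everything, then install the chained
 * handlers for the sync and error interrupt lines.
 */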
static int ipu_irq_init(struct ipu_soc *ipu)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned long unused[IPU_NUM_IRQS / 32] = {
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x4077ffff, 0xffe7e1fd,
		0x23fffffe, 0x8880fff0,
		0xf98fe7d0, 0xfff81fff,
		0x400100d0, 0xffe000fd,
		0x00000000,
	};
	int ret, i;

	ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
					    &irq_generic_chip_ops, ipu);
	if (!ipu->domain) {
		dev_err(ipu->dev, "failed to add irq domain\n");
		return -ENODEV;
	}

	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
					     handle_level_irq, 0, 0, 0);
	if (ret < 0) {
		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
		irq_domain_remove(ipu->domain);
		return ret;
	}

	/* Mask and clear all interrupts */
	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
		ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
	}

	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		gc = irq_get_domain_generic_chip(ipu->domain, i);
		gc->reg_base = ipu->cm_reg;
		gc->unused = unused[i / 32];
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;
		ct->regs.ack = IPU_INT_STAT(i / 32);
		ct->regs.mask = IPU_INT_CTRL(i / 32);
	}

	irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
	irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
					 ipu);

	return 0;
}

static void ipu_irq_exit(struct ipu_soc *ipu)
{
	int i, irq;

	irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
	irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);

	/* TODO: remove irq_domain_generic_chips */

	for (i = 0; i < IPU_NUM_IRQS; i++) {
		irq = irq_linear_revmap(ipu->domain, i);
		if (irq)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(ipu->domain);
}

void ipu_dump(struct ipu_soc *ipu)
{
	int i;

	dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CONF));
	dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CONF));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
	dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
	for (i = 0; i < 15; i++)
		dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
			ipu_cm_read(ipu, IPU_INT_CTRL(i)));
}
EXPORT_SYMBOL_GPL(ipu_dump);

static int ipu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ipu_soc *ipu;
	struct resource *res;
	unsigned long ipu_base;
	int ret, irq_sync, irq_err;
	const struct ipu_devtype *devtype;

	devtype = of_device_get_match_data(&pdev->dev);
	if (!devtype)
		return -EINVAL;

	irq_sync = platform_get_irq(pdev, 0);
	irq_err = platform_get_irq(pdev, 1);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
		irq_sync, irq_err);

	if (!res || irq_sync < 0 || irq_err < 0)
		return -ENODEV;

	ipu_base = res->start;

	ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
	if (!ipu)
		return -ENODEV;

	ipu->id = of_alias_get_id(np, "ipu");
	if (ipu->id < 0)
		ipu->id = 0;

	if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
	    IS_ENABLED(CONFIG_DRM)) {
		ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
							  "fsl,prg", ipu->id);
		if (!ipu->prg_priv)
			return -EPROBE_DEFER;
	}

	ipu->devtype = devtype;
	ipu->ipu_type = devtype->type;

	spin_lock_init(&ipu->lock);
	mutex_init(&ipu->channel_lock);
	INIT_LIST_HEAD(&ipu->channels);

	dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
		ipu_base + devtype->cm_ofs);
	dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
	dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
		ipu_base + devtype->cpmem_ofs);
	dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
		ipu_base + devtype->csi0_ofs);
	dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
		ipu_base + devtype->csi1_ofs);
	dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
		ipu_base + devtype->ic_ofs);
	dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
		ipu_base + devtype->disp0_ofs);
	dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
		ipu_base + devtype->disp1_ofs);
	dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
		ipu_base + devtype->srm_ofs);
	dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
		ipu_base + devtype->tpm_ofs);
	dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
	dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
	dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
	dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
		ipu_base + devtype->vdi_ofs);

	ipu->cm_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs, PAGE_SIZE);
	ipu->idmac_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
			PAGE_SIZE);
	if (!ipu->cm_reg || !ipu->idmac_reg)
		return -ENOMEM;

	ipu->clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(ipu->clk)) {
		ret = PTR_ERR(ipu->clk);
		dev_err(&pdev->dev, "clk_get failed with %d", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ipu);

	ret = clk_prepare_enable(ipu->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ipu->dev = &pdev->dev;
	ipu->irq_sync = irq_sync;
	ipu->irq_err = irq_err;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
		goto out_failed_reset;
	}
	ret = ipu_memory_reset(ipu);
	if (ret)
		goto out_failed_reset;

	ret = ipu_irq_init(ipu);
	if (ret)
		goto out_failed_irq;

	/* Set MCU_T to divide MCU access window into 2 */
	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
		     IPU_DISP_GEN);

	ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
	if (ret)
		goto failed_submodules_init;

	ret = ipu_add_client_devices(ipu, ipu_base);
	if (ret) {
		dev_err(&pdev->dev, "adding client devices failed with %d\n",
			ret);
		goto failed_add_clients;
	}

	dev_info(&pdev->dev, "%s probed\n", devtype->name);

	return 0;

failed_add_clients:
	ipu_submodules_exit(ipu);
failed_submodules_init:
	ipu_irq_exit(ipu);
out_failed_irq:
out_failed_reset:
	clk_disable_unprepare(ipu->clk);
	return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
	struct ipu_soc *ipu = platform_get_drvdata(pdev);

	platform_device_unregister_children(pdev);
	ipu_submodules_exit(ipu);
	ipu_irq_exit(ipu);

	clk_disable_unprepare(ipu->clk);

	return 0;
}

static struct platform_driver imx_ipu_driver = {
	.driver = {
		.name = "imx-ipuv3",
		.of_match_table = imx_ipu_dt_ids,
	},
	.probe = ipu_probe,
	.remove = ipu_remove,
};

static struct platform_driver * const drivers[] = {
#if IS_ENABLED(CONFIG_DRM)
	&ipu_pre_drv,
	&ipu_prg_drv,
#endif
	&imx_ipu_driver,
};

static int __init imx_ipu_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_ipu_init);

static void __exit imx_ipu_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(imx_ipu_exit);

MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");