dsi_host.c 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461
  1. /*
  2. * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/delay.h>
  15. #include <linux/err.h>
  16. #include <linux/gpio.h>
  17. #include <linux/gpio/consumer.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/of_device.h>
  20. #include <linux/of_gpio.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/pinctrl/consumer.h>
  23. #include <linux/of_graph.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/mfd/syscon.h>
  27. #include <linux/regmap.h>
  28. #include <video/mipi_display.h>
  29. #include "dsi.h"
  30. #include "dsi.xml.h"
  31. #include "sfpb.xml.h"
  32. #include "dsi_cfg.h"
  33. #include "msm_kms.h"
  34. #define DSI_RESET_TOGGLE_DELAY_MS 20
  35. static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
  36. {
  37. u32 ver;
  38. if (!major || !minor)
  39. return -EINVAL;
  40. /*
  41. * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
  42. * makes all other registers 4-byte shifted down.
  43. *
  44. * In order to identify between DSI6G(v3) and beyond, and DSIv2 and
  45. * older, we read the DSI_VERSION register without any shift(offset
  46. * 0x1f0). In the case of DSIv2, this hast to be a non-zero value. In
  47. * the case of DSI6G, this has to be zero (the offset points to a
  48. * scratch register which we never touch)
  49. */
  50. ver = msm_readl(base + REG_DSI_VERSION);
  51. if (ver) {
  52. /* older dsi host, there is no register shift */
  53. ver = FIELD(ver, DSI_VERSION_MAJOR);
  54. if (ver <= MSM_DSI_VER_MAJOR_V2) {
  55. /* old versions */
  56. *major = ver;
  57. *minor = 0;
  58. return 0;
  59. } else {
  60. return -EINVAL;
  61. }
  62. } else {
  63. /*
  64. * newer host, offset 0 has 6G_HW_VERSION, the rest of the
  65. * registers are shifted down, read DSI_VERSION again with
  66. * the shifted offset
  67. */
  68. ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
  69. ver = FIELD(ver, DSI_VERSION_MAJOR);
  70. if (ver == MSM_DSI_VER_MAJOR_6G) {
  71. /* 6G version */
  72. *major = ver;
  73. *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
  74. return 0;
  75. } else {
  76. return -EINVAL;
  77. }
  78. }
  79. }
  80. #define DSI_ERR_STATE_ACK 0x0000
  81. #define DSI_ERR_STATE_TIMEOUT 0x0001
  82. #define DSI_ERR_STATE_DLN0_PHY 0x0002
  83. #define DSI_ERR_STATE_FIFO 0x0004
  84. #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
  85. #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
  86. #define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
  87. #define DSI_CLK_CTRL_ENABLE_CLKS \
  88. (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
  89. DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
  90. DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
  91. DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
/* Per-controller state for one MSM DSI host. */
struct msm_dsi_host {
	struct mipi_dsi_host base;	/* embedded generic MIPI DSI host */
	struct platform_device *pdev;
	struct drm_device *dev;
	int id;

	void __iomem *ctrl_base;	/* mapped controller register space */
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];

	/* bus clocks listed in the per-SoC config (see dsi_clk_init()) */
	struct clk *bus_clks[DSI_BUS_CLK_MAX];

	/* link clocks and their parents */
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;
	struct clk *byte_intf_clk;	/* may be NULL; guarded before use */
	u32 byte_clk_rate;		/* Hz, computed in dsi_calc_pclk() */
	u32 pixel_clk_rate;		/* Hz, computed in dsi_calc_pclk() */
	u32 esc_clk_rate;		/* Hz */

	/* DSI v2 specific clocks */
	struct clk *src_clk;
	struct clk *esc_clk_src;
	struct clk *dsi_clk_src;
	u32 src_clk_rate;

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;

	const struct msm_dsi_cfg_handler *cfg_hnd;	/* per-version config/ops */

	struct completion dma_comp;
	struct completion video_comp;
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;	/* DSI_ERR_STATE_* bits pending for err_work */
	struct work_struct err_work;
	struct work_struct hpd_work;
	struct workqueue_struct *workqueue;

	/* DSI 6G TX buffer */
	struct drm_gem_object *tx_gem_obj;

	/* DSI v2 TX buffer */
	void *tx_buf;
	dma_addr_t tx_buf_paddr;

	int tx_size;
	u8 *rx_buf;

	struct regmap *sfpb;

	struct drm_display_mode *mode;	/* current mode; NULL until set */

	/* connected device info */
	struct device_node *device_node;
	unsigned int channel;
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;

	/* lane data parsed via DT */
	int dlane_swap;
	int num_data_lanes;

	u32 dma_cmd_ctrl_restore;

	bool registered;
	bool power_on;
	bool enabled;
	int irq;
};
  150. static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
  151. {
  152. switch (fmt) {
  153. case MIPI_DSI_FMT_RGB565: return 16;
  154. case MIPI_DSI_FMT_RGB666_PACKED: return 18;
  155. case MIPI_DSI_FMT_RGB666:
  156. case MIPI_DSI_FMT_RGB888:
  157. default: return 24;
  158. }
  159. }
/* Read a 32-bit DSI controller register at byte offset @reg. */
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return msm_readl(msm_host->ctrl_base + reg);
}
/* Write @data to the 32-bit DSI controller register at byte offset @reg. */
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	msm_writel(data, msm_host->ctrl_base + reg);
}
  168. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
  169. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
  170. static const struct msm_dsi_cfg_handler *dsi_get_config(
  171. struct msm_dsi_host *msm_host)
  172. {
  173. const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
  174. struct device *dev = &msm_host->pdev->dev;
  175. struct regulator *gdsc_reg;
  176. struct clk *ahb_clk;
  177. int ret;
  178. u32 major = 0, minor = 0;
  179. gdsc_reg = regulator_get(dev, "gdsc");
  180. if (IS_ERR(gdsc_reg)) {
  181. pr_err("%s: cannot get gdsc\n", __func__);
  182. goto exit;
  183. }
  184. ahb_clk = msm_clk_get(msm_host->pdev, "iface");
  185. if (IS_ERR(ahb_clk)) {
  186. pr_err("%s: cannot get interface clock\n", __func__);
  187. goto put_gdsc;
  188. }
  189. pm_runtime_get_sync(dev);
  190. ret = regulator_enable(gdsc_reg);
  191. if (ret) {
  192. pr_err("%s: unable to enable gdsc\n", __func__);
  193. goto put_gdsc;
  194. }
  195. ret = clk_prepare_enable(ahb_clk);
  196. if (ret) {
  197. pr_err("%s: unable to enable ahb_clk\n", __func__);
  198. goto disable_gdsc;
  199. }
  200. ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
  201. if (ret) {
  202. pr_err("%s: Invalid version\n", __func__);
  203. goto disable_clks;
  204. }
  205. cfg_hnd = msm_dsi_cfg_get(major, minor);
  206. DBG("%s: Version %x:%x\n", __func__, major, minor);
  207. disable_clks:
  208. clk_disable_unprepare(ahb_clk);
  209. disable_gdsc:
  210. regulator_disable(gdsc_reg);
  211. pm_runtime_put_sync(dev);
  212. put_gdsc:
  213. regulator_put(gdsc_reg);
  214. exit:
  215. return cfg_hnd;
  216. }
/* Map the embedded mipi_dsi_host back to its containing msm_dsi_host. */
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}
  221. static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
  222. {
  223. struct regulator_bulk_data *s = msm_host->supplies;
  224. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  225. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  226. int i;
  227. DBG("");
  228. for (i = num - 1; i >= 0; i--)
  229. if (regs[i].disable_load >= 0)
  230. regulator_set_load(s[i].consumer,
  231. regs[i].disable_load);
  232. regulator_bulk_disable(num, s);
  233. }
  234. static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
  235. {
  236. struct regulator_bulk_data *s = msm_host->supplies;
  237. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  238. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  239. int ret, i;
  240. DBG("");
  241. for (i = 0; i < num; i++) {
  242. if (regs[i].enable_load >= 0) {
  243. ret = regulator_set_load(s[i].consumer,
  244. regs[i].enable_load);
  245. if (ret < 0) {
  246. pr_err("regulator %d set op mode failed, %d\n",
  247. i, ret);
  248. goto fail;
  249. }
  250. }
  251. }
  252. ret = regulator_bulk_enable(num, s);
  253. if (ret < 0) {
  254. pr_err("regulator enable failed, %d\n", ret);
  255. goto fail;
  256. }
  257. return 0;
  258. fail:
  259. for (i--; i >= 0; i--)
  260. regulator_set_load(s[i].consumer, regs[i].disable_load);
  261. return ret;
  262. }
  263. static int dsi_regulator_init(struct msm_dsi_host *msm_host)
  264. {
  265. struct regulator_bulk_data *s = msm_host->supplies;
  266. const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
  267. int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
  268. int i, ret;
  269. for (i = 0; i < num; i++)
  270. s[i].supply = regs[i].name;
  271. ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
  272. if (ret < 0) {
  273. pr_err("%s: failed to init regulator, ret=%d\n",
  274. __func__, ret);
  275. return ret;
  276. }
  277. return 0;
  278. }
  279. int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
  280. {
  281. struct platform_device *pdev = msm_host->pdev;
  282. int ret = 0;
  283. msm_host->src_clk = msm_clk_get(pdev, "src");
  284. if (IS_ERR(msm_host->src_clk)) {
  285. ret = PTR_ERR(msm_host->src_clk);
  286. pr_err("%s: can't find src clock. ret=%d\n",
  287. __func__, ret);
  288. msm_host->src_clk = NULL;
  289. return ret;
  290. }
  291. msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
  292. if (!msm_host->esc_clk_src) {
  293. ret = -ENODEV;
  294. pr_err("%s: can't get esc clock parent. ret=%d\n",
  295. __func__, ret);
  296. return ret;
  297. }
  298. msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
  299. if (!msm_host->dsi_clk_src) {
  300. ret = -ENODEV;
  301. pr_err("%s: can't get src clock parent. ret=%d\n",
  302. __func__, ret);
  303. }
  304. return ret;
  305. }
  306. int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
  307. {
  308. struct platform_device *pdev = msm_host->pdev;
  309. int ret = 0;
  310. msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
  311. if (IS_ERR(msm_host->byte_intf_clk)) {
  312. ret = PTR_ERR(msm_host->byte_intf_clk);
  313. pr_err("%s: can't find byte_intf clock. ret=%d\n",
  314. __func__, ret);
  315. }
  316. return ret;
  317. }
/*
 * Look up every clock the DSI host needs: the per-SoC bus clocks from the
 * config table, the link clocks (byte/pixel/esc) with the parents of byte
 * and pixel, and finally any version-specific extras via clk_init_ver.
 * Returns 0 on success or the first lookup error.
 */
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct platform_device *pdev = msm_host->pdev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
	int i, ret = 0;

	/* get bus clocks */
	for (i = 0; i < cfg->num_bus_clks; i++) {
		msm_host->bus_clks[i] = msm_clk_get(pdev,
						cfg->bus_clk_names[i]);
		if (IS_ERR(msm_host->bus_clks[i])) {
			ret = PTR_ERR(msm_host->bus_clks[i]);
			pr_err("%s: Unable to get %s clock, ret = %d\n",
				__func__, cfg->bus_clk_names[i], ret);
			goto exit;
		}
	}

	/* get link and source clocks */
	msm_host->byte_clk = msm_clk_get(pdev, "byte");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte clock. ret=%d\n",
			__func__, ret);
		/* NULL the handle so later cleanup can't touch an ERR_PTR */
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = msm_clk_get(pdev, "core");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc clock. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
	if (IS_ERR(msm_host->byte_clk_src)) {
		ret = PTR_ERR(msm_host->byte_clk_src);
		pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
		goto exit;
	}

	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
	if (IS_ERR(msm_host->pixel_clk_src)) {
		ret = PTR_ERR(msm_host->pixel_clk_src);
		pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
		goto exit;
	}

	/* version-specific extra clocks (e.g. byte_intf on 6G v2) */
	if (cfg_hnd->ops->clk_init_ver)
		ret = cfg_hnd->ops->clk_init_ver(msm_host);

exit:
	return ret;
}
  377. static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
  378. {
  379. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  380. int i, ret;
  381. DBG("id=%d", msm_host->id);
  382. for (i = 0; i < cfg->num_bus_clks; i++) {
  383. ret = clk_prepare_enable(msm_host->bus_clks[i]);
  384. if (ret) {
  385. pr_err("%s: failed to enable bus clock %d ret %d\n",
  386. __func__, i, ret);
  387. goto err;
  388. }
  389. }
  390. return 0;
  391. err:
  392. for (; i > 0; i--)
  393. clk_disable_unprepare(msm_host->bus_clks[i]);
  394. return ret;
  395. }
  396. static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
  397. {
  398. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  399. int i;
  400. DBG("");
  401. for (i = cfg->num_bus_clks - 1; i >= 0; i--)
  402. clk_disable_unprepare(msm_host->bus_clks[i]);
  403. }
  404. int msm_dsi_runtime_suspend(struct device *dev)
  405. {
  406. struct platform_device *pdev = to_platform_device(dev);
  407. struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
  408. struct mipi_dsi_host *host = msm_dsi->host;
  409. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  410. if (!msm_host->cfg_hnd)
  411. return 0;
  412. dsi_bus_clk_disable(msm_host);
  413. return 0;
  414. }
  415. int msm_dsi_runtime_resume(struct device *dev)
  416. {
  417. struct platform_device *pdev = to_platform_device(dev);
  418. struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
  419. struct mipi_dsi_host *host = msm_dsi->host;
  420. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  421. if (!msm_host->cfg_hnd)
  422. return 0;
  423. return dsi_bus_clk_enable(msm_host);
  424. }
/*
 * Program the previously computed link clock rates and enable the DSI 6G
 * link clocks (esc, byte, pixel, byte_intf — in that order). byte_intf_clk
 * may be NULL and is skipped when absent. On failure the already-enabled
 * clocks are unwound via the goto ladder; returns 0 or a negative errno.
 */
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	if (msm_host->byte_intf_clk) {
		/* byte_intf runs at half the byte clock rate */
		ret = clk_set_rate(msm_host->byte_intf_clk,
			msm_host->byte_clk_rate / 2);
		if (ret) {
			pr_err("%s: Failed to set rate byte intf clk, %d\n",
				__func__, ret);
			goto error;
		}
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	if (msm_host->byte_intf_clk) {
		ret = clk_prepare_enable(msm_host->byte_intf_clk);
		if (ret) {
			pr_err("%s: Failed to enable byte intf clk\n",
			       __func__);
			goto byte_intf_clk_err;
		}
	}

	return 0;

	/* unwind in reverse order of the enables above */
byte_intf_clk_err:
	clk_disable_unprepare(msm_host->pixel_clk);
pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}
/*
 * Program the previously computed link clock rates and enable the DSIv2
 * link clocks (byte, esc, src, pixel — in that order). On failure the
 * already-enabled clocks are unwound via the goto ladder; returns 0 or a
 * negative errno.
 */
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate,
		msm_host->esc_clk_rate, msm_host->src_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	ret = clk_prepare_enable(msm_host->src_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi src clk\n", __func__);
		goto src_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

	/* unwind in reverse order of the enables above */
pixel_clk_err:
	clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
error:
	return ret;
}
/* Shut down the DSI 6G link clocks enabled by dsi_link_clk_enable_6g(). */
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
	clk_disable_unprepare(msm_host->esc_clk);
	clk_disable_unprepare(msm_host->pixel_clk);
	/* byte_intf_clk may be NULL on configs that don't provide it */
	if (msm_host->byte_intf_clk)
		clk_disable_unprepare(msm_host->byte_intf_clk);
	clk_disable_unprepare(msm_host->byte_clk);
}
/* Shut down the DSIv2 link clocks enabled by dsi_link_clk_enable_v2(). */
void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
{
	clk_disable_unprepare(msm_host->pixel_clk);
	clk_disable_unprepare(msm_host->src_clk);
	clk_disable_unprepare(msm_host->esc_clk);
	clk_disable_unprepare(msm_host->byte_clk);
}
  553. static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
  554. {
  555. struct drm_display_mode *mode = msm_host->mode;
  556. u32 pclk_rate;
  557. pclk_rate = mode->clock * 1000;
  558. /*
  559. * For dual DSI mode, the current DRM mode has the complete width of the
  560. * panel. Since, the complete panel is driven by two DSI controllers,
  561. * the clock rates have to be split between the two dsi controllers.
  562. * Adjust the byte and pixel clock rates for each dsi host accordingly.
  563. */
  564. if (is_dual_dsi)
  565. pclk_rate /= 2;
  566. return pclk_rate;
  567. }
  568. static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
  569. {
  570. u8 lanes = msm_host->lanes;
  571. u32 bpp = dsi_get_bpp(msm_host->format);
  572. u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
  573. u64 pclk_bpp = (u64)pclk_rate * bpp;
  574. if (lanes == 0) {
  575. pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
  576. lanes = 1;
  577. }
  578. do_div(pclk_bpp, (8 * lanes));
  579. msm_host->pixel_clk_rate = pclk_rate;
  580. msm_host->byte_clk_rate = pclk_bpp;
  581. DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
  582. msm_host->byte_clk_rate);
  583. }
  584. int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
  585. {
  586. if (!msm_host->mode) {
  587. pr_err("%s: mode not set\n", __func__);
  588. return -EINVAL;
  589. }
  590. dsi_calc_pclk(msm_host, is_dual_dsi);
  591. msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
  592. return 0;
  593. }
/*
 * Compute the DSIv2 clock rates: pixel/byte via dsi_calc_pclk(), the src
 * clock as pclk * bpp / 8, and an escape clock derived from the byte
 * clock through a 4-bit divider, targeting 5-20 MHz. Returns -EINVAL if
 * no divider in [1, 16] produces an escape rate in that window.
 */
int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
	u32 bpp = dsi_get_bpp(msm_host->format);
	u64 pclk_bpp;
	unsigned int esc_mhz, esc_div;
	unsigned long byte_mhz;

	dsi_calc_pclk(msm_host, is_dual_dsi);

	/* src clock carries one byte per pixel-clock tick: pclk * bpp / 8 */
	pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
	do_div(pclk_bpp, 8);
	msm_host->src_clk_rate = pclk_bpp;

	/*
	 * esc clock is byte clock followed by a 4 bit divider,
	 * we need to find an escape clock frequency within the
	 * mipi DSI spec range within the maximum divider limit
	 * We iterate here between an escape clock frequency
	 * between 20 Mhz to 5 Mhz and pick up the first one
	 * that can be supported by our divider
	 */
	byte_mhz = msm_host->byte_clk_rate / 1000000;

	for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
		esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);

		/*
		 * TODO: Ideally, we shouldn't know what sort of divider
		 * is available in mmss_cc, we're just assuming that
		 * it'll always be a 4 bit divider. Need to come up with
		 * a better way here.
		 */
		if (esc_div >= 1 && esc_div <= 16)
			break;
	}

	/* loop ran out (esc_mhz dropped to 4) without finding a divider */
	if (esc_mhz < 5)
		return -EINVAL;

	msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;

	DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
		msm_host->src_clk_rate);

	return 0;
}
  631. static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
  632. {
  633. u32 intr;
  634. unsigned long flags;
  635. spin_lock_irqsave(&msm_host->intr_lock, flags);
  636. intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
  637. if (enable)
  638. intr |= mask;
  639. else
  640. intr &= ~mask;
  641. DBG("intr=%x enable=%d", intr, enable);
  642. dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
  643. spin_unlock_irqrestore(&msm_host->intr_lock, flags);
  644. }
  645. static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
  646. {
  647. if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
  648. return BURST_MODE;
  649. else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
  650. return NON_BURST_SYNCH_PULSE;
  651. return NON_BURST_SYNCH_EVENT;
  652. }
  653. static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
  654. const enum mipi_dsi_pixel_format mipi_fmt)
  655. {
  656. switch (mipi_fmt) {
  657. case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
  658. case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
  659. case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
  660. case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
  661. default: return VID_DST_FORMAT_RGB888;
  662. }
  663. }
  664. static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
  665. const enum mipi_dsi_pixel_format mipi_fmt)
  666. {
  667. switch (mipi_fmt) {
  668. case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
  669. case MIPI_DSI_FMT_RGB666_PACKED:
  670. case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
  671. case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
  672. default: return CMD_DST_FORMAT_RGB888;
  673. }
  674. }
  675. static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
  676. struct msm_dsi_phy_shared_timings *phy_shared_timings)
  677. {
  678. u32 flags = msm_host->mode_flags;
  679. enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
  680. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  681. u32 data = 0;
  682. if (!enable) {
  683. dsi_write(msm_host, REG_DSI_CTRL, 0);
  684. return;
  685. }
  686. if (flags & MIPI_DSI_MODE_VIDEO) {
  687. if (flags & MIPI_DSI_MODE_VIDEO_HSE)
  688. data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
  689. if (flags & MIPI_DSI_MODE_VIDEO_HFP)
  690. data |= DSI_VID_CFG0_HFP_POWER_STOP;
  691. if (flags & MIPI_DSI_MODE_VIDEO_HBP)
  692. data |= DSI_VID_CFG0_HBP_POWER_STOP;
  693. if (flags & MIPI_DSI_MODE_VIDEO_HSA)
  694. data |= DSI_VID_CFG0_HSA_POWER_STOP;
  695. /* Always set low power stop mode for BLLP
  696. * to let command engine send packets
  697. */
  698. data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
  699. DSI_VID_CFG0_BLLP_POWER_STOP;
  700. data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
  701. data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
  702. data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
  703. dsi_write(msm_host, REG_DSI_VID_CFG0, data);
  704. /* Do not swap RGB colors */
  705. data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
  706. dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
  707. } else {
  708. /* Do not swap RGB colors */
  709. data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
  710. data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
  711. dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
  712. data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
  713. DSI_CMD_CFG1_WR_MEM_CONTINUE(
  714. MIPI_DCS_WRITE_MEMORY_CONTINUE);
  715. /* Always insert DCS command */
  716. data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
  717. dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
  718. }
  719. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
  720. DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
  721. DSI_CMD_DMA_CTRL_LOW_POWER);
  722. data = 0;
  723. /* Always assume dedicated TE pin */
  724. data |= DSI_TRIG_CTRL_TE;
  725. data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
  726. data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
  727. data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
  728. if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
  729. (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
  730. data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
  731. dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
  732. data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
  733. DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
  734. dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
  735. if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
  736. (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
  737. phy_shared_timings->clk_pre_inc_by_2)
  738. dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
  739. DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
  740. data = 0;
  741. if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
  742. data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
  743. dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
  744. /* allow only ack-err-status to generate interrupt */
  745. dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
  746. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  747. dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
  748. data = DSI_CTRL_CLK_EN;
  749. DBG("lane number=%d", msm_host->lanes);
  750. data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
  751. dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
  752. DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
  753. if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
  754. dsi_write(msm_host, REG_DSI_LANE_CTRL,
  755. DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
  756. data |= DSI_CTRL_ENABLE;
  757. dsi_write(msm_host, REG_DSI_CTRL, data);
  758. }
/*
 * dsi_timing_setup - program the controller timing registers from the
 * current DRM display mode (msm_host->mode).
 * @is_dual_dsi: true when two controllers each drive half the panel, in
 *		 which case the horizontal timings are halved per host.
 *
 * Video mode programs the ACTIVE/TOTAL/SYNC windows; command mode
 * programs the MDP stream word count and stream totals instead.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	/* active region positions are measured from the sync start (0) */
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 hdisplay = mode->hdisplay;
	u32 wc;

	DBG("");

	/*
	 * For dual DSI mode, the current DRM mode has
	 * the complete width of the panel. Since, the complete
	 * panel is driven by two DSI controllers, the horizontal
	 * timings have to be split between the two dsi controllers.
	 * Adjust the DSI host timing values accordingly.
	 */
	if (is_dual_dsi) {
		h_total /= 2;
		hs_end /= 2;
		ha_start /= 2;
		ha_end /= 2;
		hdisplay /= 2;
	}

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		/* TOTAL registers are programmed as (total - 1) */
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
/*
 * dsi_sw_reset - pulse the controller soft reset.
 *
 * Clocks are force-enabled first; the write barrier and the delay between
 * asserting and deasserting RESET ensure the hardware actually observes
 * the reset pulse.
 */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
  827. static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
  828. bool video_mode, bool enable)
  829. {
  830. u32 dsi_ctrl;
  831. dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
  832. if (!enable) {
  833. dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
  834. DSI_CTRL_CMD_MODE_EN);
  835. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
  836. DSI_IRQ_MASK_VIDEO_DONE, 0);
  837. } else {
  838. if (video_mode) {
  839. dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
  840. } else { /* command mode */
  841. dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
  842. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
  843. }
  844. dsi_ctrl |= DSI_CTRL_ENABLE;
  845. }
  846. dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
  847. }
  848. static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
  849. {
  850. u32 data;
  851. data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
  852. if (mode == 0)
  853. data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
  854. else
  855. data |= DSI_CMD_DMA_CTRL_LOW_POWER;
  856. dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
  857. }
  858. static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
  859. {
  860. u32 ret = 0;
  861. struct device *dev = &msm_host->pdev->dev;
  862. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
  863. reinit_completion(&msm_host->video_comp);
  864. ret = wait_for_completion_timeout(&msm_host->video_comp,
  865. msecs_to_jiffies(70));
  866. if (ret <= 0)
  867. dev_err(dev, "wait for video done timed out\n");
  868. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
  869. }
  870. static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
  871. {
  872. if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
  873. return;
  874. if (msm_host->power_on && msm_host->enabled) {
  875. dsi_wait4video_done(msm_host);
  876. /* delay 4 ms to skip BLLP */
  877. usleep_range(2000, 4000);
  878. }
  879. }
  880. int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
  881. {
  882. struct drm_device *dev = msm_host->dev;
  883. struct msm_drm_private *priv = dev->dev_private;
  884. uint64_t iova;
  885. u8 *data;
  886. data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
  887. priv->kms->aspace,
  888. &msm_host->tx_gem_obj, &iova);
  889. if (IS_ERR(data)) {
  890. msm_host->tx_gem_obj = NULL;
  891. return PTR_ERR(data);
  892. }
  893. msm_host->tx_size = msm_host->tx_gem_obj->size;
  894. return 0;
  895. }
  896. int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
  897. {
  898. struct drm_device *dev = msm_host->dev;
  899. msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
  900. &msm_host->tx_buf_paddr, GFP_KERNEL);
  901. if (!msm_host->tx_buf)
  902. return -ENOMEM;
  903. msm_host->tx_size = size;
  904. return 0;
  905. }
  906. static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
  907. {
  908. struct drm_device *dev = msm_host->dev;
  909. struct msm_drm_private *priv;
  910. /*
  911. * This is possible if we're tearing down before we've had a chance to
  912. * fully initialize. A very real possibility if our probe is deferred,
  913. * in which case we'll hit msm_dsi_host_destroy() without having run
  914. * through the dsi_tx_buf_alloc().
  915. */
  916. if (!dev)
  917. return;
  918. priv = dev->dev_private;
  919. if (msm_host->tx_gem_obj) {
  920. msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
  921. drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
  922. msm_host->tx_gem_obj = NULL;
  923. }
  924. if (msm_host->tx_buf)
  925. dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
  926. msm_host->tx_buf_paddr);
  927. }
/*
 * Map and return the kernel vaddr of the 6G tx GEM buffer; balanced by
 * dsi_tx_buf_put_6g(). Callers check the result with IS_ERR().
 */
void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
{
	return msm_gem_get_vaddr(msm_host->tx_gem_obj);
}
/* Return the coherent tx buffer set up by dsi_tx_buf_alloc_v2(). */
void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
{
	return msm_host->tx_buf;
}
/* Drop the vaddr mapping taken by dsi_tx_buf_get_6g(). */
void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
{
	msm_gem_put_vaddr(msm_host->tx_gem_obj);
}
/*
 * dsi_cmd_dma_add - pack a MIPI DSI message into the tx buffer in the
 * MSM-specific DMA format; returns the 4-byte-aligned transfer length,
 * or a negative errno on failure.
 *
 * Layout written to the buffer: the three packet header bytes reordered
 * as header[1], header[2], header[0]; then a flags byte (bit7 = last
 * packet, bit6 = long packet, bit5 = expects a read response); then the
 * long-packet payload; finally 0xff padding up to the aligned length.
 */
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
			const struct mipi_dsi_msg *msg)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}

	/* round the DMA length up to a multiple of 4 bytes */
	len = (packet.size + 3) & (~0x3);
	if (len > msm_host->tx_size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	data = cfg_hnd->ops->tx_buf_get(msm_host);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		pr_err("%s: get vaddr failed, %d\n", __func__, ret);
		return ret;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];

	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	/* only the 6G variant has a put hook (unmaps the GEM vaddr) */
	if (cfg_hnd->ops->tx_buf_put)
		cfg_hnd->ops->tx_buf_put(msm_host);

	return len;
}
  986. /*
  987. * dsi_short_read1_resp: 1 parameter
  988. */
  989. static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  990. {
  991. u8 *data = msg->rx_buf;
  992. if (data && (msg->rx_len >= 1)) {
  993. *data = buf[1]; /* strip out dcs type */
  994. return 1;
  995. } else {
  996. pr_err("%s: read data does not match with rx_buf len %zu\n",
  997. __func__, msg->rx_len);
  998. return -EINVAL;
  999. }
  1000. }
  1001. /*
  1002. * dsi_short_read2_resp: 2 parameter
  1003. */
  1004. static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  1005. {
  1006. u8 *data = msg->rx_buf;
  1007. if (data && (msg->rx_len >= 2)) {
  1008. data[0] = buf[1]; /* strip out dcs type */
  1009. data[1] = buf[2];
  1010. return 2;
  1011. } else {
  1012. pr_err("%s: read data does not match with rx_buf len %zu\n",
  1013. __func__, msg->rx_len);
  1014. return -EINVAL;
  1015. }
  1016. }
  1017. static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
  1018. {
  1019. /* strip out 4 byte dcs header */
  1020. if (msg->rx_buf && msg->rx_len)
  1021. memcpy(msg->rx_buf, buf + 4, msg->rx_len);
  1022. return msg->rx_len;
  1023. }
  1024. int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
  1025. {
  1026. struct drm_device *dev = msm_host->dev;
  1027. struct msm_drm_private *priv = dev->dev_private;
  1028. if (!dma_base)
  1029. return -EINVAL;
  1030. return msm_gem_get_iova(msm_host->tx_gem_obj,
  1031. priv->kms->aspace, dma_base);
  1032. }
/* Report the coherent DMA address of the v2 tx buffer in *dma_base. */
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
	if (!dma_base)
		return -EINVAL;
	*dma_base = msm_host->tx_buf_paddr;
	return 0;
}
  1040. static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
  1041. {
  1042. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1043. int ret;
  1044. uint64_t dma_base;
  1045. bool triggered;
  1046. ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
  1047. if (ret) {
  1048. pr_err("%s: failed to get iova: %d\n", __func__, ret);
  1049. return ret;
  1050. }
  1051. reinit_completion(&msm_host->dma_comp);
  1052. dsi_wait4video_eng_busy(msm_host);
  1053. triggered = msm_dsi_manager_cmd_xfer_trigger(
  1054. msm_host->id, dma_base, len);
  1055. if (triggered) {
  1056. ret = wait_for_completion_timeout(&msm_host->dma_comp,
  1057. msecs_to_jiffies(200));
  1058. DBG("ret=%d", ret);
  1059. if (ret == 0)
  1060. ret = -ETIMEDOUT;
  1061. else
  1062. ret = len;
  1063. } else
  1064. ret = len;
  1065. return ret;
  1066. }
/*
 * dsi_cmd_dma_rx - drain the RDBK_DATA FIFO into @buf.
 * @buf: position in msm_host->rx_buf to append newly read bytes
 * @rx_byte: total bytes expected for this read
 * @pkt_size: current max-return-packet-size set on the panel
 *
 * Reads up to four 32-bit RDBK_DATA registers (16 bytes), converting each
 * word from network to host byte order, and copies the non-repeated tail
 * into @buf. Returns the number of bytes appended.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	/* NOTE(review): lp is written but never read — candidate for removal */
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];	/* staging area for the 4 RDBK_DATA registers */
	int repeated_bytes = 0;
	int buf_offset = buf - msm_host->rx_buf;

	lp = (u32 *)buf;
	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	/* registers are read highest-index first so reg[] ends up in order */
	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	/* copy only the fresh (non-repeated) bytes into the caller's buffer */
	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
  1110. static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
  1111. const struct mipi_dsi_msg *msg)
  1112. {
  1113. int len, ret;
  1114. int bllp_len = msm_host->mode->hdisplay *
  1115. dsi_get_bpp(msm_host->format) / 8;
  1116. len = dsi_cmd_dma_add(msm_host, msg);
  1117. if (!len) {
  1118. pr_err("%s: failed to add cmd type = 0x%x\n",
  1119. __func__, msg->type);
  1120. return -EINVAL;
  1121. }
  1122. /* for video mode, do not send cmds more than
  1123. * one pixel line, since it only transmit it
  1124. * during BLLP.
  1125. */
  1126. /* TODO: if the command is sent in LP mode, the bit rate is only
  1127. * half of esc clk rate. In this case, if the video is already
  1128. * actively streaming, we need to check more carefully if the
  1129. * command can be fit into one BLLP.
  1130. */
  1131. if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
  1132. pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
  1133. __func__, len);
  1134. return -EINVAL;
  1135. }
  1136. ret = dsi_cmd_dma_tx(msm_host, len);
  1137. if (ret < len) {
  1138. pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
  1139. __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
  1140. return -ECOMM;
  1141. }
  1142. return len;
  1143. }
/*
 * dsi_sw_reset_restore - reset the controller and restore its previous
 * CTRL register value (used by the error worker after an MDP FIFO
 * underflow).
 *
 * Sequence matters: the controller is disabled first, clocks are force-
 * enabled, the reset is pulsed, and only then is the saved CTRL value
 * written back. Each wmb() orders one step against the next.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}
  1166. static void dsi_hpd_worker(struct work_struct *work)
  1167. {
  1168. struct msm_dsi_host *msm_host =
  1169. container_of(work, struct msm_dsi_host, hpd_work);
  1170. drm_helper_hpd_irq_event(msm_host->dev);
  1171. }
  1172. static void dsi_err_worker(struct work_struct *work)
  1173. {
  1174. struct msm_dsi_host *msm_host =
  1175. container_of(work, struct msm_dsi_host, err_work);
  1176. u32 status = msm_host->err_work_state;
  1177. pr_err_ratelimited("%s: status=%x\n", __func__, status);
  1178. if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
  1179. dsi_sw_reset_restore(msm_host);
  1180. /* It is safe to clear here because error irq is disabled. */
  1181. msm_host->err_work_state = 0;
  1182. /* enable dsi error interrupt */
  1183. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
  1184. }
  1185. static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
  1186. {
  1187. u32 status;
  1188. status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
  1189. if (status) {
  1190. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
  1191. /* Writing of an extra 0 needed to clear error bits */
  1192. dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
  1193. msm_host->err_work_state |= DSI_ERR_STATE_ACK;
  1194. }
  1195. }
  1196. static void dsi_timeout_status(struct msm_dsi_host *msm_host)
  1197. {
  1198. u32 status;
  1199. status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
  1200. if (status) {
  1201. dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
  1202. msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
  1203. }
  1204. }
  1205. static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
  1206. {
  1207. u32 status;
  1208. status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
  1209. if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
  1210. DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
  1211. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
  1212. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
  1213. DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
  1214. dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
  1215. msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
  1216. }
  1217. }
  1218. static void dsi_fifo_status(struct msm_dsi_host *msm_host)
  1219. {
  1220. u32 status;
  1221. status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
  1222. /* fifo underflow, overflow */
  1223. if (status) {
  1224. dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
  1225. msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
  1226. if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
  1227. msm_host->err_work_state |=
  1228. DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
  1229. }
  1230. }
  1231. static void dsi_status(struct msm_dsi_host *msm_host)
  1232. {
  1233. u32 status;
  1234. status = dsi_read(msm_host, REG_DSI_STATUS0);
  1235. if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
  1236. dsi_write(msm_host, REG_DSI_STATUS0, status);
  1237. msm_host->err_work_state |=
  1238. DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
  1239. }
  1240. }
  1241. static void dsi_clk_status(struct msm_dsi_host *msm_host)
  1242. {
  1243. u32 status;
  1244. status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
  1245. if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
  1246. dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
  1247. msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
  1248. }
  1249. }
/*
 * dsi_error - IRQ-context error triage.
 *
 * Masks the error interrupt first so the sticky status registers can be
 * harvested without new errors re-firing, accumulates the causes into
 * err_work_state via the helpers, then defers the actual recovery to
 * dsi_err_worker(), which re-enables the interrupt when done.
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
/*
 * dsi_host_irq - top-half interrupt handler.
 *
 * Reads and immediately acks the interrupt status under intr_lock (the
 * same lock dsi_intr_ctrl() uses for its read-modify-write), then
 * dispatches: errors to dsi_error(), and video-done / cmd-dma-done to
 * their respective completions. Bails out early if the register base is
 * not mapped yet.
 */
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	/* writing the status back acks the pending interrupts */
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);

	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}
/*
 * dsi_host_init_panel_gpios - acquire optional panel GPIOs from the
 * panel's DT node: "disp-enable" (driven low initially) and "disp-te"
 * (input). Both are optional; a missing GPIO yields NULL, not an error.
 * Returns 0 on success or the errno from a failed lookup (note: on
 * failure the corresponding msm_host field holds the ERR_PTR).
 */
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
			struct device *panel_device)
{
	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
							 "disp-enable",
							 GPIOD_OUT_LOW);
	if (IS_ERR(msm_host->disp_en_gpio)) {
		DBG("cannot get disp-enable-gpios %ld",
				PTR_ERR(msm_host->disp_en_gpio));
		return PTR_ERR(msm_host->disp_en_gpio);
	}

	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
								GPIOD_IN);
	if (IS_ERR(msm_host->te_gpio)) {
		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
		return PTR_ERR(msm_host->te_gpio);
	}

	return 0;
}
/*
 * dsi_host_attach - mipi_dsi_host_ops.attach: a DSI peripheral bound to
 * this host. Validates the requested lane count against the host's DT
 * "data-lanes", caches the device's channel/lanes/format/mode flags,
 * informs the DSI manager, claims panel GPIOs, and schedules a hotplug
 * event if the DRM device is already registered.
 */
static int dsi_host_attach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	if (dsi->lanes > msm_host->num_data_lanes)
		return -EINVAL;

	msm_host->channel = dsi->channel;
	msm_host->lanes = dsi->lanes;
	msm_host->format = dsi->format;
	msm_host->mode_flags = dsi->mode_flags;

	msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);

	/* Some gpios defined in panel DT need to be controlled by host */
	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
	if (ret)
		return ret;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		queue_work(msm_host->workqueue, &msm_host->hpd_work);

	return 0;
}
/*
 * dsi_host_detach - mipi_dsi_host_ops.detach: forget the attached device
 * node and schedule a hotplug event if the DRM device is registered.
 */
static int dsi_host_detach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	msm_host->device_node = NULL;

	DBG("id=%d", msm_host->id);
	if (msm_host->dev)
		queue_work(msm_host->workqueue, &msm_host->hpd_work);

	return 0;
}
/*
 * dsi_host_transfer - mipi_dsi_host_ops.transfer: forward a DSI message
 * to the manager under cmd_mutex (one command transfer at a time per
 * host). Rejects NULL messages and transfers while powered off.
 */
static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
					const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	if (!msg || !msm_host->power_on)
		return -EINVAL;

	mutex_lock(&msm_host->cmd_mutex);
	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
	mutex_unlock(&msm_host->cmd_mutex);

	return ret;
}
  1344. static struct mipi_dsi_host_ops dsi_host_ops = {
  1345. .attach = dsi_host_attach,
  1346. .detach = dsi_host_detach,
  1347. .transfer = dsi_host_transfer,
  1348. };
/*
 * List of supported physical to logical lane mappings.
 * For example, the 2nd entry represents the following mapping:
 *
 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
 *
 * The index of the matching entry is stored in msm_host->dlane_swap and
 * programmed into the DLN_SWAP_SEL field of REG_DSI_LANE_SWAP_CTRL.
 */
static const int supported_data_lane_swaps[][4] = {
	{ 0, 1, 2, 3 },
	{ 3, 0, 1, 2 },
	{ 2, 3, 0, 1 },
	{ 1, 2, 3, 0 },
	{ 0, 3, 2, 1 },
	{ 1, 0, 3, 2 },
	{ 2, 1, 0, 3 },
	{ 3, 2, 1, 0 },
};
  1365. static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
  1366. struct device_node *ep)
  1367. {
  1368. struct device *dev = &msm_host->pdev->dev;
  1369. struct property *prop;
  1370. u32 lane_map[4];
  1371. int ret, i, len, num_lanes;
  1372. prop = of_find_property(ep, "data-lanes", &len);
  1373. if (!prop) {
  1374. dev_dbg(dev,
  1375. "failed to find data lane mapping, using default\n");
  1376. return 0;
  1377. }
  1378. num_lanes = len / sizeof(u32);
  1379. if (num_lanes < 1 || num_lanes > 4) {
  1380. dev_err(dev, "bad number of data lanes\n");
  1381. return -EINVAL;
  1382. }
  1383. msm_host->num_data_lanes = num_lanes;
  1384. ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
  1385. num_lanes);
  1386. if (ret) {
  1387. dev_err(dev, "failed to read lane data\n");
  1388. return ret;
  1389. }
  1390. /*
  1391. * compare DT specified physical-logical lane mappings with the ones
  1392. * supported by hardware
  1393. */
  1394. for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
  1395. const int *swap = supported_data_lane_swaps[i];
  1396. int j;
  1397. /*
  1398. * the data-lanes array we get from DT has a logical->physical
  1399. * mapping. The "data lane swap" register field represents
  1400. * supported configurations in a physical->logical mapping.
  1401. * Translate the DT mapping to what we understand and find a
  1402. * configuration that works.
  1403. */
  1404. for (j = 0; j < num_lanes; j++) {
  1405. if (lane_map[j] < 0 || lane_map[j] > 3)
  1406. dev_err(dev, "bad physical lane entry %u\n",
  1407. lane_map[j]);
  1408. if (swap[lane_map[j]] != j)
  1409. break;
  1410. }
  1411. if (j == num_lanes) {
  1412. msm_host->dlane_swap = i;
  1413. return 0;
  1414. }
  1415. }
  1416. return -EINVAL;
  1417. }
/*
 * dsi_host_parse_dt - parse the host's DT node: output-port endpoint,
 * lane mapping, the attached panel/bridge node, and the optional
 * "syscon-sfpb" regmap.
 *
 * A missing endpoint or remote device is NOT an error (nothing may be
 * connected to the DSI output); both cases return 0.
 */
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *endpoint, *device_node;
	int ret = 0;

	/*
	 * Get the endpoint of the output port of the DSI host. In our case,
	 * this is mapped to port number with reg = 1. Don't return an error if
	 * the remote endpoint isn't defined. It's possible that there is
	 * nothing connected to the dsi output.
	 */
	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
	if (!endpoint) {
		dev_dbg(dev, "%s: no endpoint\n", __func__);
		return 0;
	}

	ret = dsi_host_parse_lane_data(msm_host, endpoint);
	if (ret) {
		dev_err(dev, "%s: invalid lane configuration %d\n",
			__func__, ret);
		goto err;
	}

	/* Get panel node from the output port's endpoint data */
	device_node = of_graph_get_remote_node(np, 1, 0);
	if (!device_node) {
		/* ret is still 0 here: no device is a valid configuration */
		dev_dbg(dev, "%s: no valid device\n", __func__);
		goto err;
	}

	msm_host->device_node = device_node;

	if (of_property_read_bool(np, "syscon-sfpb")) {
		msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
					"syscon-sfpb");
		if (IS_ERR(msm_host->sfpb)) {
			dev_err(dev, "%s: failed to get sfpb regmap\n",
				__func__);
			ret = PTR_ERR(msm_host->sfpb);
		}
	}

	/*
	 * NOTE(review): device_node is stored in msm_host but its reference
	 * is dropped right here — confirm the node's lifetime is guaranteed
	 * elsewhere before relying on the cached pointer.
	 */
	of_node_put(device_node);

err:
	of_node_put(endpoint);

	return ret;
}
  1462. static int dsi_host_get_id(struct msm_dsi_host *msm_host)
  1463. {
  1464. struct platform_device *pdev = msm_host->pdev;
  1465. const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
  1466. struct resource *res;
  1467. int i;
  1468. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
  1469. if (!res)
  1470. return -EINVAL;
  1471. for (i = 0; i < cfg->num_dsi; i++) {
  1472. if (cfg->io_start[i] == res->start)
  1473. return i;
  1474. }
  1475. return -EINVAL;
  1476. }
  1477. int msm_dsi_host_init(struct msm_dsi *msm_dsi)
  1478. {
  1479. struct msm_dsi_host *msm_host = NULL;
  1480. struct platform_device *pdev = msm_dsi->pdev;
  1481. int ret;
  1482. msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
  1483. if (!msm_host) {
  1484. pr_err("%s: FAILED: cannot alloc dsi host\n",
  1485. __func__);
  1486. ret = -ENOMEM;
  1487. goto fail;
  1488. }
  1489. msm_host->pdev = pdev;
  1490. msm_dsi->host = &msm_host->base;
  1491. ret = dsi_host_parse_dt(msm_host);
  1492. if (ret) {
  1493. pr_err("%s: failed to parse dt\n", __func__);
  1494. goto fail;
  1495. }
  1496. msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
  1497. if (IS_ERR(msm_host->ctrl_base)) {
  1498. pr_err("%s: unable to map Dsi ctrl base\n", __func__);
  1499. ret = PTR_ERR(msm_host->ctrl_base);
  1500. goto fail;
  1501. }
  1502. pm_runtime_enable(&pdev->dev);
  1503. msm_host->cfg_hnd = dsi_get_config(msm_host);
  1504. if (!msm_host->cfg_hnd) {
  1505. ret = -EINVAL;
  1506. pr_err("%s: get config failed\n", __func__);
  1507. goto fail;
  1508. }
  1509. msm_host->id = dsi_host_get_id(msm_host);
  1510. if (msm_host->id < 0) {
  1511. ret = msm_host->id;
  1512. pr_err("%s: unable to identify DSI host index\n", __func__);
  1513. goto fail;
  1514. }
  1515. /* fixup base address by io offset */
  1516. msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
  1517. ret = dsi_regulator_init(msm_host);
  1518. if (ret) {
  1519. pr_err("%s: regulator init failed\n", __func__);
  1520. goto fail;
  1521. }
  1522. ret = dsi_clk_init(msm_host);
  1523. if (ret) {
  1524. pr_err("%s: unable to initialize dsi clks\n", __func__);
  1525. goto fail;
  1526. }
  1527. msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
  1528. if (!msm_host->rx_buf) {
  1529. ret = -ENOMEM;
  1530. pr_err("%s: alloc rx temp buf failed\n", __func__);
  1531. goto fail;
  1532. }
  1533. init_completion(&msm_host->dma_comp);
  1534. init_completion(&msm_host->video_comp);
  1535. mutex_init(&msm_host->dev_mutex);
  1536. mutex_init(&msm_host->cmd_mutex);
  1537. spin_lock_init(&msm_host->intr_lock);
  1538. /* setup workqueue */
  1539. msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
  1540. INIT_WORK(&msm_host->err_work, dsi_err_worker);
  1541. INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
  1542. msm_dsi->id = msm_host->id;
  1543. DBG("Dsi Host %d initialized", msm_host->id);
  1544. return 0;
  1545. fail:
  1546. return ret;
  1547. }
  1548. void msm_dsi_host_destroy(struct mipi_dsi_host *host)
  1549. {
  1550. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1551. DBG("");
  1552. dsi_tx_buf_free(msm_host);
  1553. if (msm_host->workqueue) {
  1554. flush_workqueue(msm_host->workqueue);
  1555. destroy_workqueue(msm_host->workqueue);
  1556. msm_host->workqueue = NULL;
  1557. }
  1558. mutex_destroy(&msm_host->cmd_mutex);
  1559. mutex_destroy(&msm_host->dev_mutex);
  1560. pm_runtime_disable(&msm_host->pdev->dev);
  1561. }
  1562. int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
  1563. struct drm_device *dev)
  1564. {
  1565. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1566. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1567. struct platform_device *pdev = msm_host->pdev;
  1568. int ret;
  1569. msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  1570. if (msm_host->irq < 0) {
  1571. ret = msm_host->irq;
  1572. dev_err(dev->dev, "failed to get irq: %d\n", ret);
  1573. return ret;
  1574. }
  1575. ret = devm_request_irq(&pdev->dev, msm_host->irq,
  1576. dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1577. "dsi_isr", msm_host);
  1578. if (ret < 0) {
  1579. dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
  1580. msm_host->irq, ret);
  1581. return ret;
  1582. }
  1583. msm_host->dev = dev;
  1584. ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
  1585. if (ret) {
  1586. pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
  1587. return ret;
  1588. }
  1589. return 0;
  1590. }
  1591. int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
  1592. {
  1593. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1594. int ret;
  1595. /* Register mipi dsi host */
  1596. if (!msm_host->registered) {
  1597. host->dev = &msm_host->pdev->dev;
  1598. host->ops = &dsi_host_ops;
  1599. ret = mipi_dsi_host_register(host);
  1600. if (ret)
  1601. return ret;
  1602. msm_host->registered = true;
  1603. /* If the panel driver has not been probed after host register,
  1604. * we should defer the host's probe.
  1605. * It makes sure panel is connected when fbcon detects
  1606. * connector status and gets the proper display mode to
  1607. * create framebuffer.
  1608. * Don't try to defer if there is nothing connected to the dsi
  1609. * output
  1610. */
  1611. if (check_defer && msm_host->device_node) {
  1612. if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
  1613. if (!of_drm_find_bridge(msm_host->device_node))
  1614. return -EPROBE_DEFER;
  1615. }
  1616. }
  1617. return 0;
  1618. }
  1619. void msm_dsi_host_unregister(struct mipi_dsi_host *host)
  1620. {
  1621. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1622. if (msm_host->registered) {
  1623. mipi_dsi_host_unregister(host);
  1624. host->dev = NULL;
  1625. host->ops = NULL;
  1626. msm_host->registered = false;
  1627. }
  1628. }
  1629. int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
  1630. const struct mipi_dsi_msg *msg)
  1631. {
  1632. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1633. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1634. /* TODO: make sure dsi_cmd_mdp is idle.
  1635. * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
  1636. * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
  1637. * How to handle the old versions? Wait for mdp cmd done?
  1638. */
  1639. /*
  1640. * mdss interrupt is generated in mdp core clock domain
  1641. * mdp clock need to be enabled to receive dsi interrupt
  1642. */
  1643. pm_runtime_get_sync(&msm_host->pdev->dev);
  1644. cfg_hnd->ops->link_clk_enable(msm_host);
  1645. /* TODO: vote for bus bandwidth */
  1646. if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
  1647. dsi_set_tx_power_mode(0, msm_host);
  1648. msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
  1649. dsi_write(msm_host, REG_DSI_CTRL,
  1650. msm_host->dma_cmd_ctrl_restore |
  1651. DSI_CTRL_CMD_MODE_EN |
  1652. DSI_CTRL_ENABLE);
  1653. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
  1654. return 0;
  1655. }
  1656. void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
  1657. const struct mipi_dsi_msg *msg)
  1658. {
  1659. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1660. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1661. dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
  1662. dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
  1663. if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
  1664. dsi_set_tx_power_mode(1, msm_host);
  1665. /* TODO: unvote for bus bandwidth */
  1666. cfg_hnd->ops->link_clk_disable(msm_host);
  1667. pm_runtime_put_autosuspend(&msm_host->pdev->dev);
  1668. }
  1669. int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
  1670. const struct mipi_dsi_msg *msg)
  1671. {
  1672. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1673. return dsi_cmds2buf_tx(msm_host, msg);
  1674. }
/*
 * msm_dsi_host_cmd_rx - send a read command and collect the response
 * @host: DSI host
 * @msg: read message; msg->rx_len bytes are expected back
 *
 * Responses of up to 2 bytes fit in a short packet and need a single
 * transfer.  Longer responses are fetched in chunks (10 payload bytes on
 * the first pass, 14 thereafter) because the RX FIFO is 16 bytes: each
 * pass sets the maximum return packet size, re-sends the read command,
 * and drains the readback registers into msm_host->rx_buf.
 *
 * Returns the number of bytes copied to msg->rx_buf by the response
 * parser, 0 if nothing was read, or a negative errno.
 */
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
	const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	char cmd;
	int rlen = msg->rx_len;
	u8 *buf;

	if (rlen <= 2) {
		/* Fits in a short response packet: one 4-byte readback. */
		short_response = 1;
		pkt_size = rlen;
		rx_byte = 4;
	} else {
		short_response = 0;
		data_byte = 10;	/* first read */
		if (rlen < data_byte)
			pkt_size = rlen;
		else
			pkt_size = data_byte;
		rx_byte = data_byte + 6; /* 4 header + 2 crc */
	}

	buf = msm_host->rx_buf;
	end = 0;
	while (!end) {
		/* Tell the peripheral how much it may return this pass. */
		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
		struct mipi_dsi_msg max_pkt_size_msg = {
			.channel = msg->channel,
			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
			.tx_len = 2,
			.tx_buf = tx,
		};

		DBG("rlen=%d pkt_size=%d rx_byte=%d",
			rlen, pkt_size, rx_byte);

		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
		if (ret < 2) {
			pr_err("%s: Set max pkt size failed, %d\n",
				__func__, ret);
			return -EINVAL;
		}

		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
			/* Clear the RDBK_DATA registers */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
					DSI_RDBK_DATA_CTRL_CLR);
			wmb(); /* make sure the RDBK registers are cleared */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
			wmb(); /* release cleared status before transfer */
		}

		/* Re-send the read command itself for this chunk. */
		ret = dsi_cmds2buf_tx(msm_host, msg);
		if (ret < msg->tx_len) {
			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
			return ret;
		}

		/*
		 * once cmd_dma_done interrupt received,
		 * return data from client is ready and stored
		 * at RDBK_DATA register already
		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
		 * after that dcs header lost during shift into registers
		 */
		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);

		if (dlen <= 0)
			return 0;

		if (short_response)
			break;

		if (rlen <= data_byte) {
			/* Final chunk; diff = unused bytes in this chunk. */
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		if (!end) {
			dlen -= 2; /* 2 crc */
			dlen -= diff;
			buf += dlen;	/* next start position */
			data_byte = 14;	/* NOT first read */
			/* pkt_size grows cumulatively for the next pass. */
			if (rlen < data_byte)
				pkt_size += rlen;
			else
				pkt_size += data_byte;

			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
		}
	}

	/*
	 * For single Long read, if the requested rlen < 10,
	 * we need to shift the start position of rx
	 * data buffer to skip the bytes which are not
	 * updated.
	 */
	if (pkt_size < 10 && !short_response)
		buf = msm_host->rx_buf + (10 - rlen);
	else
		buf = msm_host->rx_buf;

	/* First byte is the DSI response data type; dispatch on it. */
	cmd = buf[0];
	switch (cmd) {
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
		ret = 0;
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		ret = dsi_short_read1_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		ret = dsi_short_read2_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		ret = dsi_long_read_resp(buf, msg);
		break;
	default:
		pr_warn("%s:Invalid response cmd\n", __func__);
		ret = 0;
	}

	return ret;
}
/*
 * msm_dsi_host_cmd_xfer_commit - kick a prepared command DMA transfer
 * @host: DSI host
 * @dma_base: DMA address of the command buffer
 * @len: length of the command data in bytes
 *
 * Programs the DMA base/length registers and writes the trigger bit.
 * Completion is signalled via the CMD_DMA_DONE interrupt unmasked in
 * msm_dsi_host_xfer_prepare().
 */
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
	u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);

	/* Make sure trigger happens */
	wmb();
}
  1805. int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
  1806. struct msm_dsi_pll *src_pll)
  1807. {
  1808. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1809. struct clk *byte_clk_provider, *pixel_clk_provider;
  1810. int ret;
  1811. ret = msm_dsi_pll_get_clk_provider(src_pll,
  1812. &byte_clk_provider, &pixel_clk_provider);
  1813. if (ret) {
  1814. pr_info("%s: can't get provider from pll, don't set parent\n",
  1815. __func__);
  1816. return 0;
  1817. }
  1818. ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
  1819. if (ret) {
  1820. pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
  1821. __func__, ret);
  1822. goto exit;
  1823. }
  1824. ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
  1825. if (ret) {
  1826. pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
  1827. __func__, ret);
  1828. goto exit;
  1829. }
  1830. if (msm_host->dsi_clk_src) {
  1831. ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
  1832. if (ret) {
  1833. pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
  1834. __func__, ret);
  1835. goto exit;
  1836. }
  1837. }
  1838. if (msm_host->esc_clk_src) {
  1839. ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
  1840. if (ret) {
  1841. pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
  1842. __func__, ret);
  1843. goto exit;
  1844. }
  1845. }
  1846. exit:
  1847. return ret;
  1848. }
/*
 * msm_dsi_host_reset_phy - pulse the PHY reset bit in the DSI controller
 * @host: DSI host
 *
 * Asserts DSI_PHY_RESET, holds it for 1 ms, then deasserts and waits
 * 100 us.  The delay values are inherited from the original driver --
 * NOTE(review): no datasheet reference visible here to confirm them.
 */
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
  1860. void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
  1861. struct msm_dsi_phy_clk_request *clk_req,
  1862. bool is_dual_dsi)
  1863. {
  1864. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1865. const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
  1866. int ret;
  1867. ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
  1868. if (ret) {
  1869. pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
  1870. return;
  1871. }
  1872. clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
  1873. clk_req->escclk_rate = msm_host->esc_clk_rate;
  1874. }
  1875. int msm_dsi_host_enable(struct mipi_dsi_host *host)
  1876. {
  1877. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1878. dsi_op_mode_config(msm_host,
  1879. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
  1880. /* TODO: clock should be turned off for command mode,
  1881. * and only turned on before MDP START.
  1882. * This part of code should be enabled once mdp driver support it.
  1883. */
  1884. /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
  1885. * dsi_link_clk_disable(msm_host);
  1886. * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
  1887. * }
  1888. */
  1889. msm_host->enabled = true;
  1890. return 0;
  1891. }
  1892. int msm_dsi_host_disable(struct mipi_dsi_host *host)
  1893. {
  1894. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1895. msm_host->enabled = false;
  1896. dsi_op_mode_config(msm_host,
  1897. !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
  1898. /* Since we have disabled INTF, the video engine won't stop so that
  1899. * the cmd engine will be blocked.
  1900. * Reset to disable video engine so that we can send off cmd.
  1901. */
  1902. dsi_sw_reset(msm_host);
  1903. return 0;
  1904. }
  1905. static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
  1906. {
  1907. enum sfpb_ahb_arb_master_port_en en;
  1908. if (!msm_host->sfpb)
  1909. return;
  1910. en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
  1911. regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
  1912. SFPB_GPREG_MASTER_PORT_EN__MASK,
  1913. SFPB_GPREG_MASTER_PORT_EN(en));
  1914. }
/*
 * msm_dsi_host_power_on - power up the DSI controller
 * @host: DSI host
 * @phy_shared_timings: PHY timings used to program the controller
 * @is_dual_dsi: true in dual-DSI (bonded) configuration
 *
 * Acquisition order: SFPB port enable -> regulators -> runtime PM +
 * link clocks -> pinctrl default state -> timing/reset/ctrl programming
 * -> display-enable GPIO.  Failures unwind in reverse via the goto
 * ladder below.  Idempotent: returns 0 if already powered on.
 *
 * Returns 0 on success or a negative errno.
 */
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
	struct msm_dsi_phy_shared_timings *phy_shared_timings,
	bool is_dual_dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;
	}

	msm_dsi_sfpb_config(msm_host, true);

	ret = dsi_host_regulator_enable(msm_host);
	if (ret) {
		pr_err("%s:Failed to enable vregs.ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	pm_runtime_get_sync(&msm_host->pdev->dev);
	ret = cfg_hnd->ops->link_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable link clocks. ret=%d\n",
		       __func__, ret);
		goto fail_disable_reg;
	}

	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
	if (ret) {
		pr_err("%s: failed to set pinctrl default state, %d\n",
			__func__, ret);
		goto fail_disable_clk;
	}

	dsi_timing_setup(msm_host, is_dual_dsi);
	dsi_sw_reset(msm_host);
	dsi_ctrl_config(msm_host, true, phy_shared_timings);

	/* Optional panel-enable GPIO. */
	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 1);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_clk:
	cfg_hnd->ops->link_clk_disable(msm_host);
	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
	dsi_host_regulator_disable(msm_host);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}
/*
 * msm_dsi_host_power_off - power down the DSI controller
 * @host: DSI host
 *
 * Releases resources in the reverse of the msm_dsi_host_power_on()
 * order: controller config -> display GPIO -> pinctrl sleep state ->
 * link clocks + runtime PM -> regulators -> SFPB port disable.
 * Idempotent: returns 0 if already powered off.  Always returns 0.
 */
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;

	mutex_lock(&msm_host->dev_mutex);
	if (!msm_host->power_on) {
		DBG("dsi host already off");
		goto unlock_ret;
	}

	dsi_ctrl_config(msm_host, false, NULL);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 0);

	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);

	cfg_hnd->ops->link_clk_disable(msm_host);
	pm_runtime_put_autosuspend(&msm_host->pdev->dev);

	dsi_host_regulator_disable(msm_host);

	msm_dsi_sfpb_config(msm_host, false);

	DBG("-");

	msm_host->power_on = false;

unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return 0;
}
  1987. int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
  1988. struct drm_display_mode *mode)
  1989. {
  1990. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  1991. if (msm_host->mode) {
  1992. drm_mode_destroy(msm_host->dev, msm_host->mode);
  1993. msm_host->mode = NULL;
  1994. }
  1995. msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
  1996. if (!msm_host->mode) {
  1997. pr_err("%s: cannot duplicate mode\n", __func__);
  1998. return -ENOMEM;
  1999. }
  2000. return 0;
  2001. }
  2002. struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
  2003. unsigned long *panel_flags)
  2004. {
  2005. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  2006. struct drm_panel *panel;
  2007. panel = of_drm_find_panel(msm_host->device_node);
  2008. if (panel_flags)
  2009. *panel_flags = msm_host->mode_flags;
  2010. return panel;
  2011. }
  2012. struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
  2013. {
  2014. struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
  2015. return of_drm_find_bridge(msm_host->device_node);
  2016. }