sdhci-msm.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038
  1. /*
  2. * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
  3. *
  4. * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 and
  8. * only version 2 as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. */
  16. #include <linux/module.h>
  17. #include <linux/of_device.h>
  18. #include <linux/delay.h>
  19. #include <linux/mmc/mmc.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/slab.h>
  22. #include <linux/iopoll.h>
  23. #include <linux/regulator/consumer.h>
  24. #include "sdhci-pltfm.h"
  25. #define CORE_MCI_VERSION 0x50
  26. #define CORE_VERSION_MAJOR_SHIFT 28
  27. #define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
  28. #define CORE_VERSION_MINOR_MASK 0xff
  29. #define CORE_MCI_GENERICS 0x70
  30. #define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)
  31. #define HC_MODE_EN 0x1
  32. #define CORE_POWER 0x0
  33. #define CORE_SW_RST BIT(7)
  34. #define FF_CLK_SW_RST_DIS BIT(13)
  35. #define CORE_PWRCTL_BUS_OFF BIT(0)
  36. #define CORE_PWRCTL_BUS_ON BIT(1)
  37. #define CORE_PWRCTL_IO_LOW BIT(2)
  38. #define CORE_PWRCTL_IO_HIGH BIT(3)
  39. #define CORE_PWRCTL_BUS_SUCCESS BIT(0)
  40. #define CORE_PWRCTL_IO_SUCCESS BIT(2)
  41. #define REQ_BUS_OFF BIT(0)
  42. #define REQ_BUS_ON BIT(1)
  43. #define REQ_IO_LOW BIT(2)
  44. #define REQ_IO_HIGH BIT(3)
  45. #define INT_MASK 0xf
  46. #define MAX_PHASES 16
  47. #define CORE_DLL_LOCK BIT(7)
  48. #define CORE_DDR_DLL_LOCK BIT(11)
  49. #define CORE_DLL_EN BIT(16)
  50. #define CORE_CDR_EN BIT(17)
  51. #define CORE_CK_OUT_EN BIT(18)
  52. #define CORE_CDR_EXT_EN BIT(19)
  53. #define CORE_DLL_PDN BIT(29)
  54. #define CORE_DLL_RST BIT(30)
  55. #define CORE_CMD_DAT_TRACK_SEL BIT(0)
  56. #define CORE_DDR_CAL_EN BIT(0)
  57. #define CORE_FLL_CYCLE_CNT BIT(18)
  58. #define CORE_DLL_CLOCK_DISABLE BIT(21)
  59. #define CORE_VENDOR_SPEC_POR_VAL 0xa1c
  60. #define CORE_CLK_PWRSAVE BIT(1)
  61. #define CORE_HC_MCLK_SEL_DFLT (2 << 8)
  62. #define CORE_HC_MCLK_SEL_HS400 (3 << 8)
  63. #define CORE_HC_MCLK_SEL_MASK (3 << 8)
  64. #define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
  65. #define CORE_IO_PAD_PWR_SWITCH (1 << 16)
  66. #define CORE_HC_SELECT_IN_EN BIT(18)
  67. #define CORE_HC_SELECT_IN_HS400 (6 << 19)
  68. #define CORE_HC_SELECT_IN_MASK (7 << 19)
  69. #define CORE_3_0V_SUPPORT (1 << 25)
  70. #define CORE_1_8V_SUPPORT (1 << 26)
  71. #define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)
  72. #define CORE_CSR_CDC_CTLR_CFG0 0x130
  73. #define CORE_SW_TRIG_FULL_CALIB BIT(16)
  74. #define CORE_HW_AUTOCAL_ENA BIT(17)
  75. #define CORE_CSR_CDC_CTLR_CFG1 0x134
  76. #define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
  77. #define CORE_TIMER_ENA BIT(16)
  78. #define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
  79. #define CORE_CSR_CDC_REFCOUNT_CFG 0x140
  80. #define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
  81. #define CORE_CDC_OFFSET_CFG 0x14C
  82. #define CORE_CSR_CDC_DELAY_CFG 0x150
  83. #define CORE_CDC_SLAVE_DDA_CFG 0x160
  84. #define CORE_CSR_CDC_STATUS0 0x164
  85. #define CORE_CALIBRATION_DONE BIT(0)
  86. #define CORE_CDC_ERROR_CODE_MASK 0x7000000
  87. #define CORE_CSR_CDC_GEN_CFG 0x178
  88. #define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
  89. #define CORE_CDC_SWITCH_RC_EN BIT(1)
  90. #define CORE_CDC_T4_DLY_SEL BIT(0)
  91. #define CORE_CMDIN_RCLK_EN BIT(1)
  92. #define CORE_START_CDC_TRAFFIC BIT(6)
  93. #define CORE_PWRSAVE_DLL BIT(3)
  94. #define DDR_CONFIG_POR_VAL 0x80040873
  95. #define INVALID_TUNING_PHASE -1
  96. #define SDHCI_MSM_MIN_CLOCK 400000
  97. #define CORE_FREQ_100MHZ (100 * 1000 * 1000)
  98. #define CDR_SELEXT_SHIFT 20
  99. #define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
  100. #define CMUX_SHIFT_PHASE_SHIFT 24
  101. #define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
  102. #define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
  103. /* Timeout value to avoid infinite waiting for pwr_irq */
  104. #define MSM_PWR_IRQ_TIMEOUT_MS 5000
  105. #define msm_host_readl(msm_host, host, offset) \
  106. msm_host->var_ops->msm_readl_relaxed(host, offset)
  107. #define msm_host_writel(msm_host, val, host, offset) \
  108. msm_host->var_ops->msm_writel_relaxed(val, host, offset)
/*
 * Variant-dependent addresses of the Qualcomm-specific core registers.
 * The same logical register lives at a different offset depending on
 * whether the controller still has the legacy MCI register space
 * (pre-v5) or is a standalone SDHCI (v5 and later); see the two
 * instances below.
 */
struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
	u32 core_ddr_config;
};
/*
 * Register offsets for SDCC v5 and later, where the legacy MCI register
 * space has been removed and everything is reached through host->ioaddr.
 * core_hc_mode and core_ddr_config_old are left zero: they are not
 * initialized here, so presumably v5 has no such registers — confirm
 * against the users of those fields.
 */
static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
};
/*
 * Register offsets for pre-v5 controllers that still carry the legacy
 * MCI register space; these registers are accessed relative to
 * msm_host->core_mem (see the mci variant readl/writel helpers).
 */
static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};
/*
 * MMIO accessor indirection: pre-v5 parts route these registers through
 * the separate MCI region, v5+ parts read them straight off ioaddr.
 */
struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};
/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data_structure based on version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;	/* true on v5+: legacy MCI space is gone */
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};
/* Per-controller driver state, kept in the sdhci_pltfm private area. */
struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll*/
	struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
	unsigned long clk_rate;	/* last rate programmed on the core clock */
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset; /* run the 14lpp-specific DLL reset steps */
	bool tuning_done;	/* set once HS200 tuning has succeeded */
	bool calibration_done;	/* set once HS400 calibration has run */
	u8 saved_tuning_phase;	/* presumably the phase picked by tuning — confirm */
	bool use_cdclp533;	/* use CDCLP533 circuit for HS400 calibration */
	u32 curr_pwr_state;	/* pwr_irq bookkeeping: bus power state */
	u32 curr_io_level;	/* pwr_irq bookkeeping: I/O voltage level */
	wait_queue_head_t pwr_irq_wait;	/* waiters for pwr_irq handling */
	bool pwr_irq_flag;	/* signalled by the pwr_irq handler */
	u32 caps_0;		/* NOTE(review): cached vendor caps — confirm writer */
	bool mci_removed;	/* v5+: no legacy MCI register space */
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;	/* variant register map */
	bool use_cdr;		/* NOTE(review): presumably CDR enable state — confirm */
	u32 transfer_mode;	/* NOTE(review): cached transfer-mode value — confirm */
	bool updated_ddr_cfg;	/* ddr_config register already updated once */
};
  236. static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
  237. {
  238. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  239. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  240. return msm_host->offset;
  241. }
  242. /*
  243. * APIs to read/write to vendor specific registers which were there in the
  244. * core_mem region before MCI was removed.
  245. */
  246. static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
  247. u32 offset)
  248. {
  249. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  250. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  251. return readl_relaxed(msm_host->core_mem + offset);
  252. }
  253. static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
  254. u32 offset)
  255. {
  256. return readl_relaxed(host->ioaddr + offset);
  257. }
  258. static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
  259. struct sdhci_host *host, u32 offset)
  260. {
  261. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  262. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  263. writel_relaxed(val, msm_host->core_mem + offset);
  264. }
  265. static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
  266. struct sdhci_host *host, u32 offset)
  267. {
  268. writel_relaxed(val, host->ioaddr + offset);
  269. }
  270. static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
  271. unsigned int clock)
  272. {
  273. struct mmc_ios ios = host->mmc->ios;
  274. /*
  275. * The SDHC requires internal clock frequency to be double the
  276. * actual clock that will be set for DDR mode. The controller
  277. * uses the faster clock(100/400MHz) for some of its parts and
  278. * send the actual required clock (50/200MHz) to the card.
  279. */
  280. if (ios.timing == MMC_TIMING_UHS_DDR50 ||
  281. ios.timing == MMC_TIMING_MMC_DDR52 ||
  282. ios.timing == MMC_TIMING_MMC_HS400 ||
  283. host->flags & SDHCI_HS400_TUNING)
  284. clock *= 2;
  285. return clock;
  286. }
  287. static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
  288. unsigned int clock)
  289. {
  290. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  291. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  292. struct mmc_ios curr_ios = host->mmc->ios;
  293. struct clk *core_clk = msm_host->bulk_clks[0].clk;
  294. int rc;
  295. clock = msm_get_clock_rate_for_bus_mode(host, clock);
  296. rc = clk_set_rate(core_clk, clock);
  297. if (rc) {
  298. pr_err("%s: Failed to set clock at rate %u at timing %d\n",
  299. mmc_hostname(host->mmc), clock,
  300. curr_ios.timing);
  301. return;
  302. }
  303. msm_host->clk_rate = clock;
  304. pr_debug("%s: Setting clock at rate %lu at timing %d\n",
  305. mmc_hostname(host->mmc), clk_get_rate(core_clk),
  306. curr_ios.timing);
  307. }
  308. /* Platform specific tuning */
  309. static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
  310. {
  311. u32 wait_cnt = 50;
  312. u8 ck_out_en;
  313. struct mmc_host *mmc = host->mmc;
  314. const struct sdhci_msm_offset *msm_offset =
  315. sdhci_priv_msm_offset(host);
  316. /* Poll for CK_OUT_EN bit. max. poll time = 50us */
  317. ck_out_en = !!(readl_relaxed(host->ioaddr +
  318. msm_offset->core_dll_config) & CORE_CK_OUT_EN);
  319. while (ck_out_en != poll) {
  320. if (--wait_cnt == 0) {
  321. dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
  322. mmc_hostname(mmc), poll);
  323. return -ETIMEDOUT;
  324. }
  325. udelay(1);
  326. ck_out_en = !!(readl_relaxed(host->ioaddr +
  327. msm_offset->core_dll_config) & CORE_CK_OUT_EN);
  328. }
  329. return 0;
  330. }
/*
 * Select one of the 16 DLL clock output phases (0..15) as the sampling
 * phase. The phase value is grey-coded before being written to the
 * CDR_SELEXT field, and the CK_OUT_EN handshake is observed around the
 * update. Runs under host->lock with interrupts disabled.
 *
 * Returns 0 on success, -EINVAL for phase > 0xf, or -ETIMEDOUT if the
 * CK_OUT_EN handshake does not complete.
 */
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
		sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	/* Take manual control of the phase: disable CDR, use external phase */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	/* Hand control back: re-enable CDR, drop the external phase enable */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}
/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 *
 * @phase_table holds the @total_phases working phases in ascending
 * order. Returns the chosen phase (0..15) or a negative errno.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[r] = the r-th run of consecutive working phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	/* Split the sorted phase list into runs of consecutive phases */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
			    ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the longest run of working phases */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Use the phase 3/4 of the way into the selected window */
	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}
  477. static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
  478. {
  479. u32 mclk_freq = 0, config;
  480. const struct sdhci_msm_offset *msm_offset =
  481. sdhci_priv_msm_offset(host);
  482. /* Program the MCLK value to MCLK_FREQ bit field */
  483. if (host->clock <= 112000000)
  484. mclk_freq = 0;
  485. else if (host->clock <= 125000000)
  486. mclk_freq = 1;
  487. else if (host->clock <= 137000000)
  488. mclk_freq = 2;
  489. else if (host->clock <= 150000000)
  490. mclk_freq = 3;
  491. else if (host->clock <= 162000000)
  492. mclk_freq = 4;
  493. else if (host->clock <= 175000000)
  494. mclk_freq = 5;
  495. else if (host->clock <= 187000000)
  496. mclk_freq = 6;
  497. else if (host->clock <= 200000000)
  498. mclk_freq = 7;
  499. config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
  500. config &= ~CMUX_SHIFT_PHASE_MASK;
  501. config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
  502. writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
  503. }
/* Initialize the DLL (Programmable Delay Line) */
/*
 * Full DLL bring-up: disable power-save clock gating, reset and
 * power-cycle the DLL (with the extra 14lpp-specific clock-disable
 * steps when use_14lpp_dll_reset is set), program the frequency code,
 * then enable the DLL and wait for it to lock.
 *
 * Runs under host->lock. Returns 0 on success or -ETIMEDOUT if the
 * DLL_LOCK bit never sets.
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	/* TCXO rate is needed below to derive the FLL mclk code */
	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/* 14lpp parts: gate the DLL output and clock before reset */
	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/* Assert DLL reset and power-down */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);
	msm_cm_dll_set_freq(host);

	/* Derive and program the FLL mclk code from the TCXO rate */
	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		/* FLL_CYCLE_CNT selects an 8x vs 4x cycle count */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	/* De-assert reset and power-down */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* 14lpp parts: re-program frequency and ungate the DLL clock */
	if (msm_host->use_14lpp_dll_reset) {
		msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/* Enable the DLL and its clock output */
	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}
/*
 * Restore the default (non-HS400) clock mux configuration: select the
 * default MCLK, and hand UHS mode selection back to the standard Host
 * Control2 register by clearing HC_SELECT_IN.
 */
static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	/* Without CDCLP533, also drop the DLL power-save bit */
	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/* Select the default MCLK source */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.
	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
	 * in VENDOR_SPEC_FUNC
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
/*
 * msm_hc_select_hs400 - switch the controller clock mux and mode select
 * for HS400 operation (divided clock, free running MCLK/2).
 *
 * Also forces HS400 via the vendor-specific HC_SELECT_IN override once
 * tuning is done (or enhanced strobe is in use) but calibration is still
 * pending, and optionally waits for the DLL to lock before the caller
 * changes the GCC clock rate.
 */
static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	/* clk_rate == 0 means the bus clock was not yet started by SW */
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * core_dll_status to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
				msm_offset->core_dll_status,
				dll_lock,
				(dll_lock &
				(CORE_DLL_LOCK |
				CORE_DDR_DLL_LOCK)), 10,
				1000);
		/* Lock failure is logged but not fatal here */
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
  698. /*
  699. * sdhci_msm_hc_select_mode :- In general all timing modes are
  700. * controlled via UHS mode select in Host Control2 register.
  701. * eMMC specific HS200/HS400 doesn't have their respective modes
  702. * defined here, hence we use these values.
  703. *
  704. * HS200 - SDR104 (Since they both are equivalent in functionality)
  705. * HS400 - This involves multiple configurations
  706. * Initially SDR104 - when tuning is required as HS200
  707. * Then when switching to DDR @ 400MHz (HS400) we use
  708. * the vendor specific HC_SELECT_IN to control the mode.
  709. *
  710. * In addition to controlling the modes we also need to select the
  711. * correct input clock for DLL depending on the mode.
  712. *
  713. * HS400 - divided clock (free running MCLK/2)
  714. * All other modes - default (free running MCLK)
  715. */
  716. static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
  717. {
  718. struct mmc_ios ios = host->mmc->ios;
  719. if (ios.timing == MMC_TIMING_MMC_HS400 ||
  720. host->flags & SDHCI_HS400_TUNING)
  721. msm_hc_select_hs400(host);
  722. else
  723. msm_hc_select_default(host);
  724. }
/*
 * sdhci_msm_cdclp533_calibration - calibrate the CDCLP533 delay block
 * used by older controllers for HS400.
 *
 * Resets the tuning DLL, restores the previously saved tuning phase and
 * then runs the hardware CDC calibration sequence. The magic values
 * written below are the documented CDC register initialization sequence
 * for this IP (per the original driver; exact meaning is not visible
 * here).
 *
 * Returns 0 on success or a negative errno (-ETIMEDOUT if calibration
 * never completes, -EINVAL if the CDC reports an error code).
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	/* Stop CDC traffic while the calibration sequence runs */
	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */
	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration: pulse SW_TRIG_FULL_CALIB, then enable autocal */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	/* Poll (1 us interval, 50 us timeout) for calibration completion */
	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* A non-zero error code from the CDC means calibration failed */
	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	/* Calibration done: let CDC traffic flow again */
	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
/*
 * sdhci_msm_cm_dll_sdc4_calibration - calibrate the CM DLL (SDC4 flavor)
 * used by newer controllers for HS400.
 *
 * Reprograms the DDR config register with its power-on-reset value,
 * optionally enables CMDIN_RCLK for enhanced strobe, kicks off DDR
 * calibration and waits for DDR_DLL_LOCK, then enables the DLL power
 * save feature.
 *
 * Returns 0 on success or -ETIMEDOUT if the DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the core_ddr_config register defaults to desired
	 * configuration on reset. Currently reprogramming the power on
	 * reset (POR) value in case it might have been modified by
	 * bootloaders. In the future, if this changes, then the desired
	 * values will need to be programmed appropriately.
	 */
	/* The register offset moved between IP revisions */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	/* Start DDR calibration */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	/* Poll (10 us interval, 1 ms timeout) for the DDR DLL to lock */
	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3);
	config |= CORE_PWRSAVE_DLL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3);

	/*
	 * Drain writebuffer to ensure above DLL calibration
	 * and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
/*
 * sdhci_msm_hs400_dll_calibration - DLL calibration entry point for
 * HS400.
 *
 * Resets the tuning block, restores the saved tuning phase (not needed
 * for enhanced strobe, where no tuning phase exists) and then delegates
 * to the CDCLP533 or CM-DLL-SDC4 calibration routine depending on the
 * controller variant.
 *
 * Returns 0 on success or a negative errno from the helpers.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	/* Variant-specific calibration backend */
	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
  901. static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
  902. {
  903. const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
  904. u32 config, oldconfig = readl_relaxed(host->ioaddr +
  905. msm_offset->core_dll_config);
  906. config = oldconfig;
  907. if (enable) {
  908. config |= CORE_CDR_EN;
  909. config &= ~CORE_CDR_EXT_EN;
  910. } else {
  911. config &= ~CORE_CDR_EN;
  912. config |= CORE_CDR_EXT_EN;
  913. }
  914. if (config != oldconfig)
  915. writel_relaxed(config, host->ioaddr +
  916. msm_offset->core_dll_config);
  917. }
/*
 * sdhci_msm_execute_tuning - platform tuning for SDR104/HS200/HS400.
 *
 * Sweeps all 16 DLL phases, collects the phases at which the tuning
 * command succeeds, and programs the "most appropriate" phase chosen
 * from the passing set. The whole sequence is retried up to 10 times
 * when no phase passes, or when all 16 pass (which is treated as an
 * unreliable result - see comment below).
 *
 * Returns 0 on success, -EIO when no usable phase is found, or a
 * negative errno from the DLL helpers.
 */
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 10;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios.timing == MMC_TIMING_MMC_HS400 ||
	    ios.timing == MMC_TIMING_MMC_HS200 ||
	    ios.timing == MMC_TIMING_UHS_SDR104)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * Clear tuning_done flag before tuning to ensure proper
	 * HS400 settings.
	 */
	msm_host->tuning_done = 0;

	/*
	 * For HS400 tuning in HS200 timing requires:
	 * - select MCLK/2 in VENDOR_SPEC
	 * - program MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		/* One-shot flag: consumed once the HS400 clock is set up */
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		/* Remember the last programmed phase for HS400 calibration */
		msm_host->saved_tuning_phase = phase;
		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				 mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
			/*
			 * All phases valid is _almost_ as bad as no phases
			 * valid. Probably all phases are not really reliable
			 * but we didn't detect where the unreliable place is.
			 * That means we'll essentially be guessing and hoping
			 * we get a good phase. Better to try a few times.
			 */
			dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
				mmc_hostname(mmc));
			if (--tuning_seq_cnt) {
				tuned_phase_cnt = 0;
				goto retry;
			}
		}

		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			 mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
		       mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}
  1019. /*
  1020. * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
  1021. * This needs to be done for both tuning and enhanced_strobe mode.
  1022. * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
  1023. * fixed feedback clock is used.
  1024. */
  1025. static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
  1026. {
  1027. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  1028. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  1029. int ret;
  1030. if (host->clock > CORE_FREQ_100MHZ &&
  1031. (msm_host->tuning_done || ios->enhanced_strobe) &&
  1032. !msm_host->calibration_done) {
  1033. ret = sdhci_msm_hs400_dll_calibration(host);
  1034. if (!ret)
  1035. msm_host->calibration_done = true;
  1036. else
  1037. pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
  1038. mmc_hostname(host->mmc), ret);
  1039. }
  1040. }
/*
 * sdhci_msm_set_uhs_signaling - program the UHS mode select bits in
 * Host Control2 for the requested timing, with MSM-specific handling:
 * HS200/HS400 map onto SDR104, and for clocks <= 100 MHz the mode
 * select is cleared and the DLL is reset/powered down so the fixed
 * feedback clock is used and tuning can be skipped.
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
		msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		/* HS200/HS400 are functionally equivalent to SDR104 here */
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz
		 * Thus, make sure DLL it is disabled when not required
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}
/* Initialize the waitqueue used to synchronize with the power IRQ */
static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}
/* Wake up any thread waiting in sdhci_msm_check_power_status() */
static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}
  1120. /*
  1121. * sdhci_msm_check_power_status API should be called when registers writes
  1122. * which can toggle sdhci IO bus ON/OFF or change IO lines HIGH/LOW happens.
  1123. * To what state the register writes will change the IO lines should be passed
  1124. * as the argument req_type. This API will check whether the IO line's state
  1125. * is already the expected state and will wait for power irq only if
 * power irq is expected to be triggered based on the current IO line state
  1127. * and expected IO line state.
  1128. */
/*
 * sdhci_msm_check_power_status - wait, when necessary, for the power IRQ
 * that the register write identified by @req_type is expected to raise.
 *
 * Bails out early when the IRQ cannot occur: on pre-v5 controllers
 * without switchable signaling voltage, or for an IO-high request before
 * the controller has been powered (host->pwr == 0). When the controller
 * is already in the requested state, no IRQ will fire and the wait is
 * skipped; otherwise waits up to MSM_PWR_IRQ_TIMEOUT_MS and warns on
 * timeout.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
			mmc_hostname(host->mmc), __func__, req_type,
			msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
	 * it as always set.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when -
	 * there is a state change in 1.8V enable bit (bit 3) of
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
				mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
			(req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
			__func__, req_type);
}
  1190. static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
  1191. {
  1192. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  1193. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  1194. const struct sdhci_msm_offset *msm_offset =
  1195. msm_host->offset;
  1196. pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
  1197. mmc_hostname(host->mmc),
  1198. msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
  1199. msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
  1200. msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
  1201. }
/*
 * sdhci_msm_handle_pwr_irq - service a power-control interrupt.
 *
 * Reads and clears the pwrctl status, acknowledges bus on/off and IO
 * level requests back to the controller, and, when regulator voltage
 * capabilities are known, updates the IO PAD power switch to match the
 * requested IO level. Finally records the new power/IO state for
 * sdhci_msm_check_power_status().
 */
static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;
	int retry = 10;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when actual reset and clear/read of status register is
	 * happening at a time. Hence, retry for at least 10 times to make
	 * sure status register is cleared. Otherwise, this will result in
	 * a spurious power IRQ resulting in system instability.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
					mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF*/
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back if it succeeded or not to this register. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset IO PAD PWR switch only if the register write
		 * can set IO lines high and the regulator also switches to 3 V.
		 * Else, we should keep the IO PAD PWR switch set.
		 * This is applicable to certain targets where eMMC vccq supply
		 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
		 * IO PAD PWR switch must be kept set to reflect actual
		 * regulator voltage. This way, during initialization of
		 * controllers with only 1.8V, we will set the IO PAD bit
		 * without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
				(msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
				(msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		/* Avoid a redundant MMIO write when nothing changed */
		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}
  1302. static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
  1303. {
  1304. struct sdhci_host *host = (struct sdhci_host *)data;
  1305. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  1306. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  1307. sdhci_msm_handle_pwr_irq(host, irq);
  1308. msm_host->pwr_irq_flag = 1;
  1309. sdhci_msm_complete_pwr_irq_wait(msm_host);
  1310. return IRQ_HANDLED;
  1311. }
  1312. static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
  1313. {
  1314. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  1315. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  1316. struct clk *core_clk = msm_host->bulk_clks[0].clk;
  1317. return clk_round_rate(core_clk, ULONG_MAX);
  1318. }
/* Minimum clock the MSM controller supports (driver-defined floor) */
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}
  1323. /**
  1324. * __sdhci_msm_set_clock - sdhci_msm clock control.
  1325. *
  1326. * Description:
  1327. * MSM controller does not use internal divider and
  1328. * instead directly control the GCC clock as per
  1329. * HW recommendation.
  1330. **/
  1331. static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
  1332. {
  1333. u16 clk;
  1334. /*
  1335. * Keep actual_clock as zero -
  1336. * - since there is no divider used so no need of having actual_clock.
  1337. * - MSM controller uses SDCLK for data timeout calculation. If
  1338. * actual_clock is zero, host->clock is taken for calculation.
  1339. */
  1340. host->mmc->actual_clock = 0;
  1341. sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
  1342. if (clock == 0)
  1343. return;
  1344. /*
  1345. * MSM controller do not use clock divider.
  1346. * Thus read SDHCI_CLOCK_CONTROL and only enable
  1347. * clock with no divider value programmed.
  1348. */
  1349. clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
  1350. sdhci_enable_clk(host, clk);
  1351. }
  1352. /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
  1353. static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
  1354. {
  1355. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  1356. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  1357. if (!clock) {
  1358. msm_host->clk_rate = clock;
  1359. goto out;
  1360. }
  1361. sdhci_msm_hc_select_mode(host);
  1362. msm_set_clock_rate_for_bus_mode(host, clock);
  1363. out:
  1364. __sdhci_msm_set_clock(host, clock);
  1365. }
  1366. /*
  1367. * Platform specific register write functions. This is so that, if any
  1368. * register write needs to be followed up by platform specific actions,
  1369. * they can be added here. These functions can go to sleep when writes
  1370. * to certain registers are done.
  1371. * These functions are relying on sdhci_set_ios not using spinlock.
  1372. */
/*
 * __sdhci_msm_check_write - inspect a register write about to happen and
 * classify whether it may trigger a power IRQ.
 *
 * Returns the REQ_* type the caller should wait on (0 when none). Also
 * tracks the transfer mode so the COMMAND write can decide whether CDR
 * should be enabled (reads only, and never for tuning commands), and
 * arms pwr_irq_flag plus a full barrier before IRQ-triggering writes.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		/* 1.8V enable bit toggles the IO level */
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		/* Remember for the upcoming SDHCI_COMMAND write */
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		/* CDR only for data reads, and never for tuning commands */
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}
  1414. /* This function may sleep*/
  1415. static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
  1416. {
  1417. u32 req_type = 0;
  1418. req_type = __sdhci_msm_check_write(host, val, reg);
  1419. writew_relaxed(val, host->ioaddr + reg);
  1420. if (req_type)
  1421. sdhci_msm_check_power_status(host, req_type);
  1422. }
  1423. /* This function may sleep*/
  1424. static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
  1425. {
  1426. u32 req_type = 0;
  1427. req_type = __sdhci_msm_check_write(host, val, reg);
  1428. writeb_relaxed(val, host->ioaddr + reg);
  1429. if (req_type)
  1430. sdhci_msm_check_power_status(host, req_type);
  1431. }
  1432. static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
  1433. {
  1434. struct mmc_host *mmc = msm_host->mmc;
  1435. struct regulator *supply = mmc->supply.vqmmc;
  1436. u32 caps = 0, config;
  1437. struct sdhci_host *host = mmc_priv(mmc);
  1438. const struct sdhci_msm_offset *msm_offset = msm_host->offset;
  1439. if (!IS_ERR(mmc->supply.vqmmc)) {
  1440. if (regulator_is_supported_voltage(supply, 1700000, 1950000))
  1441. caps |= CORE_1_8V_SUPPORT;
  1442. if (regulator_is_supported_voltage(supply, 2700000, 3600000))
  1443. caps |= CORE_3_0V_SUPPORT;
  1444. if (!caps)
  1445. pr_warn("%s: 1.8/3V not supported for vqmmc\n",
  1446. mmc_hostname(mmc));
  1447. }
  1448. if (caps) {
  1449. /*
  1450. * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
  1451. * bit can be used as required later on.
  1452. */
  1453. u32 io_level = msm_host->curr_io_level;
  1454. config = readl_relaxed(host->ioaddr +
  1455. msm_offset->core_vendor_spec);
  1456. config |= CORE_IO_PAD_PWR_SWITCH_EN;
  1457. if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
  1458. config &= ~CORE_IO_PAD_PWR_SWITCH;
  1459. else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
  1460. config |= CORE_IO_PAD_PWR_SWITCH;
  1461. writel_relaxed(config,
  1462. host->ioaddr + msm_offset->core_vendor_spec);
  1463. }
  1464. msm_host->caps_0 |= caps;
  1465. pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
  1466. }
/* Register accessors for variants where the MCI block is still present. */
static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};
/* Register accessors for v5 variants (MCI block removed). */
static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};
/* Variant description for controllers with the legacy MCI register space. */
static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.mci_removed = false,
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};
/* Variant description for v5 controllers (no MCI register space). */
static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};
/* DT compatible strings map each SoC generation to its variant info. */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
/*
 * SDHCI host ops.  write_w/write_b are overridden so that power-related
 * register writes can be paired with the controller's power IRQ.
 */
static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
};
/* Platform data: quirks for this controller family plus the ops above. */
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};
/*
 * Probe: allocate the SDHCI platform host, pick the variant from the DT
 * match data, bring up the clock tree (bus voter, iface, core, optional
 * cal/sleep/xo), program vendor registers to a known state, hook up the
 * power IRQ, enable runtime PM and finally register the host.
 * Error paths unwind in reverse order via the labels at the bottom.
 */
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct resource *core_memres;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	/*
	 * Based on the compatible string, load the required msm host info from
	 * the data associated with the version info.
	 */
	var_info = of_device_get_match_data(&pdev->dev);
	/*
	 * NOTE(review): var_info is dereferenced without a NULL check; this
	 * relies on the driver only ever binding through sdhci_msm_dt_match.
	 * Confirm non-DT binding is impossible here.
	 */

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Vote for maximum clock rate for maximum performance */
	ret = clk_set_rate(clk, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	/* "cal" and "sleep" clocks are optional: NULL is a valid bulk entry. */
	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * xo clock is needed for FLL feature of cm_dll.
	 * In case if xo clock is not mentioned in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	/* Legacy variants expose a second MMIO region for the MCI block. */
	if (!msm_host->mci_removed) {
		core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
				core_memres);

		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to power on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
			host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set HC_MODE_EN bit in HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
				msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	/* Core version selects which DLL/tuning quirks apply below. */
	core_version = msm_host_readl(msm_host, host,
			msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		/*
		 * NOTE(review): the read is from SDHCI_CAPABILITIES but the
		 * write goes to the vendor capabilities override register --
		 * intentional-looking, but confirm against the HW manual.
		 */
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;

	/*
	 * Power on reset state may trigger power irq if previous status of
	 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
	 * interrupt in GIC, any pending power irq interrupt should be
	 * acknowledged. Otherwise power irq interrupt handler would be
	 * fired prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that above writes are propogated before interrupt enablement
	 * in GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
			msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
		msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	/* Hold a usage count across sdhci_add_host(), released at the end. */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;
	sdhci_msm_set_regulator_caps(msm_host);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
/*
 * Remove: unregister the SDHCI host (treating it as dead when the interrupt
 * status register reads back all-ones), then balance runtime PM and shut
 * down the clocks acquired in probe.
 */
static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	/* All-ones from INT_STATUS means the device is unreachable. */
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	/*
	 * Resume once so the bulk clocks are on (see runtime_resume) before
	 * the final unconditional disable below.
	 */
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}
  1728. #ifdef CONFIG_PM
  1729. static int sdhci_msm_runtime_suspend(struct device *dev)
  1730. {
  1731. struct sdhci_host *host = dev_get_drvdata(dev);
  1732. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  1733. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  1734. clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
  1735. msm_host->bulk_clks);
  1736. return 0;
  1737. }
  1738. static int sdhci_msm_runtime_resume(struct device *dev)
  1739. {
  1740. struct sdhci_host *host = dev_get_drvdata(dev);
  1741. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  1742. struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
  1743. return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
  1744. msm_host->bulk_clks);
  1745. }
  1746. #endif
/*
 * System sleep reuses the runtime-PM callbacks via force_suspend/resume;
 * runtime PM itself only gates the bulk clocks.
 */
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};
/* Platform driver glue: DT-matched probe/remove plus the PM ops above. */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
	},
};
/* Standard module registration boilerplate. */
module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");