meson-gx-mmc.c

/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>
#include <linux/pinctrl/consumer.h>

#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_MASK GENMASK(5, 0)
#define CLK_SRC_MASK GENMASK(7, 6)
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
#define CLK_DELAY_STEP_PS 200
#define CLK_PHASE_STEP 30
#define CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP)
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_DELAY1 0x4
#define SD_EMMC_DELAY2 0x8
#define SD_EMMC_V3_ADJUST 0xc

#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_MASK GENMASK(31, 2)

#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_MASK GENMASK(7, 4)
#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define CFG_RC_CC_MASK GENMASK(15, 12)
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_CHK_DS BIT(20)
#define CFG_AUTO_CLK BIT(23)
#define CFG_ERR_ABORT BIT(27)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)
#define STATUS_DESC_BUSY BIT(30)
#define STATUS_DATI GENMASK(23, 16)

#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_CRC_ERR \
	(IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_TIMEOUTS \
	(IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)
#define IRQ_EN_MASK \
	(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\
	 IRQ_SDIO)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68
#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

struct meson_mmc_data {
	unsigned int tx_delay_mask;
	unsigned int rx_delay_mask;
	unsigned int always_on;
};

struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

struct meson_host {
	struct device *dev;
	struct meson_mmc_data *data;
	struct mmc_host *mmc;
	struct mmc_command *cmd;

	spinlock_t lock;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk *mmc_clk;
	struct clk *rx_clk;
	struct clk *tx_clk;
	unsigned long req_rate;

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_clk_gate;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	int irq;

	bool vqmmc_enabled;
};

#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

struct meson_mmc_phase {
	struct clk_hw hw;
	void __iomem *reg;
	unsigned long phase_mask;
	unsigned long delay_mask;
	unsigned int delay_step_ps;
};

#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw)

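/*
 * Worked example for the phase/delay math below (illustrative numbers,
 * not from the datasheet): at a 50 MHz clock, period_ps = 10^12 / 50e6
 * = 20000 ps. One 200 ps delay step then contributes
 * 200 * 360 / 20000 = 3.6 degrees, on top of the coarse phase selector
 * (90 degree steps for a 2-bit phase field, since phase_num = 4).
 */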
static int meson_mmc_clk_get_phase(struct clk_hw *hw)
{
	struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
	unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
	unsigned long period_ps, p, d;
	int degrees;
	u32 val;

	val = readl(mmc->reg);
	p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask);
	degrees = p * 360 / phase_num;

	if (mmc->delay_mask) {
		period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
					 clk_get_rate(hw->clk));
		d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask);
		degrees += d * mmc->delay_step_ps * 360 / period_ps;
		degrees %= 360;
	}

	return degrees;
}

static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc,
					unsigned int phase,
					unsigned int delay)
{
	u32 val;

	val = readl(mmc->reg);
	val &= ~mmc->phase_mask;
	val |= phase << __ffs(mmc->phase_mask);

	if (mmc->delay_mask) {
		val &= ~mmc->delay_mask;
		val |= delay << __ffs(mmc->delay_mask);
	}

	writel(val, mmc->reg);
}

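/*
 * Example (using the 50 MHz / 2-bit-phase numbers above): a request for
 * 100 degrees first picks phase index p = 1 (90 degrees), leaving a
 * remainder r = 10 degrees. With period_ps = 20000 and a 200 ps step,
 * d = DIV_ROUND_CLOSEST(10 * 20000, 360 * 200) = 3 steps, i.e. 600 ps
 * or roughly 10.8 degrees - the closest the hardware can get.
 */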
static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees)
{
	struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
	unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
	unsigned long period_ps, d = 0, r;
	uint64_t p;

	p = degrees % 360;

	if (!mmc->delay_mask) {
		p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num);
	} else {
		period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
					 clk_get_rate(hw->clk));

		/*
		 * First compute the phase index (p); the remainder (r) is
		 * the part we'll try to achieve using the delays (d).
		 */
		r = do_div(p, 360 / phase_num);
		d = DIV_ROUND_CLOSEST(r * period_ps,
				      360 * mmc->delay_step_ps);
		d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask));
	}

	meson_mmc_apply_phase_delay(mmc, p, d);
	return 0;
}

static const struct clk_ops meson_mmc_clk_phase_ops = {
	.get_phase = meson_mmc_clk_get_phase,
	.set_phase = meson_mmc_clk_set_phase,
};

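/*
 * Hardware timeouts are stored as ilog2(ms) in a 4-bit field, so the
 * value must be a power of two no larger than 2^15 ms. For example, a
 * requested data timeout of 3000000 ns (3 ms) is rounded up to 4 ms,
 * while a timeout of 0 falls back to the 4096 ms default.
 */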
static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

	if (!timeout)
		return SD_EMMC_CMD_TIMEOUT_DATA;

	timeout = roundup_pow_of_two(timeout);

	return min(timeout, 32768U); /* max. 2^15 ms */
}

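/*
 * Command sequencing helper: after a successful CMD23 (set block count),
 * the actual data command of the request is issued next; for a
 * multi-block transfer that had no CMD23, or that failed, the stop
 * command (CMD12) follows instead. Returning NULL ends the request.
 */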
static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
		return cmd->mrq->cmd;
	else if (mmc_op_multi(cmd->opcode) &&
		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
		return cmd->mrq->stop;
	else
		return NULL;
}

static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;
	bool use_desc_chain_mode = true;

	/*
	 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
	 * reported. For some strange reason this occurs in descriptor
	 * chain mode only. So let's fall back to bounce buffer mode
	 * for command SD_IO_RW_EXTENDED.
	 */
	if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
		return;

	for_each_sg(data->sg, sg, data->sg_len, i)
		/* check for 8 byte alignment */
		if (sg->offset & 7) {
			WARN_ONCE(1, "unaligned scatterlist buffer\n");
			use_desc_chain_mode = false;
			break;
		}

	if (use_desc_chain_mode)
		data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
	return data && data->flags & MMC_DATA_READ &&
	       !meson_mmc_desc_chain_mode(data);
}

static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	meson_mmc_get_transfer_mode(mmc, mrq);
	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

	if (!meson_mmc_desc_chain_mode(data))
		return;

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	if (!data->sg_count)
		dev_err(mmc_dev(mmc), "dma_map_sg failed");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;

	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}

static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios)
{
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		return true;

	return false;
}

/*
 * Gating the clock on this controller is tricky. It seems the mmc clock
 * is also used by the controller. It may crash during some operation if the
 * clock is stopped. The safest thing to do, whenever possible, is to keep
 * the clock running and stop it at the pad using the pinmux.
 */
static void meson_mmc_clk_gate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate) {
		pinctrl_select_state(host->pinctrl, host->pins_clk_gate);
	} else {
		/*
		 * If the pinmux is not provided - default to the classic and
		 * unsafe method
		 */
		cfg = readl(host->regs + SD_EMMC_CFG);
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}
}

static void meson_mmc_clk_ungate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate)
		pinctrl_select_state(host->pinctrl, host->pins_default);

	/* Make sure the clock is not stopped in the controller */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);
}

static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long rate = ios->clock;
	int ret;
	u32 cfg;

	/* DDR modes require higher module clock */
	if (meson_mmc_timing_is_ddr(ios))
		rate <<= 1;
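
	/*
	 * For instance, DDR52 at a 52 MHz bus clock needs a 104 MHz module
	 * clock; the doubling is undone below when reporting actual_clock.
	 */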

	/* Same request - bail-out */
	if (host->req_rate == rate)
		return 0;

	/* stop clock */
	meson_mmc_clk_gate(host);
	host->req_rate = 0;

	if (!rate) {
		mmc->actual_clock = 0;
		/* return with clock being stopped */
		return 0;
	}

	/* Stop the clock during rate change to avoid glitches */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg |= CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	ret = clk_set_rate(host->mmc_clk, rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			rate, ret);
		return ret;
	}

	host->req_rate = rate;
	mmc->actual_clock = clk_get_rate(host->mmc_clk);

	/* We should report the real output frequency of the controller */
	if (meson_mmc_timing_is_ddr(ios))
		mmc->actual_clock >>= 1;

	dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
	if (ios->clock != mmc->actual_clock)
		dev_dbg(host->dev, "requested rate was %u\n", ios->clock);

	/* (re)start clock */
	meson_mmc_clk_ungate(host);

	return 0;
}

/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
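/*
 * Sketch of the clock topology registered below ("#mux", "#div", etc.
 * are suffixed to the device name):
 *
 *   clkin0 --\
 *            +-- #mux -- #div -- #core (phase, host->mmc_clk)
 *   clkin1 --/                     +-- #tx (phase + delay)
 *                                  +-- #rx (phase + delay)
 */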
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	struct clk_mux *mux;
	struct clk_divider *div;
	struct meson_mmc_phase *core, *tx, *rx;
	struct clk *clk;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_parent[1];
	u32 clk_reg;

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= CLK_ALWAYS_ON(host);
	clk_reg |= CLK_DIV_MASK;
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	mux->reg = host->regs + SD_EMMC_CLOCK;
	mux->shift = __ffs(CLK_SRC_MASK);
	mux->mask = CLK_SRC_MASK >> mux->shift;
	mux->hw.init = &init;

	clk = devm_clk_register(host->dev, &mux->hw);
	if (WARN_ON(IS_ERR(clk)))
		return PTR_ERR(clk);

	/* create the divider */
	div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	div->reg = host->regs + SD_EMMC_CLOCK;
	div->shift = __ffs(CLK_DIV_MASK);
	div->width = __builtin_popcountl(CLK_DIV_MASK);
	div->hw.init = &init;
	div->flags = CLK_DIVIDER_ONE_BASED;

	clk = devm_clk_register(host->dev, &div->hw);
	if (WARN_ON(IS_ERR(clk)))
		return PTR_ERR(clk);

	/* create the mmc core clock */
	core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL);
	if (!core)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	core->reg = host->regs + SD_EMMC_CLOCK;
	core->phase_mask = CLK_CORE_PHASE_MASK;
	core->hw.init = &init;

	host->mmc_clk = devm_clk_register(host->dev, &core->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk)))
		return PTR_ERR(host->mmc_clk);

	/* create the mmc tx clock */
	tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL);
	if (!tx)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = 0;
	clk_parent[0] = __clk_get_name(host->mmc_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	tx->reg = host->regs + SD_EMMC_CLOCK;
	tx->phase_mask = CLK_TX_PHASE_MASK;
	tx->delay_mask = CLK_TX_DELAY_MASK(host);
	tx->delay_step_ps = CLK_DELAY_STEP_PS;
	tx->hw.init = &init;

	host->tx_clk = devm_clk_register(host->dev, &tx->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk)))
		return PTR_ERR(host->tx_clk);

	/* create the mmc rx clock */
	rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = 0;
	clk_parent[0] = __clk_get_name(host->mmc_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	rx->reg = host->regs + SD_EMMC_CLOCK;
	rx->phase_mask = CLK_RX_PHASE_MASK;
	rx->delay_mask = CLK_RX_DELAY_MASK(host);
	rx->delay_step_ps = CLK_DELAY_STEP_PS;
	rx->hw.init = &init;

	host->rx_clk = devm_clk_register(host->dev, &rx->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk)))
		return PTR_ERR(host->rx_clk);

	/* init the clock to the minimum rate */
	host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
	ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
	if (ret)
		return ret;

	/*
	 * Set phases: these values are mostly the datasheet recommended
	 * ones, except for the Tx phase. The datasheet recommends 180 but
	 * some cards fail at initialisation with it. 270 works just fine,
	 * it fixes these initialisation issues and enables eMMC DDR52 mode.
	 */
	clk_set_phase(host->mmc_clk, 180);
	clk_set_phase(host->tx_clk, 270);
	clk_set_phase(host->rx_clk, 0);

	return clk_prepare_enable(host->mmc_clk);
}

static void meson_mmc_shift_map(unsigned long *map, unsigned long shift)
{
	DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM);
	DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM);

	/*
	 * shift the bitmap right and reintroduce the dropped bits on the left
	 * of the bitmap
	 */
	bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM);
	bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift,
			  CLK_PHASE_POINT_NUM);
	bitmap_or(map, left, right, CLK_PHASE_POINT_NUM);
}

static void meson_mmc_find_next_region(unsigned long *map,
				       unsigned long *start,
				       unsigned long *stop)
{
	*start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start);
	*stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start);
}

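/*
 * Worked example with the 12 phase points of this driver: suppose tuning
 * succeeds at points {10, 11, 0, 1, 2} (a region wrapping around 0). The
 * first zero bit is 3, so the map is rotated right by 3, turning the set
 * into the contiguous {7, 8, 9, 10, 11}. The largest region then starts
 * at 7 with size 5, its centre is 7 + 5/2 = 9, and shifting back gives
 * (9 + 3) % 12 = 0 - point 0, the true middle of the wrapping region.
 */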
static int meson_mmc_find_tuning_point(unsigned long *test)
{
	unsigned long shift, stop, offset = 0, start = 0, size = 0;

	/* Get the all good/all bad situation out the way */
	if (bitmap_full(test, CLK_PHASE_POINT_NUM))
		return 0; /* All points are good so point 0 will do */
	else if (bitmap_empty(test, CLK_PHASE_POINT_NUM))
		return -EIO; /* No successful tuning point */

	/*
	 * Now we know there is at least one region to find. Make sure it
	 * does not wrap by shifting the bitmap if necessary.
	 */
	shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM);
	if (shift != 0)
		meson_mmc_shift_map(test, shift);

	while (start < CLK_PHASE_POINT_NUM) {
		meson_mmc_find_next_region(test, &start, &stop);
		if ((stop - start) > size) {
			offset = start;
			size = stop - start;
		}
		start = stop;
	}

	/* Get the center point of the region */
	offset += (size / 2);

	/* Shift the result back */
	offset = (offset + shift) % CLK_PHASE_POINT_NUM;

	return offset;
}

static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
				      struct clk *clk)
{
	int point, ret;
	DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM);

	dev_dbg(mmc_dev(mmc), "%s phase/delay tuning...\n",
		__clk_get_name(clk));
	bitmap_zero(test, CLK_PHASE_POINT_NUM);

	/* Explore tuning points */
	for (point = 0; point < CLK_PHASE_POINT_NUM; point++) {
		clk_set_phase(clk, point * CLK_PHASE_STEP);
		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret)
			set_bit(point, test);
	}

	/* Find the optimal tuning point and apply it */
	point = meson_mmc_find_tuning_point(test);
	if (point < 0)
		return point; /* tuning failed */

	clk_set_phase(clk, point * CLK_PHASE_STEP);
	dev_dbg(mmc_dev(mmc), "success with phase: %d\n",
		clk_get_phase(clk));
	return 0;
}

static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);

	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}

static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width, val;
	int err;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/* Reset rx phase */
		clk_set_phase(host->rx_clk, 0);

		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(host->dev,
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		break;
	}

	/* Bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	val &= ~CFG_BUS_WIDTH_MASK;
	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);

	val &= ~CFG_DDR;
	if (meson_mmc_timing_is_ddr(ios))
		val |= CFG_DDR;

	val &= ~CFG_CHK_DS;
	if (ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_CHK_DS;

	err = meson_mmc_clk_set(host, ios);
	if (err)
		dev_err(host->dev, "Failed to set clock: %d\n", err);

	writel(val, host->regs + SD_EMMC_CFG);
	dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
}

static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}

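/*
 * The CFG_BLK_LEN field stores the block size as ilog2(bytes), so the
 * usual 512-byte block is written as 9. The power-of-two check below
 * guards that encoding.
 */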
static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}

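/*
 * Mapping from the core's response flags to CMD_CFG bits, e.g. an R1
 * response sets RESP_NUM, R2 (136-bit) additionally sets RESP_128, R3
 * (no CRC) sets RESP_NOCRC, and R1b sets the busy bit.
 */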
static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}

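/*
 * Descriptor chain mode: each scatterlist entry becomes one sd_emmc_desc
 * (mirroring the CMD_CFG/ARG/DAT/RSP registers). Only the first
 * descriptor actually sends the command; the rest carry CMD_CFG_NO_CMD
 * and just move data, and the last one is flagged END_OF_CHAIN.
 */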
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}

static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
	cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}

static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}

static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}

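/*
 * Interrupt handling is split in two: the hard handler below classifies
 * and acks the interrupt, and returns IRQ_WAKE_THREAD whenever more work
 * (error recovery, bounce-buffer copy-back, or issuing the next command
 * of the request) must happen in the threaded handler further down.
 */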
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_NONE;

	/* check the host pointer before dereferencing it */
	if (WARN_ON(!host))
		return IRQ_NONE;

	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_dbg(host->dev,
			"Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
			irq_en, raw_status);
		return IRQ_NONE;
	}

	if (WARN_ON(!host->cmd))
		return IRQ_NONE;

	spin_lock(&host->lock);

	cmd = host->cmd;
	data = cmd->data;
	cmd->error = 0;
	if (status & IRQ_CRC_ERR) {
		dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
		cmd->error = -EILSEQ;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	if (status & IRQ_TIMEOUTS) {
		dev_dbg(host->dev, "Timeout - status 0x%08x\n", status);
		cmd->error = -ETIMEDOUT;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	if (status & IRQ_SDIO) {
		dev_dbg(host->dev, "IRQ: SDIO TODO.\n");
		ret = IRQ_HANDLED;
	}

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
		else
			ret = IRQ_HANDLED;
	}

out:
	/* ack all enabled interrupts */
	writel(irq_en, host->regs + SD_EMMC_STATUS);

	if (cmd->error) {
		/* Stop desc in case of errors */
		u32 start = readl(host->regs + SD_EMMC_START);

		start &= ~START_DESC_BUSY;
		writel(start, host->regs + SD_EMMC_START);
	}

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);

	spin_unlock(&host->lock);
	return ret;
}

static int meson_mmc_wait_desc_stop(struct meson_host *host)
{
	int loop;
	u32 status;

	/*
	 * It may sometimes take a while for it to actually halt. Here, we
	 * are giving it 5ms to comply
	 *
	 * If we don't confirm the descriptor is stopped, it might raise new
	 * IRQs after we have called mmc_request_done() which is bad.
	 */
	for (loop = 50; loop; loop--) {
		status = readl(host->regs + SD_EMMC_STATUS);
		if (status & (STATUS_BUSY | STATUS_DESC_BUSY))
			udelay(100);
		else
			break;
	}

	if (status & (STATUS_BUSY | STATUS_DESC_BUSY)) {
		dev_err(host->dev, "Timed out waiting for host to stop\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	if (cmd->error) {
		meson_mmc_wait_desc_stop(host);
		meson_mmc_request_done(host->mmc, cmd->mrq);

		return IRQ_HANDLED;
	}

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}

static void meson_mmc_cfg_init(struct meson_host *host)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

	/* abort chain on R/W errors */
	cfg |= CFG_ERR_ABORT;

	writel(cfg, host->regs + SD_EMMC_CFG);
}

static int meson_mmc_card_busy(struct mmc_host *mmc)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 regval;

	regval = readl(host->regs + SD_EMMC_STATUS);

	/* We are only interested in lines 0 to 3, so mask the other ones */
	return !(FIELD_GET(STATUS_DATI, regval) & 0xf);
}

static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	/* vqmmc regulator is available */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		/*
		 * The usual amlogic setup uses a GPIO to switch from one
		 * regulator to the other. While the voltage ramp up is
		 * pretty fast, care must be taken when switching from 3.3v
		 * to 1.8v. Please make sure the regulator framework is aware
		 * of your own regulator constraints
		 */
		return mmc_regulator_set_vqmmc(mmc, ios);
	}

	/* no vqmmc regulator, assume fixed regulator at 3/3.3V */
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return 0;

	return -EINVAL;
}

static const struct mmc_host_ops meson_mmc_ops = {
	.request = meson_mmc_request,
	.set_ios = meson_mmc_set_ios,
	.get_cd = meson_mmc_get_cd,
	.pre_req = meson_mmc_pre_req,
	.post_req = meson_mmc_post_req,
	.execute_tuning = meson_mmc_execute_tuning,
	.card_busy = meson_mmc_card_busy,
	.start_signal_voltage_switch = meson_mmc_voltage_switch,
};

static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	spin_lock_init(&host->lock);

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	host->data = (struct meson_mmc_data *)
		of_device_get_match_data(&pdev->dev);
	if (!host->data) {
		ret = -EINVAL;
		goto free_host;
	}

	ret = device_reset_optional(&pdev->dev);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "device reset failed: %d\n", ret);

		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
	}

	host->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto free_host;
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);
	if (IS_ERR(host->pins_default)) {
		ret = PTR_ERR(host->pins_default);
		goto free_host;
	}

	host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
						   "clk-gate");
	if (IS_ERR(host->pins_clk_gate)) {
		dev_warn(&pdev->dev,
			 "can't get clk-gate pinctrl, using clk_stop bit\n");
		host->pins_clk_gate = NULL;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto err_core_clk;

	/* set config to sane default */
	meson_mmc_cfg_init(host);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack and enable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
	       host->regs + SD_EMMC_STATUS);
	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
	       host->regs + SD_EMMC_IRQ_EN);

	ret = request_threaded_irq(host->irq, meson_mmc_irq,
				   meson_mmc_irq_thread, IRQF_SHARED,
				   dev_name(&pdev->dev), host);
	if (ret)
		goto err_init_clk;

	mmc->caps |= MMC_CAP_CMD23;
	mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
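	/*
	 * CMD_CFG_LENGTH_MASK is a 9-bit field, so max_blk_count is 511;
	 * with the MMC core's default 512-byte max_blk_size this caps a
	 * request (and the bounce buffer below) at roughly 255 KiB.
	 */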
	mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
	mmc->max_seg_size = mmc->max_req_size;

	/* data bounce buffer */
	host->bounce_buf_size = mmc->max_req_size;
	host->bounce_buf =
		dma_alloc_coherent(host->dev, host->bounce_buf_size,
				   &host->bounce_dma_addr, GFP_KERNEL);
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
		goto err_free_irq;
	}

	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
					 &host->descs_dma_addr, GFP_KERNEL);
	if (!host->descs) {
		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
		ret = -ENOMEM;
		goto err_bounce_buf;
	}

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

err_bounce_buf:
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);
err_free_irq:
	free_irq(host->irq, host);
err_init_clk:
	clk_disable_unprepare(host->mmc_clk);
err_core_clk:
	clk_disable_unprepare(host->core_clk);
free_host:
	mmc_free_host(mmc);
	return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	free_irq(host->irq, host);

	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
			  host->descs, host->descs_dma_addr);
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->mmc_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);
	return 0;
}

static const struct meson_mmc_data meson_gx_data = {
	.tx_delay_mask = CLK_V2_TX_DELAY_MASK,
	.rx_delay_mask = CLK_V2_RX_DELAY_MASK,
	.always_on = CLK_V2_ALWAYS_ON,
};

static const struct meson_mmc_data meson_axg_data = {
	.tx_delay_mask = CLK_V3_TX_DELAY_MASK,
	.rx_delay_mask = CLK_V3_RX_DELAY_MASK,
	.always_on = CLK_V3_ALWAYS_ON,
};

static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxbb-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxl-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxm-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-axg-mmc", .data = &meson_axg_data },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

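/*
 * For reference, a minimal (hypothetical) device tree node this driver
 * could bind to - the address, interrupt and clock phandles are
 * illustrative only, but the "core"/"clkin0"/"clkin1" clock names match
 * what the driver requests above:
 *
 *	mmc@72000 {
 *		compatible = "amlogic,meson-gx-mmc";
 *		reg = <0x0 0x72000 0x0 0x800>;
 *		interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
 *		clocks = <&clkc CLKID_SD_EMMC_B>,
 *			 <&xtal>,
 *			 <&clkc CLKID_FCLK_DIV2>;
 *		clock-names = "core", "clkin0", "clkin1";
 *	};
 */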
static struct platform_driver meson_mmc_driver = {
	.probe = meson_mmc_probe,
	.remove = meson_mmc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");