/* meson-gx-mmc.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Amlogic SD/eMMC driver for the GX/S905 family SoCs
  4. *
  5. * Copyright (c) 2016 BayLibre, SAS.
  6. * Author: Kevin Hilman <khilman@baylibre.com>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/module.h>
  10. #include <linux/init.h>
  11. #include <linux/delay.h>
  12. #include <linux/device.h>
  13. #include <linux/iopoll.h>
  14. #include <linux/of.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/ioport.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/mmc/host.h>
  19. #include <linux/mmc/mmc.h>
  20. #include <linux/mmc/sdio.h>
  21. #include <linux/mmc/slot-gpio.h>
  22. #include <linux/io.h>
  23. #include <linux/clk.h>
  24. #include <linux/clk-provider.h>
  25. #include <linux/regulator/consumer.h>
  26. #include <linux/reset.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/bitfield.h>
  29. #include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "meson-gx-mmc"

/* SD_EMMC_CLOCK: divider, source mux and phase/delay control */
#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_MASK GENMASK(5, 0)
#define CLK_SRC_MASK GENMASK(7, 6)
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
#define CLK_PHASE_0 0
#define CLK_PHASE_180 2
/* Fields whose position differs between controller revisions v2 and v3 */
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
#define CLK_V2_IRQ_SDIO_SLEEP BIT(25)
#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
#define CLK_V3_IRQ_SDIO_SLEEP BIT(29)
/* Accessors resolving the revision-dependent fields via the match data */
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)
#define CLK_IRQ_SDIO_SLEEP(h) (h->data->irq_sdio_sleep)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define ADJUST_ADJ_DELAY_MASK GENMASK(21, 16)
#define ADJUST_DS_EN BIT(15)
#define ADJUST_ADJ_EN BIT(13)

/* v3 register layout: delay split in two registers, adjust moved */
#define SD_EMMC_DELAY1 0x4
#define SD_EMMC_DELAY2 0x8
#define SD_EMMC_V3_ADJUST 0xc

#define SD_EMMC_CALOUT 0x10

/* SD_EMMC_START: descriptor chain engine control */
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_MASK GENMASK(31, 2)

/* SD_EMMC_CFG: bus configuration */
#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_MASK GENMASK(7, 4)
#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define CFG_RC_CC_MASK GENMASK(15, 12)
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_CHK_DS BIT(20)
#define CFG_AUTO_CLK BIT(23)
#define CFG_ERR_ABORT BIT(27)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)
#define STATUS_DESC_BUSY BIT(30)
#define STATUS_DATI GENMASK(23, 16)

/* SD_EMMC_IRQ_EN: interrupt enables (same bits as SD_EMMC_STATUS) */
#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_CRC_ERR \
	(IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_TIMEOUTS \
	(IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)
#define IRQ_EN_MASK \
	(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN)

/* Direct (non-chain) command registers */
#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68
#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_SRAM_DATA_BUF_LEN 1536
#define SD_EMMC_SRAM_DATA_BUF_OFF 0x200

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

/* Driver-private flags kept in data->host_cookie */
#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2
/*
 * Per-SoC register layout data: delay/always-on/IRQ-sleep bit positions and
 * the adjust register offset moved between controller revisions (v2 vs v3).
 */
struct meson_mmc_data {
	unsigned int tx_delay_mask;	/* TX delay field in SD_EMMC_CLOCK */
	unsigned int rx_delay_mask;	/* RX delay field in SD_EMMC_CLOCK */
	unsigned int always_on;		/* "clock always on" bit */
	unsigned int adjust;		/* offset of the adjust register */
	unsigned int irq_sdio_sleep;	/* SDIO IRQ in sleep bit */
};
/* Hardware DMA descriptor consumed by the SD_EMMC_START chain engine */
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};
/* Per-controller driver state */
struct meson_host {
	struct device *dev;
	const struct meson_mmc_data *data;	/* SoC-revision register layout */
	struct mmc_host *mmc;
	struct mmc_command *cmd;		/* command currently in flight */

	void __iomem *regs;
	struct clk *mux_clk;			/* input source mux */
	struct clk *mmc_clk;			/* divider output feeding the card */
	unsigned long req_rate;			/* last rate programmed */
	bool ddr;				/* DDR mode currently configured */

	bool dram_access_quirk;			/* DMA can't reach DDR; use SRAM */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_clk_gate;	/* optional "stop clock at pad" */

	unsigned int bounce_buf_size;
	void *bounce_buf;
	void __iomem *bounce_iomem_buf;		/* SRAM bounce buffer (quirk only) */
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;		/* descriptor chain buffer */
	dma_addr_t descs_dma_addr;

	int irq;

	bool needs_pre_post_req;		/* driver did its own pre/post req */

	spinlock_t lock;			/* protects SDIO IRQ enable */
};
/* Descriptor cmd_cfg word layout (also written to the direct CMD_CFG reg) */
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

/* Descriptor cmd_data word: buffer address plus flag bits in the low bits */
#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
/* Descriptor cmd_resp word */
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)
  176. static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
  177. {
  178. unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;
  179. if (!timeout)
  180. return SD_EMMC_CMD_TIMEOUT_DATA;
  181. timeout = roundup_pow_of_two(timeout);
  182. return min(timeout, 32768U); /* max. 2^15 ms */
  183. }
  184. static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
  185. {
  186. if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
  187. return cmd->mrq->cmd;
  188. else if (mmc_op_multi(cmd->opcode) &&
  189. (!cmd->mrq->sbc || cmd->error || cmd->data->error))
  190. return cmd->mrq->stop;
  191. else
  192. return NULL;
  193. }
/*
 * Decide whether this request may use descriptor chain DMA and record the
 * decision in data->host_cookie (SD_EMMC_DESC_CHAIN_MODE). If any check
 * fails the request falls back to the bounce buffer path.
 */
static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;

	/*
	 * When Controller DMA cannot directly access DDR memory, disable
	 * support for Chain Mode to directly use the internal SRAM using
	 * the bounce buffer mode.
	 */
	if (host->dram_access_quirk)
		return;

	/* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
	if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
		/*
		 * In block mode DMA descriptor format, "length" field indicates
		 * number of blocks and there is no way to pass DMA size that
		 * is not multiple of SDIO block size, making it impossible to
		 * tie more than one memory buffer with single SDIO block.
		 * Block mode sg buffer size should be aligned with SDIO block
		 * size, otherwise chain mode could not be used.
		 */
		for_each_sg(data->sg, sg, data->sg_len, i) {
			if (sg->length % data->blksz) {
				dev_warn_once(mmc_dev(mmc),
					      "unaligned sg len %u blksize %u, disabling descriptor DMA for transfer\n",
					      sg->length, data->blksz);
				return;
			}
		}
	}

	for_each_sg(data->sg, sg, data->sg_len, i) {
		/* check for 8 byte alignment */
		if (sg->offset % 8) {
			dev_warn_once(mmc_dev(mmc),
				      "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
				      sg->offset);
			return;
		}
	}

	/* All checks passed: allow descriptor chain mode for this request */
	data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}
  238. static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
  239. {
  240. return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
  241. }
  242. static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
  243. {
  244. return data && data->flags & MMC_DATA_READ &&
  245. !meson_mmc_desc_chain_mode(data);
  246. }
  247. static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
  248. {
  249. struct mmc_data *data = mrq->data;
  250. if (!data)
  251. return;
  252. meson_mmc_get_transfer_mode(mmc, mrq);
  253. data->host_cookie |= SD_EMMC_PRE_REQ_DONE;
  254. if (!meson_mmc_desc_chain_mode(data))
  255. return;
  256. data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
  257. mmc_get_dma_dir(data));
  258. if (!data->sg_count)
  259. dev_err(mmc_dev(mmc), "dma_map_sg failed");
  260. }
  261. static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
  262. int err)
  263. {
  264. struct mmc_data *data = mrq->data;
  265. if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
  266. dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
  267. mmc_get_dma_dir(data));
  268. }
/*
 * Gating the clock on this controller is tricky. It seems the mmc clock
 * is also used by the controller. It may crash during some operation if the
 * clock is stopped. The safest thing to do, whenever possible, is to keep
 * the clock running and stop it at the pad using the pinmux.
 */
static void meson_mmc_clk_gate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate) {
		/* Preferred: stop the clock at the pad via the pinmux */
		pinctrl_select_state(host->pinctrl, host->pins_clk_gate);
	} else {
		/*
		 * If the pinmux is not provided - default to the classic and
		 * unsafe method
		 */
		cfg = readl(host->regs + SD_EMMC_CFG);
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}
}
/* Re-enable the mmc clock, undoing either path of meson_mmc_clk_gate() */
static void meson_mmc_clk_ungate(struct meson_host *host)
{
	u32 cfg;

	if (host->pins_clk_gate)
		pinctrl_select_default_state(host->dev);

	/* Make sure the clock is not stopped in the controller */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);
}
/*
 * Program the requested card clock rate and DDR mode. The clock is gated
 * while the rate changes to avoid glitches, then ungated again. A rate of
 * 0 leaves the clock stopped. Returns 0 on success or a clk_set_rate()
 * error code.
 */
static int meson_mmc_clk_set(struct meson_host *host, unsigned long rate,
			     bool ddr)
{
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 cfg;

	/* Same request - bail-out */
	if (host->ddr == ddr && host->req_rate == rate)
		return 0;

	/* stop clock */
	meson_mmc_clk_gate(host);
	host->req_rate = 0;
	mmc->actual_clock = 0;

	/* return with clock being stopped */
	if (!rate)
		return 0;

	/* Stop the clock during rate change to avoid glitches */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg |= CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	if (ddr) {
		/* DDR modes require higher module clock */
		rate <<= 1;
		cfg |= CFG_DDR;
	} else {
		cfg &= ~CFG_DDR;
	}
	writel(cfg, host->regs + SD_EMMC_CFG);
	host->ddr = ddr;

	ret = clk_set_rate(host->mmc_clk, rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			rate, ret);
		return ret;
	}

	host->req_rate = rate;
	mmc->actual_clock = clk_get_rate(host->mmc_clk);

	/* We should report the real output frequency of the controller */
	if (ddr) {
		/* Undo the doubling applied above for the DDR module clock */
		host->req_rate >>= 1;
		mmc->actual_clock >>= 1;
	}

	dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
	if (rate != mmc->actual_clock)
		dev_dbg(host->dev, "requested rate was %lu\n", rate);

	/* (re)start clock */
	meson_mmc_clk_ungate(host);

	return 0;
}
/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	struct clk_mux *mux;
	struct clk_divider *div;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_parent[1];
	u32 clk_reg;

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = CLK_ALWAYS_ON(host);
	clk_reg |= CLK_DIV_MASK;	/* maximum divider -> minimum rate */
	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* get the mux parents ("clkin0" / "clkin1" from DT) */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk))
			return dev_err_probe(host->dev, PTR_ERR(clk),
					     "Missing clock %s\n", name);

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	mux->reg = host->regs + SD_EMMC_CLOCK;
	mux->shift = __ffs(CLK_SRC_MASK);
	mux->mask = CLK_SRC_MASK >> mux->shift;
	mux->hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &mux->hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider, child of the mux */
	div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	div->reg = host->regs + SD_EMMC_CLOCK;
	div->shift = __ffs(CLK_DIV_MASK);
	div->width = __builtin_popcountl(CLK_DIV_MASK);
	div->hw.init = &init;
	div->flags = CLK_DIVIDER_ONE_BASED;

	host->mmc_clk = devm_clk_register(host->dev, &div->hw);
	if (WARN_ON(IS_ERR(host->mmc_clk)))
		return PTR_ERR(host->mmc_clk);

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
	ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
	if (ret)
		return ret;

	return clk_prepare_enable(host->mmc_clk);
}
/* Clear the resampling enable bit in the (revision-dependent) adjust reg */
static void meson_mmc_disable_resampling(struct meson_host *host)
{
	unsigned int val = readl(host->regs + host->data->adjust);

	val &= ~ADJUST_ADJ_EN;
	writel(val, host->regs + host->data->adjust);
}
/* Disable resampling and zero the resampling delay */
static void meson_mmc_reset_resampling(struct meson_host *host)
{
	unsigned int val;

	meson_mmc_disable_resampling(host);

	val = readl(host->regs + host->data->adjust);
	val &= ~ADJUST_ADJ_DELAY_MASK;
	writel(val, host->regs + host->data->adjust);
}
/*
 * Tune by resampling: step the adjust delay (in source-clock periods) until
 * mmc_send_tuning() succeeds. On a retune, start from the delay after the
 * currently programmed one. Returns 0 on success, -EIO if no delay works.
 */
static int meson_mmc_resampling_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
	unsigned int val, dly, max_dly, i;
	int ret;

	/* Resampling is done using the source clock */
	max_dly = DIV_ROUND_UP(clk_get_rate(host->mux_clk),
			       clk_get_rate(host->mmc_clk));

	val = readl(host->regs + host->data->adjust);
	val |= ADJUST_ADJ_EN;
	writel(val, host->regs + host->data->adjust);

	if (mmc_doing_retune(mmc))
		dly = FIELD_GET(ADJUST_ADJ_DELAY_MASK, val) + 1;
	else
		dly = 0;

	for (i = 0; i < max_dly; i++) {
		val &= ~ADJUST_ADJ_DELAY_MASK;
		val |= FIELD_PREP(ADJUST_ADJ_DELAY_MASK, (dly + i) % max_dly);
		writel(val, host->regs + host->data->adjust);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret) {
			dev_dbg(mmc_dev(mmc), "resampling delay: %u\n",
				(dly + i) % max_dly);
			return 0;
		}
	}

	/* No working delay found - return to a clean, disabled state */
	meson_mmc_reset_resampling(host);
	return -EIO;
}
  470. static int meson_mmc_prepare_ios_clock(struct meson_host *host,
  471. struct mmc_ios *ios)
  472. {
  473. bool ddr;
  474. switch (ios->timing) {
  475. case MMC_TIMING_MMC_DDR52:
  476. case MMC_TIMING_UHS_DDR50:
  477. ddr = true;
  478. break;
  479. default:
  480. ddr = false;
  481. break;
  482. }
  483. return meson_mmc_clk_set(host, ios->clock, ddr);
  484. }
  485. static void meson_mmc_check_resampling(struct meson_host *host,
  486. struct mmc_ios *ios)
  487. {
  488. switch (ios->timing) {
  489. case MMC_TIMING_LEGACY:
  490. case MMC_TIMING_MMC_HS:
  491. case MMC_TIMING_SD_HS:
  492. case MMC_TIMING_MMC_DDR52:
  493. meson_mmc_disable_resampling(host);
  494. break;
  495. }
  496. }
  497. static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
  498. {
  499. struct meson_host *host = mmc_priv(mmc);
  500. u32 bus_width, val;
  501. int err;
  502. /*
  503. * GPIO regulator, only controls switching between 1v8 and
  504. * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
  505. */
  506. switch (ios->power_mode) {
  507. case MMC_POWER_OFF:
  508. mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
  509. mmc_regulator_disable_vqmmc(mmc);
  510. break;
  511. case MMC_POWER_UP:
  512. mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
  513. break;
  514. case MMC_POWER_ON:
  515. mmc_regulator_enable_vqmmc(mmc);
  516. break;
  517. }
  518. /* Bus width */
  519. switch (ios->bus_width) {
  520. case MMC_BUS_WIDTH_1:
  521. bus_width = CFG_BUS_WIDTH_1;
  522. break;
  523. case MMC_BUS_WIDTH_4:
  524. bus_width = CFG_BUS_WIDTH_4;
  525. break;
  526. case MMC_BUS_WIDTH_8:
  527. bus_width = CFG_BUS_WIDTH_8;
  528. break;
  529. default:
  530. dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
  531. ios->bus_width);
  532. bus_width = CFG_BUS_WIDTH_4;
  533. }
  534. val = readl(host->regs + SD_EMMC_CFG);
  535. val &= ~CFG_BUS_WIDTH_MASK;
  536. val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);
  537. writel(val, host->regs + SD_EMMC_CFG);
  538. meson_mmc_check_resampling(host, ios);
  539. err = meson_mmc_prepare_ios_clock(host, ios);
  540. if (err)
  541. dev_err(host->dev, "Failed to set clock: %d\n,", err);
  542. dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
  543. }
/* Finish the current request and hand it back to the mmc core */
static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	/* Unmap DMA if the driver did its own pre_req in meson_mmc_request() */
	if (host->needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
	mmc_request_done(host->mmc, mrq);
}
  553. static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
  554. {
  555. struct meson_host *host = mmc_priv(mmc);
  556. u32 cfg, blksz_old;
  557. cfg = readl(host->regs + SD_EMMC_CFG);
  558. blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);
  559. if (!is_power_of_2(blksz))
  560. dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);
  561. blksz = ilog2(blksz);
  562. /* check if block-size matches, if not update */
  563. if (blksz == blksz_old)
  564. return;
  565. dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
  566. blksz_old, blksz);
  567. cfg &= ~CFG_BLK_LEN_MASK;
  568. cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
  569. writel(cfg, host->regs + SD_EMMC_CFG);
  570. }
  571. static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
  572. {
  573. if (cmd->flags & MMC_RSP_PRESENT) {
  574. if (cmd->flags & MMC_RSP_136)
  575. *cmd_cfg |= CMD_CFG_RESP_128;
  576. *cmd_cfg |= CMD_CFG_RESP_NUM;
  577. if (!(cmd->flags & MMC_RSP_CRC))
  578. *cmd_cfg |= CMD_CFG_RESP_NOCRC;
  579. if (cmd->flags & MMC_RSP_BUSY)
  580. *cmd_cfg |= CMD_CFG_R1B;
  581. } else {
  582. *cmd_cfg |= CMD_CFG_NO_RESP;
  583. }
  584. }
/*
 * Build one DMA descriptor per scatterlist element and kick the chain
 * engine via SD_EMMC_START. Completion is signalled by IRQ_END_OF_CHAIN.
 */
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		/* In block mode the "length" field counts blocks, not bytes */
		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}
/* local sg copy for dram_access_quirk */
static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
				  size_t buflen, bool to_buffer)
{
	unsigned int sg_flags = SG_MITER_ATOMIC;
	struct scatterlist *sgl = data->sg;
	unsigned int nents = data->sg_len;
	struct sg_mapping_iter miter;
	unsigned int offset = 0;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int buf_offset = 0;
		unsigned int len, left;
		u32 *buf = miter.addr;

		len = min(miter.length, buflen - offset);
		left = len;

		/*
		 * The SRAM bounce buffer only tolerates 32-bit accesses, so
		 * move the data word-by-word with readl/writel. Lengths were
		 * validated as 32-bit aligned by
		 * meson_mmc_validate_dram_access() before this point.
		 */
		if (to_buffer) {
			do {
				writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
				buf_offset += 4;
				left -= 4;
			} while (left);
		} else {
			do {
				*buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
				buf_offset += 4;
				left -= 4;
			} while (left);
		}

		offset += len;
	}

	sg_miter_stop(&miter);
}
/*
 * Issue a single command. Scatter-gather requests are handed off to the
 * descriptor chain path; everything else is programmed directly into the
 * CMD_CFG/CMD_DAT/CMD_RSP registers, with the write kicked off by the
 * final CMD_ARG write.
 */
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			/* Stage write data into the bounce buffer up front */
			if (host->dram_access_quirk)
				meson_mmc_copy_buffer(host, data, xfer_bytes, true);
			else
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}
  707. static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
  708. {
  709. struct scatterlist *sg;
  710. int i;
  711. /* Reject request if any element offset or size is not 32bit aligned */
  712. for_each_sg(data->sg, sg, data->sg_len, i) {
  713. if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
  714. !IS_ALIGNED(sg->length, sizeof(u32))) {
  715. dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
  716. data->sg->offset, data->sg->length);
  717. return -EINVAL;
  718. }
  719. }
  720. return 0;
  721. }
/* mmc_host_ops .request: validate, map DMA when needed, then start */
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	/* pre_req was not called by the core; the driver must do it itself */
	host->needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	/*
	 * The memory at the end of the controller used as bounce buffer for
	 * the dram_access_quirk only accepts 32bit read/write access,
	 * check the aligment and length of the data before starting the request.
	 */
	if (host->dram_access_quirk && mrq->data) {
		mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
		if (mrq->cmd->error) {
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	if (host->needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		/* Only chain mode needs DMA mapping/unmapping */
		if (!meson_mmc_desc_chain_mode(mrq->data))
			host->needs_pre_post_req = false;
	}

	if (host->needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* If a CMD23 precedes the transfer, issue it first */
	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
}
/*
 * Copy the command response from the controller registers into cmd->resp[].
 *
 * For 136-bit responses the four response words are read from RSP3..RSP in
 * descending register order so that resp[0] ends up holding the word from
 * the highest-numbered response register, matching what the MMC core
 * expects for long responses.
 */
static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		/* Short (48-bit) response: single register read */
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}
  762. static void __meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
  763. {
  764. struct meson_host *host = mmc_priv(mmc);
  765. u32 reg_irqen = IRQ_EN_MASK;
  766. if (enable)
  767. reg_irqen |= IRQ_SDIO;
  768. writel(reg_irqen, host->regs + SD_EMMC_IRQ_EN);
  769. }
/*
 * Hard IRQ handler.
 *
 * Acks the raised interrupt bits, dispatches SDIO card interrupts, and for
 * command interrupts records any CRC/timeout error on the current command
 * before deferring completion to the threaded handler
 * (meson_mmc_irq_thread). Returns IRQ_NONE for spurious interrupts.
 */
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	u32 status, raw_status, irq_mask = IRQ_EN_MASK;
	irqreturn_t ret = IRQ_NONE;

	/* The SDIO interrupt bit is only relevant if the cap is enabled */
	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		irq_mask |= IRQ_SDIO;

	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_mask;

	if (!status) {
		dev_dbg(host->dev,
			"Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
			irq_mask, raw_status);
		return IRQ_NONE;
	}

	/* ack all raised interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	cmd = host->cmd;

	if (status & IRQ_SDIO) {
		spin_lock(&host->lock);
		/* Disable the SDIO irq until the core re-enables it via ack */
		__meson_mmc_enable_sdio_irq(host->mmc, 0);
		sdio_signal_irq(host->mmc);
		spin_unlock(&host->lock);
		status &= ~IRQ_SDIO;
		/* If SDIO was the only pending bit, we are done */
		if (!status)
			return IRQ_HANDLED;
	}

	/* A command interrupt with no command in flight is a driver bug */
	if (WARN_ON(!cmd))
		return IRQ_NONE;

	cmd->error = 0;
	if (status & IRQ_CRC_ERR) {
		dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
		cmd->error = -EILSEQ;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	if (status & IRQ_TIMEOUTS) {
		dev_dbg(host->dev, "Timeout - status 0x%08x\n", status);
		cmd->error = -ETIMEDOUT;
		ret = IRQ_WAKE_THREAD;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		struct mmc_data *data = cmd->data;

		/* Successful transfer: account the full transfer length */
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;

		return IRQ_WAKE_THREAD;
	}

out:
	if (cmd->error) {
		/* Stop desc in case of errors */
		u32 start = readl(host->regs + SD_EMMC_START);

		start &= ~START_DESC_BUSY;
		writel(start, host->regs + SD_EMMC_START);
	}

	return ret;
}
/*
 * Poll SD_EMMC_STATUS until both the controller busy and descriptor busy
 * bits clear. Returns 0 on success or the readl_poll_timeout() error
 * (-ETIMEDOUT) if the engine did not halt in time.
 */
static int meson_mmc_wait_desc_stop(struct meson_host *host)
{
	u32 status;

	/*
	 * It may sometimes take a while for it to actually halt. Here, we
	 * are giving it 5ms to comply
	 *
	 * If we don't confirm the descriptor is stopped, it might raise new
	 * IRQs after we have called mmc_request_done() which is bad.
	 */
	return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status,
				  !(status & (STATUS_BUSY | STATUS_DESC_BUSY)),
				  100, 5000);
}
/*
 * Threaded IRQ handler: completes the command started in meson_mmc_irq().
 *
 * On error, waits for the descriptor engine to stop before reporting the
 * request done. On success, copies bounce-buffered read data back to the
 * scatterlist, then either issues the next command in the request or
 * completes the request.
 */
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	if (cmd->error) {
		/* Make sure no further IRQs can fire before completing */
		meson_mmc_wait_desc_stop(host);
		meson_mmc_request_done(host->mmc, cmd->mrq);

		return IRQ_HANDLED;
	}

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		/* SRAM bounce buffer needs 32-bit iomem copies */
		if (host->dram_access_quirk)
			meson_mmc_copy_buffer(host, data, xfer_bytes, false);
		else
			sg_copy_from_buffer(data->sg, data->sg_len,
					    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}
  873. static void meson_mmc_cfg_init(struct meson_host *host)
  874. {
  875. u32 cfg = 0;
  876. cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
  877. ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
  878. cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
  879. cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));
  880. /* abort chain on R/W errors */
  881. cfg |= CFG_ERR_ABORT;
  882. writel(cfg, host->regs + SD_EMMC_CFG);
  883. }
  884. static int meson_mmc_card_busy(struct mmc_host *mmc)
  885. {
  886. struct meson_host *host = mmc_priv(mmc);
  887. u32 regval;
  888. regval = readl(host->regs + SD_EMMC_STATUS);
  889. /* We are only interrested in lines 0 to 3, so mask the other ones */
  890. return !(FIELD_GET(STATUS_DATI, regval) & 0xf);
  891. }
  892. static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
  893. {
  894. int ret;
  895. /* vqmmc regulator is available */
  896. if (!IS_ERR(mmc->supply.vqmmc)) {
  897. /*
  898. * The usual amlogic setup uses a GPIO to switch from one
  899. * regulator to the other. While the voltage ramp up is
  900. * pretty fast, care must be taken when switching from 3.3v
  901. * to 1.8v. Please make sure the regulator framework is aware
  902. * of your own regulator constraints
  903. */
  904. ret = mmc_regulator_set_vqmmc(mmc, ios);
  905. return ret < 0 ? ret : 0;
  906. }
  907. /* no vqmmc regulator, assume fixed regulator at 3/3.3V */
  908. if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
  909. return 0;
  910. return -EINVAL;
  911. }
/*
 * mmc_host_ops .enable_sdio_irq callback: locked wrapper around
 * __meson_mmc_enable_sdio_irq(). The lock serializes the IRQ_EN register
 * update against the hard IRQ handler, which also toggles the SDIO bit.
 */
static void meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct meson_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	__meson_mmc_enable_sdio_irq(mmc, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * mmc_host_ops .ack_sdio_irq callback: re-enable the SDIO interrupt that
 * the hard IRQ handler disabled when signalling the core.
 */
static void meson_mmc_ack_sdio_irq(struct mmc_host *mmc)
{
	meson_mmc_enable_sdio_irq(mmc, 1);
}
/* Host controller operations registered with the MMC core */
static const struct mmc_host_ops meson_mmc_ops = {
	.request	= meson_mmc_request,
	.set_ios	= meson_mmc_set_ios,
	.get_cd		= mmc_gpio_get_cd,
	.pre_req	= meson_mmc_pre_req,
	.post_req	= meson_mmc_post_req,
	.execute_tuning	= meson_mmc_resampling_tuning,
	.card_busy	= meson_mmc_card_busy,
	.start_signal_voltage_switch = meson_mmc_voltage_switch,
	.enable_sdio_irq = meson_mmc_enable_sdio_irq,
	.ack_sdio_irq	= meson_mmc_ack_sdio_irq,
};
  936. static int meson_mmc_probe(struct platform_device *pdev)
  937. {
  938. struct resource *res;
  939. struct meson_host *host;
  940. struct mmc_host *mmc;
  941. struct clk *core_clk;
  942. int cd_irq, ret;
  943. mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct meson_host));
  944. if (!mmc)
  945. return -ENOMEM;
  946. host = mmc_priv(mmc);
  947. host->mmc = mmc;
  948. host->dev = &pdev->dev;
  949. dev_set_drvdata(&pdev->dev, host);
  950. /* The G12A SDIO Controller needs an SRAM bounce buffer */
  951. host->dram_access_quirk = device_property_read_bool(&pdev->dev,
  952. "amlogic,dram-access-quirk");
  953. /* Get regulators and the supported OCR mask */
  954. ret = mmc_regulator_get_supply(mmc);
  955. if (ret)
  956. return ret;
  957. ret = mmc_of_parse(mmc);
  958. if (ret)
  959. return dev_err_probe(&pdev->dev, ret, "error parsing DT\n");
  960. mmc->caps |= MMC_CAP_CMD23;
  961. if (mmc->caps & MMC_CAP_SDIO_IRQ)
  962. mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
  963. host->data = of_device_get_match_data(&pdev->dev);
  964. if (!host->data)
  965. return -EINVAL;
  966. ret = device_reset_optional(&pdev->dev);
  967. if (ret)
  968. return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
  969. host->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
  970. if (IS_ERR(host->regs))
  971. return PTR_ERR(host->regs);
  972. host->irq = platform_get_irq(pdev, 0);
  973. if (host->irq < 0)
  974. return host->irq;
  975. cd_irq = platform_get_irq_optional(pdev, 1);
  976. mmc_gpio_set_cd_irq(mmc, cd_irq);
  977. host->pinctrl = devm_pinctrl_get(&pdev->dev);
  978. if (IS_ERR(host->pinctrl))
  979. return PTR_ERR(host->pinctrl);
  980. host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
  981. "clk-gate");
  982. if (IS_ERR(host->pins_clk_gate)) {
  983. dev_warn(&pdev->dev,
  984. "can't get clk-gate pinctrl, using clk_stop bit\n");
  985. host->pins_clk_gate = NULL;
  986. }
  987. core_clk = devm_clk_get_enabled(&pdev->dev, "core");
  988. if (IS_ERR(core_clk))
  989. return PTR_ERR(core_clk);
  990. ret = meson_mmc_clk_init(host);
  991. if (ret)
  992. return ret;
  993. /* set config to sane default */
  994. meson_mmc_cfg_init(host);
  995. /* Stop execution */
  996. writel(0, host->regs + SD_EMMC_START);
  997. /* clear, ack and enable interrupts */
  998. writel(0, host->regs + SD_EMMC_IRQ_EN);
  999. writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
  1000. writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
  1001. ret = request_threaded_irq(host->irq, meson_mmc_irq,
  1002. meson_mmc_irq_thread, IRQF_ONESHOT,
  1003. dev_name(&pdev->dev), host);
  1004. if (ret)
  1005. goto err_init_clk;
  1006. spin_lock_init(&host->lock);
  1007. if (host->dram_access_quirk) {
  1008. /* Limit segments to 1 due to low available sram memory */
  1009. mmc->max_segs = 1;
  1010. /* Limit to the available sram memory */
  1011. mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
  1012. mmc->max_blk_size;
  1013. } else {
  1014. mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
  1015. mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
  1016. sizeof(struct sd_emmc_desc);
  1017. }
  1018. mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
  1019. mmc->max_seg_size = mmc->max_req_size;
  1020. /*
  1021. * At the moment, we don't know how to reliably enable HS400.
  1022. * From the different datasheets, it is not even clear if this mode
  1023. * is officially supported by any of the SoCs
  1024. */
  1025. mmc->caps2 &= ~MMC_CAP2_HS400;
  1026. if (host->dram_access_quirk) {
  1027. /*
  1028. * The MMC Controller embeds 1,5KiB of internal SRAM
  1029. * that can be used to be used as bounce buffer.
  1030. * In the case of the G12A SDIO controller, use these
  1031. * instead of the DDR memory
  1032. */
  1033. host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
  1034. host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
  1035. host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
  1036. } else {
  1037. /* data bounce buffer */
  1038. host->bounce_buf_size = mmc->max_req_size;
  1039. host->bounce_buf =
  1040. dmam_alloc_coherent(host->dev, host->bounce_buf_size,
  1041. &host->bounce_dma_addr, GFP_KERNEL);
  1042. if (host->bounce_buf == NULL) {
  1043. dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
  1044. ret = -ENOMEM;
  1045. goto err_free_irq;
  1046. }
  1047. }
  1048. host->descs = dmam_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
  1049. &host->descs_dma_addr, GFP_KERNEL);
  1050. if (!host->descs) {
  1051. dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
  1052. ret = -ENOMEM;
  1053. goto err_free_irq;
  1054. }
  1055. mmc->ops = &meson_mmc_ops;
  1056. ret = mmc_add_host(mmc);
  1057. if (ret)
  1058. goto err_free_irq;
  1059. return 0;
  1060. err_free_irq:
  1061. free_irq(host->irq, host);
  1062. err_init_clk:
  1063. clk_disable_unprepare(host->mmc_clk);
  1064. return ret;
  1065. }
/*
 * Remove: unregister the host, then quiesce the controller. Interrupts
 * are disabled at the controller before free_irq() so no new IRQs can
 * fire once the handler is gone; the MMC clock is released last.
 */
static void meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	free_irq(host->irq, host);

	clk_disable_unprepare(host->mmc_clk);
}
/* Per-SoC register layout for GX-family SoCs (v2 clock register fields) */
static const struct meson_mmc_data meson_gx_data = {
	.tx_delay_mask	= CLK_V2_TX_DELAY_MASK,
	.rx_delay_mask	= CLK_V2_RX_DELAY_MASK,
	.always_on	= CLK_V2_ALWAYS_ON,
	.adjust		= SD_EMMC_ADJUST,
	.irq_sdio_sleep	= CLK_V2_IRQ_SDIO_SLEEP,
};
/* Per-SoC register layout for AXG-family SoCs (v3 clock register fields) */
static const struct meson_mmc_data meson_axg_data = {
	.tx_delay_mask	= CLK_V3_TX_DELAY_MASK,
	.rx_delay_mask	= CLK_V3_RX_DELAY_MASK,
	.always_on	= CLK_V3_ALWAYS_ON,
	.adjust		= SD_EMMC_V3_ADJUST,
	.irq_sdio_sleep	= CLK_V3_IRQ_SDIO_SLEEP,
};
/* Device-tree match table: GX variants share meson_gx_data, AXG is v3 */
static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc",		.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxbb-mmc", 	.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxl-mmc",	.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxm-mmc",	.data = &meson_gx_data },
	{ .compatible = "amlogic,meson-axg-mmc",	.data = &meson_axg_data },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
/* Platform driver glue; asynchronous probing is safe for this driver */
static struct platform_driver meson_mmc_driver = {
	.probe		= meson_mmc_probe,
	.remove_new	= meson_mmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = meson_mmc_of_match,
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");