sdhci.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <phys2bus.h>
#include <power/regulator.h>

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
    unsigned long timeout;

    /* Wait max 100 ms */
    timeout = 100;
    sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
    while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
        if (timeout == 0) {
            printf("%s: Reset 0x%x never completed.\n",
                   __func__, (int)mask);
            return;
        }
        timeout--;
        udelay(1000);
    }
}

static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
    int i;

    if (cmd->resp_type & MMC_RSP_136) {
        /* CRC is stripped so we need to do some shifting. */
        for (i = 0; i < 4; i++) {
            cmd->response[i] = sdhci_readl(host,
                    SDHCI_RESPONSE + (3 - i) * 4) << 8;
            if (i != 3)
                cmd->response[i] |= sdhci_readb(host,
                        SDHCI_RESPONSE + (3 - i) * 4 - 1);
        }
    } else {
        cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
    }
}

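/*
 * PIO transfer of one block through the buffer data port register, one
 * 32-bit word at a time, in the direction given by data->flags.
 */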
static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
    int i;
    char *offs;

    for (i = 0; i < data->blocksize; i += 4) {
        offs = data->dest + i;
        if (data->flags == MMC_DATA_READ)
            *(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
        else
            sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
    }
}

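/*
 * Select the DMA mode in the host control register, fall back to the bounce
 * buffer for SDMA when the data buffer is not suitably aligned, then map the
 * buffer and program the SDMA or ADMA address registers.
 */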
#if (CONFIG_IS_ENABLED(MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
                  int *is_aligned, int trans_bytes)
{
    dma_addr_t dma_addr;
    unsigned char ctrl;
    void *buf;

    if (data->flags == MMC_DATA_READ)
        buf = data->dest;
    else
        buf = (void *)data->src;

    ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    ctrl &= ~SDHCI_CTRL_DMA_MASK;
    if (host->flags & USE_ADMA64)
        ctrl |= SDHCI_CTRL_ADMA64;
    else if (host->flags & USE_ADMA)
        ctrl |= SDHCI_CTRL_ADMA32;
    sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

    if (host->flags & USE_SDMA &&
        (host->force_align_buffer ||
         (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
          ((unsigned long)buf & 0x7) != 0x0))) {
        *is_aligned = 0;
        if (data->flags != MMC_DATA_READ)
            memcpy(host->align_buffer, buf, trans_bytes);
        buf = host->align_buffer;
    }

    host->start_addr = dma_map_single(buf, trans_bytes,
                      mmc_get_dma_dir(data));

    if (host->flags & USE_SDMA) {
        dma_addr = dev_phys_to_bus(mmc_to_dev(host->mmc), host->start_addr);
        sdhci_writel(host, dma_addr, SDHCI_DMA_ADDRESS);
    }
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
    else if (host->flags & (USE_ADMA | USE_ADMA64)) {
        sdhci_prepare_adma_table(host->adma_desc_table, data,
                     host->start_addr);

        sdhci_writel(host, lower_32_bits(host->adma_addr),
                 SDHCI_ADMA_ADDRESS);
        if (host->flags & USE_ADMA64)
            sdhci_writel(host, upper_32_bits(host->adma_addr),
                     SDHCI_ADMA_ADDRESS_HI);
    }
#endif
}
#else
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
                  int *is_aligned, int trans_bytes)
{}
#endif

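/*
 * Poll the interrupt status register until SDHCI_INT_DATA_END is set,
 * servicing PIO buffer-ready interrupts block by block and, for SDMA,
 * re-programming the system address register at each boundary interrupt.
 */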
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
{
    dma_addr_t start_addr = host->start_addr;
    unsigned int stat, rdy, mask, timeout, block = 0;
    bool transfer_done = false;

    timeout = 1000000;
    rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
    mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
    do {
        stat = sdhci_readl(host, SDHCI_INT_STATUS);
        if (stat & SDHCI_INT_ERROR) {
            pr_debug("%s: Error detected in status(0x%X)!\n",
                     __func__, stat);
            return -EIO;
        }
        if (!transfer_done && (stat & rdy)) {
            if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
                continue;
            sdhci_writel(host, rdy, SDHCI_INT_STATUS);
            sdhci_transfer_pio(host, data);
            data->dest += data->blocksize;
            if (++block >= data->blocks) {
                /* Keep looping until the SDHCI_INT_DATA_END is
                 * cleared, even if we finished sending all the
                 * blocks.
                 */
                transfer_done = true;
                continue;
            }
        }
        if ((host->flags & USE_DMA) && !transfer_done &&
            (stat & SDHCI_INT_DMA_END)) {
            sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
            if (host->flags & USE_SDMA) {
                start_addr &=
                    ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
                start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
                start_addr = dev_phys_to_bus(mmc_to_dev(host->mmc),
                                 start_addr);
                sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
            }
        }
        if (timeout-- > 0)
            udelay(10);
        else {
            printf("%s: Transfer data timeout\n", __func__);
            return -ETIMEDOUT;
        }
    } while (!(stat & SDHCI_INT_DATA_END));

#if (CONFIG_IS_ENABLED(MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
    dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
             mmc_get_dma_dir(data));
#endif

    return 0;
}

/*
 * The driver sends no command while the card is busy, so it must wait for
 * the card to become ready. Whenever the card is still busy when the
 * timeout expires, the (last) timeout value is doubled, as long as it does
 * not exceed the globally defined maximum. Each call starts from the last
 * timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT         3200
#define SDHCI_CMD_DEFAULT_TIMEOUT     100
#define SDHCI_READ_STATUS_TIMEOUT     1000

#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
                  struct mmc_data *data)
{
    struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
                  struct mmc_data *data)
{
#endif
    struct sdhci_host *host = mmc->priv;
    unsigned int stat = 0;
    int ret = 0;
    int trans_bytes = 0, is_aligned = 1;
    u32 mask, flags, mode = 0;
    unsigned int time = 0;
    int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
    ulong start = get_timer(0);

    host->start_addr = 0;
    /* Timeout unit - ms */
    static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

    mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

    /*
     * We shouldn't wait for data inhibit for stop commands, even
     * though they might use busy signaling
     */
    if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
        ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
          cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
        mask &= ~SDHCI_DATA_INHIBIT;

    while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
        if (time >= cmd_timeout) {
            printf("%s: MMC: %d busy ", __func__, mmc_dev);
            if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
                cmd_timeout += cmd_timeout;
                printf("timeout increasing to: %u ms.\n",
                       cmd_timeout);
            } else {
                puts("timeout.\n");
                return -ECOMM;
            }
        }
        time++;
        udelay(1000);
    }

    sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

    mask = SDHCI_INT_RESPONSE;
    if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
         cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
        mask = SDHCI_INT_DATA_AVAIL;

    if (!(cmd->resp_type & MMC_RSP_PRESENT))
        flags = SDHCI_CMD_RESP_NONE;
    else if (cmd->resp_type & MMC_RSP_136)
        flags = SDHCI_CMD_RESP_LONG;
    else if (cmd->resp_type & MMC_RSP_BUSY) {
        flags = SDHCI_CMD_RESP_SHORT_BUSY;
        mask |= SDHCI_INT_DATA_END;
    } else
        flags = SDHCI_CMD_RESP_SHORT;

    if (cmd->resp_type & MMC_RSP_CRC)
        flags |= SDHCI_CMD_CRC;
    if (cmd->resp_type & MMC_RSP_OPCODE)
        flags |= SDHCI_CMD_INDEX;
    if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
        cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
        flags |= SDHCI_CMD_DATA;

    /* Set the transfer mode according to the data flag */
    if (data) {
        sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

        if (!(host->quirks & SDHCI_QUIRK_SUPPORT_SINGLE))
            mode = SDHCI_TRNS_BLK_CNT_EN;
        trans_bytes = data->blocks * data->blocksize;
        if (data->blocks > 1)
            mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_BLK_CNT_EN;

        if (data->flags == MMC_DATA_READ)
            mode |= SDHCI_TRNS_READ;

        if (host->flags & USE_DMA) {
            mode |= SDHCI_TRNS_DMA;
            sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
        }

        sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
                data->blocksize),
                SDHCI_BLOCK_SIZE);
        sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
        sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
    } else if (cmd->resp_type & MMC_RSP_BUSY) {
        sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
    }

    sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
    sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
    start = get_timer(0);
    do {
        stat = sdhci_readl(host, SDHCI_INT_STATUS);
        if (stat & SDHCI_INT_ERROR)
            break;

        if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
            if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
                return 0;
            } else {
                printf("%s: Timeout for status update!\n",
                       __func__);
                return -ETIMEDOUT;
            }
        }
    } while ((stat & mask) != mask);

    if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
        sdhci_cmd_done(host, cmd);
        sdhci_writel(host, mask, SDHCI_INT_STATUS);
    } else
        ret = -1;

    if (!ret && data)
        ret = sdhci_transfer_data(host, data);

    if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
        udelay(1000);

    stat = sdhci_readl(host, SDHCI_INT_STATUS);
    sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

    if (!ret) {
        if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
            !is_aligned && (data->flags == MMC_DATA_READ))
            memcpy(data->dest, host->align_buffer, trans_bytes);
        return 0;
    }

    sdhci_reset(host, SDHCI_RESET_CMD);
    sdhci_reset(host, SDHCI_RESET_DATA);
    if (stat & SDHCI_INT_TIMEOUT)
        return -ETIMEDOUT;
    else
        return -ECOMM;
}

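/*
 * Tuning is delegated entirely to the platform driver's
 * platform_execute_tuning() hook when one is provided.
 */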
#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
    int err;
    struct mmc *mmc = mmc_get_mmc_dev(dev);
    struct sdhci_host *host = mmc->priv;

    debug("%s\n", __func__);

    if (host->ops && host->ops->platform_execute_tuning) {
        err = host->ops->platform_execute_tuning(mmc, opcode);
        if (err)
            return err;
        return 0;
    }
    return 0;
}
#endif

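/*
 * Program the SD clock: wait for the command/data lines to go idle, pick a
 * divisor (programmable clock mode or a divided clock on SDHCI 3.00, a
 * power-of-two divisor on 2.00), then enable the internal clock, wait for it
 * to stabilise and finally gate it through to the card.
 */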
int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
    struct sdhci_host *host = mmc->priv;
    unsigned int div, clk = 0, timeout;
    int ret;

    /* Wait max 20 ms */
    timeout = 200;
    while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
           (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
        if (timeout == 0) {
            printf("%s: Timeout to wait cmd & data inhibit\n",
                   __func__);
            return -EBUSY;
        }

        timeout--;
        udelay(100);
    }

    sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

    if (clock == 0)
        return 0;

    if (host->ops && host->ops->set_delay) {
        ret = host->ops->set_delay(host);
        if (ret) {
            printf("%s: Error while setting tap delay\n", __func__);
            return ret;
        }
    }

    if (host->ops && host->ops->config_dll) {
        ret = host->ops->config_dll(host, clock, false);
        if (ret) {
            printf("%s: Error while configuring dll\n", __func__);
            return ret;
        }
    }

    if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
        /*
         * Check if the Host Controller supports Programmable Clock
         * Mode.
         */
        if (host->clk_mul) {
            for (div = 1; div <= 1024; div++) {
                if ((host->max_clk / div) <= clock)
                    break;
            }
            /*
             * Set Programmable Clock Mode in the Clock
             * Control register.
             */
            clk = SDHCI_PROG_CLOCK_MODE;
            div--;
        } else {
            /* Version 3.00 divisors must be a multiple of 2. */
            if (host->max_clk <= clock) {
                div = 1;
            } else {
                for (div = 2;
                     div < SDHCI_MAX_DIV_SPEC_300;
                     div += 2) {
                    if ((host->max_clk / div) <= clock)
                        break;
                }
            }
            div >>= 1;
        }
    } else {
        /* Version 2.00 divisors must be a power of 2. */
        for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
            if ((host->max_clk / div) <= clock)
                break;
        }
        div >>= 1;
    }

    if (host->ops && host->ops->set_clock)
        host->ops->set_clock(host, div);

    if (host->ops && host->ops->config_dll) {
        ret = host->ops->config_dll(host, clock, true);
        if (ret) {
            printf("%s: Error while configuring dll\n", __func__);
            return ret;
        }
    }

    clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
    clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
        << SDHCI_DIVIDER_HI_SHIFT;
    clk |= SDHCI_CLOCK_INT_EN;
    sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

    /* Wait max 20 ms */
    timeout = 20;
    while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
        & SDHCI_CLOCK_INT_STABLE)) {
        if (timeout == 0) {
            printf("%s: Internal clock never stabilised.\n",
                   __func__);
            return -EBUSY;
        }

        timeout--;
        udelay(1000);
    }

    clk |= SDHCI_CLOCK_CARD_EN;
    sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
    return 0;
}

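/*
 * Translate the selected VDD bit into a power control register value and
 * switch bus power on, or turn it off when no supported voltage is selected.
 */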
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
    u8 pwr = 0;

    if (power != (unsigned short)-1) {
        switch (1 << power) {
        case MMC_VDD_165_195:
            pwr = SDHCI_POWER_180;
            break;
        case MMC_VDD_29_30:
        case MMC_VDD_30_31:
            pwr = SDHCI_POWER_300;
            break;
        case MMC_VDD_32_33:
        case MMC_VDD_33_34:
            pwr = SDHCI_POWER_330;
            break;
        }
    }

    if (pwr == 0) {
        sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
        return;
    }

    pwr |= SDHCI_POWER_ON;

    sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}

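/*
 * Map the currently selected bus speed mode onto the UHS mode select field
 * of the HOST_CONTROL2 register.
 */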
void sdhci_set_uhs_timing(struct sdhci_host *host)
{
    struct mmc *mmc = host->mmc;
    u32 reg;

    reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
    reg &= ~SDHCI_CTRL_UHS_MASK;

    switch (mmc->selected_mode) {
    case UHS_SDR25:
    case MMC_HS:
        reg |= SDHCI_CTRL_UHS_SDR25;
        break;
    case UHS_SDR50:
    case MMC_HS_52:
        reg |= SDHCI_CTRL_UHS_SDR50;
        break;
    case UHS_DDR50:
    case MMC_DDR_52:
        reg |= SDHCI_CTRL_UHS_DDR50;
        break;
    case UHS_SDR104:
    case MMC_HS_200:
        reg |= SDHCI_CTRL_UHS_SDR104;
        break;
    case MMC_HS_400:
    case MMC_HS_400_ES:
        reg |= SDHCI_CTRL_HS400;
        break;
    default:
        reg |= SDHCI_CTRL_UHS_SDR12;
    }

    sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
}

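/*
 * Switch the I/O signalling voltage: cycle the vqmmc regulator to the new
 * level when regulator support is enabled, and for SD cards toggle the
 * 1.8 V signalling enable bit in HOST_CONTROL2.
 */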
static void sdhci_set_voltage(struct sdhci_host *host)
{
    if (IS_ENABLED(CONFIG_MMC_IO_VOLTAGE)) {
        struct mmc *mmc = (struct mmc *)host->mmc;
        u32 ctrl;

        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

        switch (mmc->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
#if CONFIG_IS_ENABLED(DM_REGULATOR)
            if (mmc->vqmmc_supply) {
                if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
                    pr_err("failed to disable vqmmc-supply\n");
                    return;
                }

                if (regulator_set_value(mmc->vqmmc_supply, 3300000)) {
                    pr_err("failed to set vqmmc-voltage to 3.3V\n");
                    return;
                }

                if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
                    pr_err("failed to enable vqmmc-supply\n");
                    return;
                }
            }
#endif
            if (IS_SD(mmc)) {
                ctrl &= ~SDHCI_CTRL_VDD_180;
                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
            }

            /* Wait for 5ms */
            mdelay(5);

            /* 3.3V regulator output should be stable within 5 ms */
            if (IS_SD(mmc)) {
                if (ctrl & SDHCI_CTRL_VDD_180) {
                    pr_err("3.3V regulator output did not become stable\n");
                    return;
                }
            }

            break;
        case MMC_SIGNAL_VOLTAGE_180:
#if CONFIG_IS_ENABLED(DM_REGULATOR)
            if (mmc->vqmmc_supply) {
                if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
                    pr_err("failed to disable vqmmc-supply\n");
                    return;
                }

                if (regulator_set_value(mmc->vqmmc_supply, 1800000)) {
                    pr_err("failed to set vqmmc-voltage to 1.8V\n");
                    return;
                }

                if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
                    pr_err("failed to enable vqmmc-supply\n");
                    return;
                }
            }
#endif
            if (IS_SD(mmc)) {
                ctrl |= SDHCI_CTRL_VDD_180;
                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
            }

            /* Wait for 5 ms */
            mdelay(5);

            /* 1.8V regulator output has to be stable within 5 ms */
            if (IS_SD(mmc)) {
                if (!(ctrl & SDHCI_CTRL_VDD_180)) {
                    pr_err("1.8V regulator output did not become stable\n");
                    return;
                }
            }

            break;
        default:
            /* No signal voltage switch required */
            return;
        }
    }
}

void sdhci_set_control_reg(struct sdhci_host *host)
{
    sdhci_set_voltage(host);
    sdhci_set_uhs_timing(host);
}

#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
    struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
    u32 ctrl;
    struct sdhci_host *host = mmc->priv;
    bool no_hispd_bit = false;

    if (host->ops && host->ops->set_control_reg)
        host->ops->set_control_reg(host);

    if (mmc->clock != host->clock)
        sdhci_set_clock(mmc, mmc->clock);

    if (mmc->clk_disable)
        sdhci_set_clock(mmc, 0);

    /* Set bus width */
    ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
    if (mmc->bus_width == 8) {
        ctrl &= ~SDHCI_CTRL_4BITBUS;
        if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
            (host->quirks & SDHCI_QUIRK_USE_WIDE8))
            ctrl |= SDHCI_CTRL_8BITBUS;
    } else {
        if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
            (host->quirks & SDHCI_QUIRK_USE_WIDE8))
            ctrl &= ~SDHCI_CTRL_8BITBUS;
        if (mmc->bus_width == 4)
            ctrl |= SDHCI_CTRL_4BITBUS;
        else
            ctrl &= ~SDHCI_CTRL_4BITBUS;
    }

    if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
        (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE)) {
        ctrl &= ~SDHCI_CTRL_HISPD;
        no_hispd_bit = true;
    }

    if (!no_hispd_bit) {
        if (mmc->selected_mode == MMC_HS ||
            mmc->selected_mode == SD_HS ||
            mmc->selected_mode == MMC_HS_52 ||
            mmc->selected_mode == MMC_DDR_52 ||
            mmc->selected_mode == MMC_HS_200 ||
            mmc->selected_mode == MMC_HS_400 ||
            mmc->selected_mode == MMC_HS_400_ES ||
            mmc->selected_mode == UHS_SDR25 ||
            mmc->selected_mode == UHS_SDR50 ||
            mmc->selected_mode == UHS_SDR104 ||
            mmc->selected_mode == UHS_DDR50)
            ctrl |= SDHCI_CTRL_HISPD;
        else
            ctrl &= ~SDHCI_CTRL_HISPD;
    }

    sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

    /* If available, call the driver specific "post" set_ios() function */
    if (host->ops && host->ops->set_ios_post)
        return host->ops->set_ios_post(host);

    return 0;
}

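/*
 * One-time controller initialisation: claim the card-detect GPIO, reset the
 * controller, set up the bounce buffer when needed, apply bus power and
 * enable (but do not signal) the interrupt status bits the driver polls.
 */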
static int sdhci_init(struct mmc *mmc)
{
    struct sdhci_host *host = mmc->priv;
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
    struct udevice *dev = mmc->dev;

    gpio_request_by_name(dev, "cd-gpios", 0,
                 &host->cd_gpio, GPIOD_IS_IN);
#endif

    sdhci_reset(host, SDHCI_RESET_ALL);

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
    host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
    /*
     * Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
     * is defined.
     */
    host->force_align_buffer = true;
#else
    if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
        host->align_buffer = memalign(8, 512 * 1024);
        if (!host->align_buffer) {
            printf("%s: Aligned buffer alloc failed!!!\n",
                   __func__);
            return -ENOMEM;
        }
    }
#endif

    sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

    if (host->ops && host->ops->get_cd)
        host->ops->get_cd(host);

    /* Enable only interrupts served by the SD controller */
    sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
             SDHCI_INT_ENABLE);
    /* Mask all sdhci interrupt sources */
    sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

    return 0;
}

#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
    struct mmc *mmc = mmc_get_mmc_dev(dev);

    return sdhci_init(mmc);
}

static int sdhci_deferred_probe(struct udevice *dev)
{
    int err;
    struct mmc *mmc = mmc_get_mmc_dev(dev);
    struct sdhci_host *host = mmc->priv;

    if (host->ops && host->ops->deferred_probe) {
        err = host->ops->deferred_probe(host);
        if (err)
            return err;
    }

    return 0;
}

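/*
 * Card detection: non-removable and polled hosts report the card as always
 * present; otherwise use the card-detect GPIO when available, falling back
 * to the controller's present-state register.
 */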
static int sdhci_get_cd(struct udevice *dev)
{
    struct mmc *mmc = mmc_get_mmc_dev(dev);
    struct sdhci_host *host = mmc->priv;
    int value;

    /* If nonremovable, assume that the card is always present. */
    if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
        return 1;
    /* If polling, assume that the card is always present. */
    if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
        return 1;

#if CONFIG_IS_ENABLED(DM_GPIO)
    value = dm_gpio_get_value(&host->cd_gpio);
    if (value >= 0) {
        if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
            return !value;
        else
            return value;
    }
#endif
    value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
           SDHCI_CARD_PRESENT);
    if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
        return !value;
    else
        return value;
}

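/*
 * Poll the DAT0 line level in the present-state register until it matches
 * the requested state or timeout_us expires (a timeout of 0 waits forever).
 */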
static int sdhci_wait_dat0(struct udevice *dev, int state,
               int timeout_us)
{
    int tmp;
    struct mmc *mmc = mmc_get_mmc_dev(dev);
    struct sdhci_host *host = mmc->priv;
    unsigned long timeout = timer_get_us() + timeout_us;

    // readx_poll_timeout is unsuitable because sdhci_readl accepts
    // two arguments
    do {
        tmp = sdhci_readl(host, SDHCI_PRESENT_STATE);
        if (!!(tmp & SDHCI_DATA_0_LVL_MASK) == !!state)
            return 0;
    } while (!timeout_us || !time_after(timer_get_us(), timeout));

    return -ETIMEDOUT;
}

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
static int sdhci_set_enhanced_strobe(struct udevice *dev)
{
    struct mmc *mmc = mmc_get_mmc_dev(dev);
    struct sdhci_host *host = mmc->priv;

    if (host->ops && host->ops->set_enhanced_strobe)
        return host->ops->set_enhanced_strobe(host);

    return -ENOTSUPP;
}
#endif

const struct dm_mmc_ops sdhci_ops = {
    .send_cmd = sdhci_send_command,
    .set_ios = sdhci_set_ios,
    .get_cd = sdhci_get_cd,
    .deferred_probe = sdhci_deferred_probe,
#ifdef MMC_SUPPORTS_TUNING
    .execute_tuning = sdhci_execute_tuning,
#endif
    .wait_dat0 = sdhci_wait_dat0,
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
    .set_enhanced_strobe = sdhci_set_enhanced_strobe,
#endif
};
#else
static const struct mmc_ops sdhci_ops = {
    .send_cmd = sdhci_send_command,
    .set_ios = sdhci_set_ios,
    .init = sdhci_init,
};
#endif

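/*
 * Parse the controller capability registers (optionally masked/augmented by
 * the sdhci-caps/sdhci-caps-mask device tree properties) and fill in the
 * mmc_config: DMA mode, clock limits, supported voltages and bus modes.
 */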
int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
        u32 f_max, u32 f_min)
{
    u32 caps, caps_1 = 0;
#if CONFIG_IS_ENABLED(DM_MMC)
    u64 dt_caps, dt_caps_mask;

    dt_caps_mask = dev_read_u64_default(host->mmc->dev,
                        "sdhci-caps-mask", 0);
    dt_caps = dev_read_u64_default(host->mmc->dev,
                       "sdhci-caps", 0);
    caps = ~lower_32_bits(dt_caps_mask) &
           sdhci_readl(host, SDHCI_CAPABILITIES);
    caps |= lower_32_bits(dt_caps);
#else
    caps = sdhci_readl(host, SDHCI_CAPABILITIES);
#endif
    debug("%s, caps: 0x%x\n", __func__, caps);

#if CONFIG_IS_ENABLED(MMC_SDHCI_SDMA)
    if ((caps & SDHCI_CAN_DO_SDMA)) {
        host->flags |= USE_SDMA;
    } else {
        debug("%s: Your controller doesn't support SDMA!!\n",
              __func__);
    }
#endif
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
    if (!(caps & SDHCI_CAN_DO_ADMA2)) {
        printf("%s: Your controller doesn't support ADMA!!\n",
               __func__);
        return -EINVAL;
    }
    host->adma_desc_table = sdhci_adma_init();
    host->adma_addr = (dma_addr_t)host->adma_desc_table;

#ifdef CONFIG_DMA_ADDR_T_64BIT
    host->flags |= USE_ADMA64;
#else
    host->flags |= USE_ADMA;
#endif
#endif
    if (host->quirks & SDHCI_QUIRK_REG32_RW)
        host->version =
            sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
    else
        host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

    cfg->name = host->name;
#ifndef CONFIG_DM_MMC
    cfg->ops = &sdhci_ops;
#endif

    /* Check whether the clock multiplier is supported or not */
    if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
#if CONFIG_IS_ENABLED(DM_MMC)
        caps_1 = ~upper_32_bits(dt_caps_mask) &
             sdhci_readl(host, SDHCI_CAPABILITIES_1);
        caps_1 |= upper_32_bits(dt_caps);
#else
        caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
#endif
        debug("%s, caps_1: 0x%x\n", __func__, caps_1);
        host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
                SDHCI_CLOCK_MUL_SHIFT;
    }

    if (host->max_clk == 0) {
        if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
            host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
                SDHCI_CLOCK_BASE_SHIFT;
        else
            host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
                SDHCI_CLOCK_BASE_SHIFT;
        host->max_clk *= 1000000;
        if (host->clk_mul)
            host->max_clk *= host->clk_mul;
    }
    if (host->max_clk == 0) {
        printf("%s: Hardware doesn't specify base clock frequency\n",
               __func__);
        return -EINVAL;
    }
    if (f_max && (f_max < host->max_clk))
        cfg->f_max = f_max;
    else
        cfg->f_max = host->max_clk;
    if (f_min)
        cfg->f_min = f_min;
    else {
        if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
            cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
        else
            cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
    }
    cfg->voltages = 0;
    if (caps & SDHCI_CAN_VDD_330)
        cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
    if (caps & SDHCI_CAN_VDD_300)
        cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
    if (caps & SDHCI_CAN_VDD_180)
        cfg->voltages |= MMC_VDD_165_195;

    if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
        cfg->voltages |= host->voltages;

    if (caps & SDHCI_CAN_DO_HISPD)
        cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

    cfg->host_caps |= MMC_MODE_4BIT;

    /* Since Host Controller Version 3.0 */
    if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
        if (!(caps & SDHCI_CAN_DO_8BIT))
            cfg->host_caps &= ~MMC_MODE_8BIT;
    }

    if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
        cfg->host_caps &= ~MMC_MODE_HS;
        cfg->host_caps &= ~MMC_MODE_HS_52MHz;
    }

    if (!(cfg->voltages & MMC_VDD_165_195) ||
        (host->quirks & SDHCI_QUIRK_NO_1_8_V))
        caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
                SDHCI_SUPPORT_DDR50);

    if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
              SDHCI_SUPPORT_DDR50))
        cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

    if (caps_1 & SDHCI_SUPPORT_SDR104) {
        cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
        /*
         * SD3.0: SDR104 is supported so (for eMMC) the caps2
         * field can be promoted to support HS200.
         */
        cfg->host_caps |= MMC_CAP(MMC_HS_200);
    } else if (caps_1 & SDHCI_SUPPORT_SDR50) {
        cfg->host_caps |= MMC_CAP(UHS_SDR50);
    }

    if ((host->quirks & SDHCI_QUIRK_CAPS_BIT63_FOR_HS400) &&
        (caps_1 & SDHCI_SUPPORT_HS400))
        cfg->host_caps |= MMC_CAP(MMC_HS_400);

    if (caps_1 & SDHCI_SUPPORT_DDR50)
        cfg->host_caps |= MMC_CAP(UHS_DDR50);

    if (host->host_caps)
        cfg->host_caps |= host->host_caps;

    cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

    return 0;
}

#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
    return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
    int ret;

    ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
    if (ret)
        return ret;

    host->mmc = mmc_create(&host->cfg, host);
    if (host->mmc == NULL) {
        printf("%s: mmc create fail!\n", __func__);
        return -ENOMEM;
    }
    return 0;
}
#endif