mmc_ops.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055
  1. /*
 * linux/drivers/mmc/core/mmc_ops.c
  3. *
  4. * Copyright 2006-2007 Pierre Ossman
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or (at
  9. * your option) any later version.
  10. */
  11. #include <linux/slab.h>
  12. #include <linux/export.h>
  13. #include <linux/types.h>
  14. #include <linux/scatterlist.h>
  15. #include <linux/mmc/host.h>
  16. #include <linux/mmc/card.h>
  17. #include <linux/mmc/mmc.h>
  18. #include "core.h"
  19. #include "card.h"
  20. #include "host.h"
  21. #include "mmc_ops.h"
/* Fallback timeout for operations that poll for completion (10 minutes). */
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */

/*
 * Reference blocks for the tuning commands (CMD19/CMD21).  The block the
 * card returns during tuning is compared against the pattern matching the
 * active bus width to decide whether the current sample point is good.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
  51. int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
  52. {
  53. int err;
  54. struct mmc_command cmd = {};
  55. cmd.opcode = MMC_SEND_STATUS;
  56. if (!mmc_host_is_spi(card->host))
  57. cmd.arg = card->rca << 16;
  58. cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  59. err = mmc_wait_for_cmd(card->host, &cmd, retries);
  60. if (err)
  61. return err;
  62. /* NOTE: callers are required to understand the difference
  63. * between "native" and SPI format status words!
  64. */
  65. if (status)
  66. *status = cmd.resp[0];
  67. return 0;
  68. }
  69. EXPORT_SYMBOL_GPL(__mmc_send_status);
  70. int mmc_send_status(struct mmc_card *card, u32 *status)
  71. {
  72. return __mmc_send_status(card, status, MMC_CMD_RETRIES);
  73. }
  74. EXPORT_SYMBOL_GPL(mmc_send_status);
  75. static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
  76. {
  77. struct mmc_command cmd = {};
  78. cmd.opcode = MMC_SELECT_CARD;
  79. if (card) {
  80. cmd.arg = card->rca << 16;
  81. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  82. } else {
  83. cmd.arg = 0;
  84. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  85. }
  86. return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  87. }
  88. int mmc_select_card(struct mmc_card *card)
  89. {
  90. return _mmc_select_card(card->host, card);
  91. }
  92. int mmc_deselect_cards(struct mmc_host *host)
  93. {
  94. return _mmc_select_card(host, NULL);
  95. }
  96. /*
  97. * Write the value specified in the device tree or board code into the optional
  98. * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
  99. * drive strength of the DAT and CMD outputs. The actual meaning of a given
  100. * value is hardware dependant.
  101. * The presence of the DSR register can be determined from the CSD register,
  102. * bit 76.
  103. */
  104. int mmc_set_dsr(struct mmc_host *host)
  105. {
  106. struct mmc_command cmd = {};
  107. cmd.opcode = MMC_SET_DSR;
  108. cmd.arg = (host->dsr << 16) | 0xffff;
  109. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  110. return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  111. }
  112. int mmc_go_idle(struct mmc_host *host)
  113. {
  114. int err;
  115. struct mmc_command cmd = {};
  116. /*
  117. * Non-SPI hosts need to prevent chipselect going active during
  118. * GO_IDLE; that would put chips into SPI mode. Remind them of
  119. * that in case of hardware that won't pull up DAT3/nCS otherwise.
  120. *
  121. * SPI hosts ignore ios.chip_select; it's managed according to
  122. * rules that must accommodate non-MMC slaves which this layer
  123. * won't even know about.
  124. */
  125. if (!mmc_host_is_spi(host)) {
  126. mmc_set_chip_select(host, MMC_CS_HIGH);
  127. mmc_delay(1);
  128. }
  129. cmd.opcode = MMC_GO_IDLE_STATE;
  130. cmd.arg = 0;
  131. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
  132. err = mmc_wait_for_cmd(host, &cmd, 0);
  133. mmc_delay(1);
  134. if (!mmc_host_is_spi(host)) {
  135. mmc_set_chip_select(host, MMC_CS_DONTCARE);
  136. mmc_delay(1);
  137. }
  138. host->use_spi_crc = 0;
  139. return err;
  140. }
/*
 * Send CMD1 (SEND_OP_COND) and wait for the card's power-up sequence to
 * complete.
 * @host: host to issue the command on
 * @ocr: proposed OCR voltage window; 0 means "just probe once"
 * @rocr: if non-NULL, receives the card's OCR response (native mode only)
 *
 * Returns 0 on success, -ETIMEDOUT if the card never reports ready after
 * 100 polls, or the command's own error.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	/* SPI hosts always probe with arg 0; native hosts propose the OCR. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	/* Poll up to 100 times, 10 ms apart (~1 s worst case). */
	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			/* SPI mode: ready once the idle bit clears. */
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			/* Native mode: the busy bit set means power-up done. */
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
  170. int mmc_set_relative_addr(struct mmc_card *card)
  171. {
  172. struct mmc_command cmd = {};
  173. cmd.opcode = MMC_SET_RELATIVE_ADDR;
  174. cmd.arg = card->rca << 16;
  175. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  176. return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  177. }
  178. static int
  179. mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
  180. {
  181. int err;
  182. struct mmc_command cmd = {};
  183. cmd.opcode = opcode;
  184. cmd.arg = arg;
  185. cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
  186. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  187. if (err)
  188. return err;
  189. memcpy(cxd, cmd.resp, sizeof(u32) * 4);
  190. return 0;
  191. }
  192. /*
  193. * NOTE: void *buf, caller for the buf is required to use DMA-capable
  194. * buffer or on-stack buffer (with some overhead in callee).
  195. */
  196. static int
  197. mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
  198. u32 opcode, void *buf, unsigned len)
  199. {
  200. struct mmc_request mrq = {};
  201. struct mmc_command cmd = {};
  202. struct mmc_data data = {};
  203. struct scatterlist sg;
  204. mrq.cmd = &cmd;
  205. mrq.data = &data;
  206. cmd.opcode = opcode;
  207. cmd.arg = 0;
  208. /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
  209. * rely on callers to never use this with "native" calls for reading
  210. * CSD or CID. Native versions of those commands use the R2 type,
  211. * not R1 plus a data block.
  212. */
  213. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
  214. data.blksz = len;
  215. data.blocks = 1;
  216. data.flags = MMC_DATA_READ;
  217. data.sg = &sg;
  218. data.sg_len = 1;
  219. sg_init_one(&sg, buf, len);
  220. if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
  221. /*
  222. * The spec states that CSR and CID accesses have a timeout
  223. * of 64 clock cycles.
  224. */
  225. data.timeout_ns = 0;
  226. data.timeout_clks = 64;
  227. } else
  228. mmc_set_data_timeout(&data, card);
  229. mmc_wait_for_req(host, &mrq);
  230. if (cmd.error)
  231. return cmd.error;
  232. if (data.error)
  233. return data.error;
  234. return 0;
  235. }
  236. static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
  237. {
  238. int ret, i;
  239. __be32 *csd_tmp;
  240. csd_tmp = kzalloc(16, GFP_KERNEL);
  241. if (!csd_tmp)
  242. return -ENOMEM;
  243. ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
  244. if (ret)
  245. goto err;
  246. for (i = 0; i < 4; i++)
  247. csd[i] = be32_to_cpu(csd_tmp[i]);
  248. err:
  249. kfree(csd_tmp);
  250. return ret;
  251. }
  252. int mmc_send_csd(struct mmc_card *card, u32 *csd)
  253. {
  254. if (mmc_host_is_spi(card->host))
  255. return mmc_spi_send_csd(card, csd);
  256. return mmc_send_cxd_native(card->host, card->rca << 16, csd,
  257. MMC_SEND_CSD);
  258. }
  259. static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
  260. {
  261. int ret, i;
  262. __be32 *cid_tmp;
  263. cid_tmp = kzalloc(16, GFP_KERNEL);
  264. if (!cid_tmp)
  265. return -ENOMEM;
  266. ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
  267. if (ret)
  268. goto err;
  269. for (i = 0; i < 4; i++)
  270. cid[i] = be32_to_cpu(cid_tmp[i]);
  271. err:
  272. kfree(cid_tmp);
  273. return ret;
  274. }
  275. int mmc_send_cid(struct mmc_host *host, u32 *cid)
  276. {
  277. if (mmc_host_is_spi(host))
  278. return mmc_spi_send_cid(host, cid);
  279. return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
  280. }
  281. int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
  282. {
  283. int err;
  284. u8 *ext_csd;
  285. if (!card || !new_ext_csd)
  286. return -EINVAL;
  287. if (!mmc_can_ext_csd(card))
  288. return -EOPNOTSUPP;
  289. /*
  290. * As the ext_csd is so large and mostly unused, we don't store the
  291. * raw block in mmc_card.
  292. */
  293. ext_csd = kzalloc(512, GFP_KERNEL);
  294. if (!ext_csd)
  295. return -ENOMEM;
  296. err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
  297. 512);
  298. if (err)
  299. kfree(ext_csd);
  300. else
  301. *new_ext_csd = ext_csd;
  302. return err;
  303. }
  304. EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
  305. int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
  306. {
  307. struct mmc_command cmd = {};
  308. int err;
  309. cmd.opcode = MMC_SPI_READ_OCR;
  310. cmd.arg = highcap ? (1 << 30) : 0;
  311. cmd.flags = MMC_RSP_SPI_R3;
  312. err = mmc_wait_for_cmd(host, &cmd, 0);
  313. *ocrp = cmd.resp[1];
  314. return err;
  315. }
  316. int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
  317. {
  318. struct mmc_command cmd = {};
  319. int err;
  320. cmd.opcode = MMC_SPI_CRC_ON_OFF;
  321. cmd.flags = MMC_RSP_SPI_R1;
  322. cmd.arg = use_crc;
  323. err = mmc_wait_for_cmd(host, &cmd, 0);
  324. if (!err)
  325. host->use_spi_crc = use_crc;
  326. return err;
  327. }
  328. static int mmc_switch_status_error(struct mmc_host *host, u32 status)
  329. {
  330. if (mmc_host_is_spi(host)) {
  331. if (status & R1_SPI_ILLEGAL_COMMAND)
  332. return -EBADMSG;
  333. } else {
  334. if (R1_STATUS(status))
  335. pr_warn("%s: unexpected status %#x after switch\n",
  336. mmc_hostname(host), status);
  337. if (status & R1_SWITCH_ERROR)
  338. return -EBADMSG;
  339. }
  340. return 0;
  341. }
  342. /* Caller must hold re-tuning */
  343. int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
  344. {
  345. u32 status;
  346. int err;
  347. err = mmc_send_status(card, &status);
  348. if (!crc_err_fatal && err == -EILSEQ)
  349. return 0;
  350. if (err)
  351. return err;
  352. return mmc_switch_status_error(card->host, status);
  353. }
  354. int mmc_switch_status(struct mmc_card *card)
  355. {
  356. return __mmc_switch_status(card, true);
  357. }
/*
 * Wait for the card to stop signalling busy after a command (e.g. CMD6).
 * @card: card being polled
 * @timeout_ms: maximum poll time; 0 selects the MMC_OPS_TIMEOUT_MS fallback
 * @send_status: whether polling with CMD13 is permitted
 * @retry_crc_err: treat a CMD13 CRC error (-EILSEQ) as "still busy"
 *
 * Prefers the host's ->card_busy() hook; otherwise polls CMD13.  When
 * neither mechanism may be used, simply sleeps for the stated timeout.
 * Returns 0 when the card is idle, -ETIMEDOUT if it stays busy, or an
 * error from status polling.
 */
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);
		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				/* Busy until the card leaves the PRG state. */
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	/* Remembered so we can roll the timing back if the switch fails. */
	unsigned char old_timing = host->ios.timing;

	/* Hold off re-tuning while CMD6 and the busy polling are in flight. */
	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B. Note, some hosts requires R1B,
	 * which also means they are on their own when it comes to deal with the
	 * busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms &&
	    host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		/* Roll back the timing change if the switch did not stick. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
  491. int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
  492. unsigned int timeout_ms)
  493. {
  494. return __mmc_switch(card, set, index, value, timeout_ms, 0,
  495. true, true, false);
  496. }
  497. EXPORT_SYMBOL_GPL(mmc_switch);
  498. int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
  499. {
  500. struct mmc_request mrq = {};
  501. struct mmc_command cmd = {};
  502. struct mmc_data data = {};
  503. struct scatterlist sg;
  504. struct mmc_ios *ios = &host->ios;
  505. const u8 *tuning_block_pattern;
  506. int size, err = 0;
  507. u8 *data_buf;
  508. if (ios->bus_width == MMC_BUS_WIDTH_8) {
  509. tuning_block_pattern = tuning_blk_pattern_8bit;
  510. size = sizeof(tuning_blk_pattern_8bit);
  511. } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
  512. tuning_block_pattern = tuning_blk_pattern_4bit;
  513. size = sizeof(tuning_blk_pattern_4bit);
  514. } else
  515. return -EINVAL;
  516. data_buf = kzalloc(size, GFP_KERNEL);
  517. if (!data_buf)
  518. return -ENOMEM;
  519. mrq.cmd = &cmd;
  520. mrq.data = &data;
  521. cmd.opcode = opcode;
  522. cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
  523. data.blksz = size;
  524. data.blocks = 1;
  525. data.flags = MMC_DATA_READ;
  526. /*
  527. * According to the tuning specs, Tuning process
  528. * is normally shorter 40 executions of CMD19,
  529. * and timeout value should be shorter than 150 ms
  530. */
  531. data.timeout_ns = 150 * NSEC_PER_MSEC;
  532. data.sg = &sg;
  533. data.sg_len = 1;
  534. sg_init_one(&sg, data_buf, size);
  535. mmc_wait_for_req(host, &mrq);
  536. if (cmd_error)
  537. *cmd_error = cmd.error;
  538. if (cmd.error) {
  539. err = cmd.error;
  540. goto out;
  541. }
  542. if (data.error) {
  543. err = data.error;
  544. goto out;
  545. }
  546. if (memcmp(data_buf, tuning_block_pattern, size))
  547. err = -EIO;
  548. out:
  549. kfree(data_buf);
  550. return err;
  551. }
  552. EXPORT_SYMBOL_GPL(mmc_send_tuning);
  553. int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
  554. {
  555. struct mmc_command cmd = {};
  556. /*
  557. * eMMC specification specifies that CMD12 can be used to stop a tuning
  558. * command, but SD specification does not, so do nothing unless it is
  559. * eMMC.
  560. */
  561. if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
  562. return 0;
  563. cmd.opcode = MMC_STOP_TRANSMISSION;
  564. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
  565. /*
  566. * For drivers that override R1 to R1b, set an arbitrary timeout based
  567. * on the tuning timeout i.e. 150ms.
  568. */
  569. cmd.busy_timeout = 150;
  570. return mmc_wait_for_cmd(host, &cmd, 0);
  571. }
  572. EXPORT_SYMBOL_GPL(mmc_abort_tuning);
/*
 * Run one half of the bus-test handshake (CMD19 BUS_TEST_W or CMD14
 * BUS_TEST_R) with a test pattern of @len bytes (8 for an 8-bit bus,
 * 4 for a 4-bit bus).  On BUS_TEST_R the card is expected to return the
 * bitwise complement of the pattern previously written.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * Only the first len/4 bytes are compared; each received byte
		 * must be the bitwise complement of the written test byte
		 * (XOR == 0xff).
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return err;
}
  640. int mmc_bus_test(struct mmc_card *card, u8 bus_width)
  641. {
  642. int width;
  643. if (bus_width == MMC_BUS_WIDTH_8)
  644. width = 8;
  645. else if (bus_width == MMC_BUS_WIDTH_4)
  646. width = 4;
  647. else if (bus_width == MMC_BUS_WIDTH_1)
  648. return 0; /* no need for test */
  649. else
  650. return -EINVAL;
  651. /*
  652. * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
  653. * is a problem. This improves chances that the test will work.
  654. */
  655. mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
  656. return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
  657. }
/*
 * Issue the card's HPI command (either CMD12 or CMD13, as advertised in
 * EXT_CSD) with the HPI bit set, and optionally return the response word.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card didn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	/*
	 * NOTE(review): cmd.flags stays 0 if hpi_cmd is neither CMD12 nor
	 * CMD13 — presumably card init guarantees one of the two; verify.
	 */
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	/* RCA in the upper halfword, HPI bit (bit 0) set. */
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
/**
 *	mmc_interrupt_hpi - Issue for High priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issued High Priority Interrupt, and check for card status
 *	until out-of prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		/* NOTE: returns 1 (not a -errno) when HPI is not enabled. */
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	/* Poll until the card returns to TRAN, bounded by out_of_int_time. */
	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}
  740. int mmc_can_ext_csd(struct mmc_card *card)
  741. {
  742. return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
  743. }
  744. /**
  745. * mmc_stop_bkops - stop ongoing BKOPS
  746. * @card: MMC card to check BKOPS
  747. *
  748. * Send HPI command to stop ongoing background operations to
  749. * allow rapid servicing of foreground operations, e.g. read/
  750. * writes. Wait until the card comes out of the programming state
  751. * to avoid errors in servicing read/write requests.
  752. */
  753. int mmc_stop_bkops(struct mmc_card *card)
  754. {
  755. int err = 0;
  756. err = mmc_interrupt_hpi(card);
  757. /*
  758. * If err is EINVAL, we can't issue an HPI.
  759. * It should complete the BKOPS.
  760. */
  761. if (!err || (err == -EINVAL)) {
  762. mmc_card_clr_doing_bkops(card);
  763. mmc_retune_release(card->host);
  764. err = 0;
  765. }
  766. return err;
  767. }
  768. static int mmc_read_bkops_status(struct mmc_card *card)
  769. {
  770. int err;
  771. u8 *ext_csd;
  772. err = mmc_get_ext_csd(card, &ext_csd);
  773. if (err)
  774. return err;
  775. card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
  776. card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
  777. kfree(ext_csd);
  778. return 0;
  779. }
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	/* Nothing to do without manual BKOPS, or if BKOPS already running. */
	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	/* From an exception, only act on urgent (LEVEL_2+) status. */
	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	/*
	 * Urgent BKOPS run synchronously (busy-wait with the long fallback
	 * timeout); non-urgent BKOPS are fire-and-forget.
	 */
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_OPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		return;
	}

	/*
	 * For urgent bkops status (LEVEL_2 and more)
	 * bkops executed synchronously, otherwise
	 * the operation is in progress
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
  836. /*
  837. * Flush the cache to the non-volatile storage.
  838. */
  839. int mmc_flush_cache(struct mmc_card *card)
  840. {
  841. int err = 0;
  842. if (mmc_cache_enabled(card->host)) {
  843. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  844. EXT_CSD_FLUSH_CACHE, 1, 0);
  845. if (err)
  846. pr_err("%s: cache flush error %d\n",
  847. mmc_hostname(card->host), err);
  848. }
  849. return err;
  850. }
  851. EXPORT_SYMBOL(mmc_flush_cache);
  852. static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
  853. {
  854. u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
  855. int err;
  856. if (!card->ext_csd.cmdq_support)
  857. return -EOPNOTSUPP;
  858. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
  859. val, card->ext_csd.generic_cmd6_time);
  860. if (!err)
  861. card->ext_csd.cmdq_en = enable;
  862. return err;
  863. }
  864. int mmc_cmdq_enable(struct mmc_card *card)
  865. {
  866. return mmc_cmdq_switch(card, true);
  867. }
  868. EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
  869. int mmc_cmdq_disable(struct mmc_card *card)
  870. {
  871. return mmc_cmdq_switch(card, false);
  872. }
  873. EXPORT_SYMBOL_GPL(mmc_cmdq_disable);