mmc_ops.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
* linux/drivers/mmc/core/mmc_ops.c
  4. *
  5. * Copyright 2006-2007 Pierre Ossman
  6. */
  7. #include <linux/slab.h>
  8. #include <linux/export.h>
  9. #include <linux/types.h>
  10. #include <linux/scatterlist.h>
  11. #include <linux/mmc/host.h>
  12. #include <linux/mmc/card.h>
  13. #include <linux/mmc/mmc.h>
  14. #include "core.h"
  15. #include "card.h"
  16. #include "host.h"
  17. #include "mmc_ops.h"
  18. #define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
  19. #define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
  20. #define MMC_OP_COND_PERIOD_US (4 * 1000) /* 4ms */
  21. #define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
/*
 * Reference tuning block for a 4-bit bus (64 bytes). During the tuning
 * procedure the card returns this pattern and mmc_send_tuning() compares
 * it byte-for-byte against what was actually received.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
/*
 * Reference tuning block for an 8-bit bus (128 bytes); see
 * tuning_blk_pattern_4bit above for how it is used.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
/* Context passed to mmc_busy_cb() while polling a card for busy. */
struct mmc_busy_data {
	struct mmc_card *card;		/* card being polled */
	bool retry_crc_err;		/* treat -EILSEQ from CMD13 as "still busy" */
	enum mmc_busy_cmd busy_cmd;	/* operation we are waiting to complete */
};
/* Context passed to __mmc_send_op_cond_cb() while polling CMD1. */
struct mmc_op_cond_busy_data {
	struct mmc_host *host;		/* host to issue CMD1 on */
	u32 ocr;			/* OCR argument requested by the caller */
	struct mmc_command *cmd;	/* CMD1 command, reused each iteration */
};
  60. int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
  61. {
  62. int err;
  63. struct mmc_command cmd = {};
  64. cmd.opcode = MMC_SEND_STATUS;
  65. if (!mmc_host_is_spi(card->host))
  66. cmd.arg = card->rca << 16;
  67. cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  68. err = mmc_wait_for_cmd(card->host, &cmd, retries);
  69. if (err)
  70. return err;
  71. /* NOTE: callers are required to understand the difference
  72. * between "native" and SPI format status words!
  73. */
  74. if (status)
  75. *status = cmd.resp[0];
  76. return 0;
  77. }
  78. EXPORT_SYMBOL_GPL(__mmc_send_status);
  79. int mmc_send_status(struct mmc_card *card, u32 *status)
  80. {
  81. return __mmc_send_status(card, status, MMC_CMD_RETRIES);
  82. }
  83. EXPORT_SYMBOL_GPL(mmc_send_status);
  84. static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
  85. {
  86. struct mmc_command cmd = {};
  87. cmd.opcode = MMC_SELECT_CARD;
  88. if (card) {
  89. cmd.arg = card->rca << 16;
  90. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  91. } else {
  92. cmd.arg = 0;
  93. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  94. }
  95. return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  96. }
  97. int mmc_select_card(struct mmc_card *card)
  98. {
  99. return _mmc_select_card(card->host, card);
  100. }
  101. int mmc_deselect_cards(struct mmc_host *host)
  102. {
  103. return _mmc_select_card(host, NULL);
  104. }
  105. /*
  106. * Write the value specified in the device tree or board code into the optional
  107. * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
  108. * drive strength of the DAT and CMD outputs. The actual meaning of a given
  109. * value is hardware dependant.
  110. * The presence of the DSR register can be determined from the CSD register,
  111. * bit 76.
  112. */
  113. int mmc_set_dsr(struct mmc_host *host)
  114. {
  115. struct mmc_command cmd = {};
  116. cmd.opcode = MMC_SET_DSR;
  117. cmd.arg = (host->dsr << 16) | 0xffff;
  118. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  119. return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  120. }
  121. int mmc_go_idle(struct mmc_host *host)
  122. {
  123. int err;
  124. struct mmc_command cmd = {};
  125. /*
  126. * Non-SPI hosts need to prevent chipselect going active during
  127. * GO_IDLE; that would put chips into SPI mode. Remind them of
  128. * that in case of hardware that won't pull up DAT3/nCS otherwise.
  129. *
  130. * SPI hosts ignore ios.chip_select; it's managed according to
  131. * rules that must accommodate non-MMC slaves which this layer
  132. * won't even know about.
  133. */
  134. if (!mmc_host_is_spi(host)) {
  135. mmc_set_chip_select(host, MMC_CS_HIGH);
  136. mmc_delay(1);
  137. }
  138. cmd.opcode = MMC_GO_IDLE_STATE;
  139. cmd.arg = 0;
  140. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
  141. err = mmc_wait_for_cmd(host, &cmd, 0);
  142. mmc_delay(1);
  143. if (!mmc_host_is_spi(host)) {
  144. mmc_set_chip_select(host, MMC_CS_DONTCARE);
  145. mmc_delay(1);
  146. }
  147. host->use_spi_crc = 0;
  148. return err;
  149. }
  150. static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
  151. {
  152. struct mmc_op_cond_busy_data *data = cb_data;
  153. struct mmc_host *host = data->host;
  154. struct mmc_command *cmd = data->cmd;
  155. u32 ocr = data->ocr;
  156. int err = 0;
  157. err = mmc_wait_for_cmd(host, cmd, 0);
  158. if (err)
  159. return err;
  160. if (mmc_host_is_spi(host)) {
  161. if (!(cmd->resp[0] & R1_SPI_IDLE)) {
  162. *busy = false;
  163. return 0;
  164. }
  165. } else {
  166. if (cmd->resp[0] & MMC_CARD_BUSY) {
  167. *busy = false;
  168. return 0;
  169. }
  170. }
  171. *busy = true;
  172. /*
  173. * According to eMMC specification v5.1 section 6.4.3, we
  174. * should issue CMD1 repeatedly in the idle state until
  175. * the eMMC is ready. Otherwise some eMMC devices seem to enter
  176. * the inactive mode after mmc_init_card() issued CMD0 when
  177. * the eMMC device is busy.
  178. */
  179. if (!ocr && !mmc_host_is_spi(host))
  180. cmd->arg = cmd->resp[0] | BIT(30);
  181. return 0;
  182. }
  183. int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
  184. {
  185. struct mmc_command cmd = {};
  186. int err = 0;
  187. struct mmc_op_cond_busy_data cb_data = {
  188. .host = host,
  189. .ocr = ocr,
  190. .cmd = &cmd
  191. };
  192. cmd.opcode = MMC_SEND_OP_COND;
  193. cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
  194. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
  195. err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
  196. MMC_OP_COND_TIMEOUT_MS,
  197. &__mmc_send_op_cond_cb, &cb_data);
  198. if (err)
  199. return err;
  200. if (rocr && !mmc_host_is_spi(host))
  201. *rocr = cmd.resp[0];
  202. return err;
  203. }
  204. int mmc_set_relative_addr(struct mmc_card *card)
  205. {
  206. struct mmc_command cmd = {};
  207. cmd.opcode = MMC_SET_RELATIVE_ADDR;
  208. cmd.arg = card->rca << 16;
  209. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  210. return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  211. }
  212. static int
  213. mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
  214. {
  215. int err;
  216. struct mmc_command cmd = {};
  217. cmd.opcode = opcode;
  218. cmd.arg = arg;
  219. cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
  220. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  221. if (err)
  222. return err;
  223. memcpy(cxd, cmd.resp, sizeof(u32) * 4);
  224. return 0;
  225. }
  226. /*
  227. * NOTE: void *buf, caller for the buf is required to use DMA-capable
  228. * buffer or on-stack buffer (with some overhead in callee).
  229. */
  230. int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
  231. u32 args, void *buf, unsigned len)
  232. {
  233. struct mmc_request mrq = {};
  234. struct mmc_command cmd = {};
  235. struct mmc_data data = {};
  236. struct scatterlist sg;
  237. mrq.cmd = &cmd;
  238. mrq.data = &data;
  239. cmd.opcode = opcode;
  240. cmd.arg = args;
  241. /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
  242. * rely on callers to never use this with "native" calls for reading
  243. * CSD or CID. Native versions of those commands use the R2 type,
  244. * not R1 plus a data block.
  245. */
  246. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
  247. data.blksz = len;
  248. data.blocks = 1;
  249. data.flags = MMC_DATA_READ;
  250. data.sg = &sg;
  251. data.sg_len = 1;
  252. sg_init_one(&sg, buf, len);
  253. if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
  254. /*
  255. * The spec states that CSR and CID accesses have a timeout
  256. * of 64 clock cycles.
  257. */
  258. data.timeout_ns = 0;
  259. data.timeout_clks = 64;
  260. } else
  261. mmc_set_data_timeout(&data, card);
  262. mmc_wait_for_req(host, &mrq);
  263. if (cmd.error)
  264. return cmd.error;
  265. if (data.error)
  266. return data.error;
  267. return 0;
  268. }
  269. static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
  270. {
  271. int ret, i;
  272. __be32 *cxd_tmp;
  273. cxd_tmp = kzalloc(16, GFP_KERNEL);
  274. if (!cxd_tmp)
  275. return -ENOMEM;
  276. ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
  277. if (ret)
  278. goto err;
  279. for (i = 0; i < 4; i++)
  280. cxd[i] = be32_to_cpu(cxd_tmp[i]);
  281. err:
  282. kfree(cxd_tmp);
  283. return ret;
  284. }
  285. int mmc_send_csd(struct mmc_card *card, u32 *csd)
  286. {
  287. if (mmc_host_is_spi(card->host))
  288. return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
  289. return mmc_send_cxd_native(card->host, card->rca << 16, csd,
  290. MMC_SEND_CSD);
  291. }
  292. int mmc_send_cid(struct mmc_host *host, u32 *cid)
  293. {
  294. if (mmc_host_is_spi(host))
  295. return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
  296. return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
  297. }
  298. int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
  299. {
  300. int err;
  301. u8 *ext_csd;
  302. if (!card || !new_ext_csd)
  303. return -EINVAL;
  304. if (!mmc_can_ext_csd(card))
  305. return -EOPNOTSUPP;
  306. /*
  307. * As the ext_csd is so large and mostly unused, we don't store the
  308. * raw block in mmc_card.
  309. */
  310. ext_csd = kzalloc(512, GFP_KERNEL);
  311. if (!ext_csd)
  312. return -ENOMEM;
  313. err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
  314. 512);
  315. if (err)
  316. kfree(ext_csd);
  317. else
  318. *new_ext_csd = ext_csd;
  319. return err;
  320. }
  321. EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
  322. int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
  323. {
  324. struct mmc_command cmd = {};
  325. int err;
  326. cmd.opcode = MMC_SPI_READ_OCR;
  327. cmd.arg = highcap ? (1 << 30) : 0;
  328. cmd.flags = MMC_RSP_SPI_R3;
  329. err = mmc_wait_for_cmd(host, &cmd, 0);
  330. *ocrp = cmd.resp[1];
  331. return err;
  332. }
  333. int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
  334. {
  335. struct mmc_command cmd = {};
  336. int err;
  337. cmd.opcode = MMC_SPI_CRC_ON_OFF;
  338. cmd.flags = MMC_RSP_SPI_R1;
  339. cmd.arg = use_crc;
  340. err = mmc_wait_for_cmd(host, &cmd, 0);
  341. if (!err)
  342. host->use_spi_crc = use_crc;
  343. return err;
  344. }
  345. static int mmc_switch_status_error(struct mmc_host *host, u32 status)
  346. {
  347. if (mmc_host_is_spi(host)) {
  348. if (status & R1_SPI_ILLEGAL_COMMAND)
  349. return -EBADMSG;
  350. } else {
  351. if (R1_STATUS(status))
  352. pr_warn("%s: unexpected status %#x after switch\n",
  353. mmc_hostname(host), status);
  354. if (status & R1_SWITCH_ERROR)
  355. return -EBADMSG;
  356. }
  357. return 0;
  358. }
  359. /* Caller must hold re-tuning */
  360. int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
  361. {
  362. u32 status;
  363. int err;
  364. err = mmc_send_status(card, &status);
  365. if (!crc_err_fatal && err == -EILSEQ)
  366. return 0;
  367. if (err)
  368. return err;
  369. return mmc_switch_status_error(card->host, status);
  370. }
/*
 * Busy-polling callback for mmc_poll_for_busy(). Sets *busy to true while
 * the card has not yet returned to a data-ready state; returns a negative
 * errno on fatal errors.
 */
static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	/*
	 * Prefer the host's HW busy detection (->card_busy), except for
	 * MMC_BUSY_IO which is always polled with CMD13.
	 */
	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	/* A CRC error while polling may be transient: report "still busy". */
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	/* Per-operation validation of the returned status word. */
	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		/* No extra status checking for these operations. */
		break;
	default:
		err = -EINVAL;
	}
	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}
/*
 * Poll @busy_cb until it reports not-busy, an error, or @timeout_ms
 * elapses. The polling interval starts at @period_us (or 32us when zero)
 * and doubles each iteration up to ~32ms to avoid hogging the CPU.
 * Returns 0 when the device became ready, -ETIMEDOUT if it stayed busy,
 * or the callback's error.
 */
int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
			unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	int err;
	unsigned long timeout;
	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
  443. int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
  444. bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
  445. {
  446. struct mmc_host *host = card->host;
  447. struct mmc_busy_data cb_data;
  448. cb_data.card = card;
  449. cb_data.retry_crc_err = retry_crc_err;
  450. cb_data.busy_cmd = busy_cmd;
  451. return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
  452. }
  453. EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
  454. bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
  455. unsigned int timeout_ms)
  456. {
  457. /*
  458. * If the max_busy_timeout of the host is specified, make sure it's
  459. * enough to fit the used timeout_ms. In case it's not, let's instruct
  460. * the host to avoid HW busy detection, by converting to a R1 response
  461. * instead of a R1B. Note, some hosts requires R1B, which also means
  462. * they are on their own when it comes to deal with the busy timeout.
  463. */
  464. if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
  465. (timeout_ms > host->max_busy_timeout)) {
  466. cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
  467. return false;
  468. }
  469. cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
  470. cmd->busy_timeout = timeout_ms;
  471. return true;
  472. }
  473. EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 * timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	/* Hold re-tuning: the CMD13 polling below must not trigger one. */
	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/* CMD6 argument: mode, register index, value and command set. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * when it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		/* Revert to the old timing when the switch failed. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
  543. int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
  544. unsigned int timeout_ms)
  545. {
  546. return __mmc_switch(card, set, index, value, timeout_ms, 0,
  547. true, false, MMC_CMD_RETRIES);
  548. }
  549. EXPORT_SYMBOL_GPL(mmc_switch);
  550. int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
  551. {
  552. struct mmc_request mrq = {};
  553. struct mmc_command cmd = {};
  554. struct mmc_data data = {};
  555. struct scatterlist sg;
  556. struct mmc_ios *ios = &host->ios;
  557. const u8 *tuning_block_pattern;
  558. int size, err = 0;
  559. u8 *data_buf;
  560. if (ios->bus_width == MMC_BUS_WIDTH_8) {
  561. tuning_block_pattern = tuning_blk_pattern_8bit;
  562. size = sizeof(tuning_blk_pattern_8bit);
  563. } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
  564. tuning_block_pattern = tuning_blk_pattern_4bit;
  565. size = sizeof(tuning_blk_pattern_4bit);
  566. } else
  567. return -EINVAL;
  568. data_buf = kzalloc(size, GFP_KERNEL);
  569. if (!data_buf)
  570. return -ENOMEM;
  571. mrq.cmd = &cmd;
  572. mrq.data = &data;
  573. cmd.opcode = opcode;
  574. cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
  575. data.blksz = size;
  576. data.blocks = 1;
  577. data.flags = MMC_DATA_READ;
  578. /*
  579. * According to the tuning specs, Tuning process
  580. * is normally shorter 40 executions of CMD19,
  581. * and timeout value should be shorter than 150 ms
  582. */
  583. data.timeout_ns = 150 * NSEC_PER_MSEC;
  584. data.sg = &sg;
  585. data.sg_len = 1;
  586. sg_init_one(&sg, data_buf, size);
  587. mmc_wait_for_req(host, &mrq);
  588. if (cmd_error)
  589. *cmd_error = cmd.error;
  590. if (cmd.error) {
  591. err = cmd.error;
  592. goto out;
  593. }
  594. if (data.error) {
  595. err = data.error;
  596. goto out;
  597. }
  598. if (memcmp(data_buf, tuning_block_pattern, size))
  599. err = -EIO;
  600. out:
  601. kfree(data_buf);
  602. return err;
  603. }
  604. EXPORT_SYMBOL_GPL(mmc_send_tuning);
  605. int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
  606. {
  607. struct mmc_command cmd = {};
  608. /*
  609. * eMMC specification specifies that CMD12 can be used to stop a tuning
  610. * command, but SD specification does not, so do nothing unless it is
  611. * eMMC.
  612. */
  613. if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
  614. return 0;
  615. cmd.opcode = MMC_STOP_TRANSMISSION;
  616. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
  617. /*
  618. * For drivers that override R1 to R1b, set an arbitrary timeout based
  619. * on the tuning timeout i.e. 150ms.
  620. */
  621. cmd.busy_timeout = 150;
  622. return mmc_wait_for_cmd(host, &cmd, 0);
  623. }
  624. EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
/*
 * Run one half of the bus test procedure: BUS_TEST_W writes a known
 * pattern, BUS_TEST_R reads back the card's inverted echo. @len is the
 * bus width in bytes (4 or 8). Returns 0 on success, -EIO when the
 * read-back does not match, or a command/data error.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;

	/*
	 * The card returns the bitwise inverse of the written pattern.
	 * NOTE(review): only the first len/4 bytes are compared here,
	 * which covers the pattern's non-zero bytes — confirm this bound
	 * is intentional before changing it.
	 */
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
  692. int mmc_bus_test(struct mmc_card *card, u8 bus_width)
  693. {
  694. int width;
  695. if (bus_width == MMC_BUS_WIDTH_8)
  696. width = 8;
  697. else if (bus_width == MMC_BUS_WIDTH_4)
  698. width = 4;
  699. else if (bus_width == MMC_BUS_WIDTH_1)
  700. return 0; /* no need for test */
  701. else
  702. return -EINVAL;
  703. /*
  704. * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
  705. * is a problem. This improves chances that the test will work.
  706. */
  707. mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
  708. return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
  709. }
  710. static int mmc_send_hpi_cmd(struct mmc_card *card)
  711. {
  712. unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
  713. struct mmc_host *host = card->host;
  714. bool use_r1b_resp = false;
  715. struct mmc_command cmd = {};
  716. int err;
  717. cmd.opcode = card->ext_csd.hpi_cmd;
  718. cmd.arg = card->rca << 16 | 1;
  719. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  720. if (cmd.opcode == MMC_STOP_TRANSMISSION)
  721. use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
  722. busy_timeout_ms);
  723. err = mmc_wait_for_cmd(host, &cmd, 0);
  724. if (err) {
  725. pr_warn("%s: HPI error %d. Command response %#x\n",
  726. mmc_hostname(host), err, cmd.resp[0]);
  727. return err;
  728. }
  729. /* No need to poll when using HW busy detection. */
  730. if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
  731. return 0;
  732. /* Let's poll to find out when the HPI request completes. */
  733. return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
  734. }
/**
 * mmc_interrupt_hpi - Issue for High priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		/* Positive value distinguishes "not enabled" from errnos. */
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		/* Programming state: HPI is needed to interrupt the card. */
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}
  778. int mmc_can_ext_csd(struct mmc_card *card)
  779. {
  780. return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
  781. }
  782. static int mmc_read_bkops_status(struct mmc_card *card)
  783. {
  784. int err;
  785. u8 *ext_csd;
  786. err = mmc_get_ext_csd(card, &ext_csd);
  787. if (err)
  788. return err;
  789. card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
  790. card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
  791. kfree(ext_csd);
  792. return 0;
  793. }
  794. /**
  795. * mmc_run_bkops - Run BKOPS for supported cards
  796. * @card: MMC card to run BKOPS for
  797. *
  798. * Run background operations synchronously for cards having manual BKOPS
  799. * enabled and in case it reports urgent BKOPS level.
  800. */
  801. void mmc_run_bkops(struct mmc_card *card)
  802. {
  803. int err;
  804. if (!card->ext_csd.man_bkops_en)
  805. return;
  806. err = mmc_read_bkops_status(card);
  807. if (err) {
  808. pr_err("%s: Failed to read bkops status: %d\n",
  809. mmc_hostname(card->host), err);
  810. return;
  811. }
  812. if (!card->ext_csd.raw_bkops_status ||
  813. card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
  814. return;
  815. mmc_retune_hold(card->host);
  816. /*
  817. * For urgent BKOPS status, LEVEL_2 and higher, let's execute
  818. * synchronously. Future wise, we may consider to start BKOPS, for less
  819. * urgent levels by using an asynchronous background task, when idle.
  820. */
  821. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  822. EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
  823. /*
  824. * If the BKOPS timed out, the card is probably still busy in the
  825. * R1_STATE_PRG. Rather than continue to wait, let's try to abort
  826. * it with a HPI command to get back into R1_STATE_TRAN.
  827. */
  828. if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
  829. pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
  830. else if (err)
  831. pr_warn("%s: Error %d running bkops\n",
  832. mmc_hostname(card->host), err);
  833. mmc_retune_release(card->host);
  834. }
  835. EXPORT_SYMBOL(mmc_run_bkops);
  836. static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
  837. {
  838. u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
  839. int err;
  840. if (!card->ext_csd.cmdq_support)
  841. return -EOPNOTSUPP;
  842. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
  843. val, card->ext_csd.generic_cmd6_time);
  844. if (!err)
  845. card->ext_csd.cmdq_en = enable;
  846. return err;
  847. }
  848. int mmc_cmdq_enable(struct mmc_card *card)
  849. {
  850. return mmc_cmdq_switch(card, true);
  851. }
  852. EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
  853. int mmc_cmdq_disable(struct mmc_card *card)
  854. {
  855. return mmc_cmdq_switch(card, false);
  856. }
  857. EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
  858. int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
  859. {
  860. struct mmc_host *host = card->host;
  861. int err;
  862. if (!mmc_can_sanitize(card)) {
  863. pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
  864. return -EOPNOTSUPP;
  865. }
  866. if (!timeout_ms)
  867. timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
  868. pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
  869. mmc_retune_hold(host);
  870. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
  871. 1, timeout_ms, 0, true, false, 0);
  872. if (err)
  873. pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
  874. /*
  875. * If the sanitize operation timed out, the card is probably still busy
  876. * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
  877. * it with a HPI command to get back into R1_STATE_TRAN.
  878. */
  879. if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
  880. pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
  881. mmc_retune_release(host);
  882. pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
  883. return err;
  884. }
  885. EXPORT_SYMBOL_GPL(mmc_sanitize);