mmc.c

  1. /*
  2. * linux/drivers/mmc/core/mmc.c
  3. *
  4. * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
  5. * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
  6. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/err.h>
  13. #include <linux/of.h>
  14. #include <linux/slab.h>
  15. #include <linux/stat.h>
  16. #include <linux/pm_runtime.h>
  17. #include <linux/mmc/host.h>
  18. #include <linux/mmc/card.h>
  19. #include <linux/mmc/mmc.h>
  20. #include "core.h"
  21. #include "card.h"
  22. #include "host.h"
  23. #include "bus.h"
  24. #include "mmc_ops.h"
  25. #include "quirks.h"
  26. #include "sd_ops.h"
  27. #include "pwrseq.h"
  28. #define DEFAULT_CMD6_TIMEOUT_MS 500
  29. #define MIN_CACHE_EN_TIMEOUT_MS 1600
  30. static const unsigned int tran_exp[] = {
  31. 10000, 100000, 1000000, 10000000,
  32. 0, 0, 0, 0
  33. };
  34. static const unsigned char tran_mant[] = {
  35. 0, 10, 12, 13, 15, 20, 25, 30,
  36. 35, 40, 45, 50, 55, 60, 70, 80,
  37. };
  38. static const unsigned int taac_exp[] = {
  39. 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
  40. };
  41. static const unsigned int taac_mant[] = {
  42. 0, 10, 12, 13, 15, 20, 25, 30,
  43. 35, 40, 45, 50, 55, 60, 70, 80,
  44. };
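/*
 * UNSTUFF_BITS() extracts a 'size'-bit field starting at bit 'start' from
 * the 128-bit response. The response is stored as four u32 words with
 * resp[0] holding bits 127:96 (hence the 3 - start/32 word index); fields
 * that straddle a word boundary are stitched together from two words.
 */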
  45. #define UNSTUFF_BITS(resp,start,size) \
  46. ({ \
  47. const int __size = size; \
  48. const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
  49. const int __off = 3 - ((start) / 32); \
  50. const int __shft = (start) & 31; \
  51. u32 __res; \
  52. \
  53. __res = resp[__off] >> __shft; \
  54. if (__size + __shft > 32) \
  55. __res |= resp[__off-1] << ((32 - __shft) % 32); \
  56. __res & __mask; \
  57. })
  58. /*
  59. * Given the decoded CSD structure, decode the raw CID to our CID structure.
  60. */
  61. static int mmc_decode_cid(struct mmc_card *card)
  62. {
  63. u32 *resp = card->raw_cid;
  64. /*
  65. * The selection of the format here is based upon published
  66. * specs from sandisk and from what people have reported.
  67. */
  68. switch (card->csd.mmca_vsn) {
  69. case 0: /* MMC v1.0 - v1.2 */
  70. case 1: /* MMC v1.4 */
  71. card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
  72. card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
  73. card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
  74. card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
  75. card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
  76. card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
  77. card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
  78. card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
  79. card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
  80. card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
  81. card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
  82. card->cid.month = UNSTUFF_BITS(resp, 12, 4);
  83. card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
  84. break;
  85. case 2: /* MMC v2.0 - v2.2 */
  86. case 3: /* MMC v3.1 - v3.3 */
  87. case 4: /* MMC v4 */
  88. card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
  89. card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
  90. card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
  91. card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
  92. card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
  93. card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
  94. card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
  95. card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
  96. card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
  97. card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
  98. card->cid.month = UNSTUFF_BITS(resp, 12, 4);
  99. card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
  100. break;
  101. default:
  102. pr_err("%s: card has unknown MMCA version %d\n",
  103. mmc_hostname(card->host), card->csd.mmca_vsn);
  104. return -EINVAL;
  105. }
  106. return 0;
  107. }
  108. static void mmc_set_erase_size(struct mmc_card *card)
  109. {
  110. if (card->ext_csd.erase_group_def & 1)
  111. card->erase_size = card->ext_csd.hc_erase_size;
  112. else
  113. card->erase_size = card->csd.erase_size;
  114. mmc_init_erase(card);
  115. }
  116. /*
  117. * Given a 128-bit response, decode to our card CSD structure.
  118. */
  119. static int mmc_decode_csd(struct mmc_card *card)
  120. {
  121. struct mmc_csd *csd = &card->csd;
  122. unsigned int e, m, a, b;
  123. u32 *resp = card->raw_csd;
  124. /*
  125. * We only understand CSD structure v1.1 and v1.2.
  126. * v1.2 has extra information in bits 15, 11 and 10.
  127. * We also support eMMC v4.4 & v4.41.
  128. */
  129. csd->structure = UNSTUFF_BITS(resp, 126, 2);
  130. if (csd->structure == 0) {
  131. pr_err("%s: unrecognised CSD structure version %d\n",
  132. mmc_hostname(card->host), csd->structure);
  133. return -EINVAL;
  134. }
  135. csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
  136. m = UNSTUFF_BITS(resp, 115, 4);
  137. e = UNSTUFF_BITS(resp, 112, 3);
  138. csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
  139. csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
  140. m = UNSTUFF_BITS(resp, 99, 4);
  141. e = UNSTUFF_BITS(resp, 96, 3);
  142. csd->max_dtr = tran_exp[e] * tran_mant[m];
  143. csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
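/*
 * Capacity from the CSD: (C_SIZE + 1) * 2^(C_SIZE_MULT + 2), counted in
 * read-block-length units (READ_BL_LEN is decoded just below).
 */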
  144. e = UNSTUFF_BITS(resp, 47, 3);
  145. m = UNSTUFF_BITS(resp, 62, 12);
  146. csd->capacity = (1 + m) << (e + 2);
  147. csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
  148. csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
  149. csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
  150. csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
  151. csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
  152. csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
  153. csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
  154. csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
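/*
 * Erase group size = (ERASE_GRP_SIZE + 1) * (ERASE_GRP_MULT + 1) write
 * blocks, converted to 512-byte sectors by the write_blkbits - 9 shift;
 * only computed when the write block is at least 512 bytes.
 */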
  155. if (csd->write_blkbits >= 9) {
  156. a = UNSTUFF_BITS(resp, 42, 5);
  157. b = UNSTUFF_BITS(resp, 37, 5);
  158. csd->erase_size = (a + 1) * (b + 1);
  159. csd->erase_size <<= csd->write_blkbits - 9;
  160. }
  161. return 0;
  162. }
  163. static void mmc_select_card_type(struct mmc_card *card)
  164. {
  165. struct mmc_host *host = card->host;
  166. u8 card_type = card->ext_csd.raw_card_type;
  167. u32 caps = host->caps, caps2 = host->caps2;
  168. unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
  169. unsigned int avail_type = 0;
  170. if (caps & MMC_CAP_MMC_HIGHSPEED &&
  171. card_type & EXT_CSD_CARD_TYPE_HS_26) {
  172. hs_max_dtr = MMC_HIGH_26_MAX_DTR;
  173. avail_type |= EXT_CSD_CARD_TYPE_HS_26;
  174. }
  175. if (caps & MMC_CAP_MMC_HIGHSPEED &&
  176. card_type & EXT_CSD_CARD_TYPE_HS_52) {
  177. hs_max_dtr = MMC_HIGH_52_MAX_DTR;
  178. avail_type |= EXT_CSD_CARD_TYPE_HS_52;
  179. }
  180. if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
  181. card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
  182. hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
  183. avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
  184. }
  185. if (caps & MMC_CAP_1_2V_DDR &&
  186. card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
  187. hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
  188. avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
  189. }
  190. if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
  191. card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
  192. hs200_max_dtr = MMC_HS200_MAX_DTR;
  193. avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
  194. }
  195. if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
  196. card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
  197. hs200_max_dtr = MMC_HS200_MAX_DTR;
  198. avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
  199. }
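/*
 * The HS400 card types also feed hs200_max_dtr: mmc_set_bus_speed() uses
 * hs200_max_dtr for both HS200 and HS400 cards, and HS400 is normally
 * entered by way of HS200.
 */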
  200. if (caps2 & MMC_CAP2_HS400_1_8V &&
  201. card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
  202. hs200_max_dtr = MMC_HS200_MAX_DTR;
  203. avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
  204. }
  205. if (caps2 & MMC_CAP2_HS400_1_2V &&
  206. card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
  207. hs200_max_dtr = MMC_HS200_MAX_DTR;
  208. avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
  209. }
  210. if ((caps2 & MMC_CAP2_HS400_ES) &&
  211. card->ext_csd.strobe_support &&
  212. (avail_type & EXT_CSD_CARD_TYPE_HS400))
  213. avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
  214. card->ext_csd.hs_max_dtr = hs_max_dtr;
  215. card->ext_csd.hs200_max_dtr = hs200_max_dtr;
  216. card->mmc_avail_type = avail_type;
  217. }
  218. static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
  219. {
  220. u8 hc_erase_grp_sz, hc_wp_grp_sz;
  221. /*
  222. * Disable these attributes by default
  223. */
  224. card->ext_csd.enhanced_area_offset = -EINVAL;
  225. card->ext_csd.enhanced_area_size = -EINVAL;
  226. /*
  227. * Enhanced area feature support -- check whether the eMMC
  228. * card has the Enhanced area enabled. If so, export enhanced
  229. * area offset and size to user by adding sysfs interface.
  230. */
  231. if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
  232. (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
  233. if (card->ext_csd.partition_setting_completed) {
  234. hc_erase_grp_sz =
  235. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
  236. hc_wp_grp_sz =
  237. ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
  238. /*
  239. * calculate the enhanced data area offset, in bytes
  240. */
  241. card->ext_csd.enhanced_area_offset =
  242. (((unsigned long long)ext_csd[139]) << 24) +
  243. (((unsigned long long)ext_csd[138]) << 16) +
  244. (((unsigned long long)ext_csd[137]) << 8) +
  245. (((unsigned long long)ext_csd[136]));
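/* On block-addressed cards the enhanced area start is given in 512-byte sectors */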
  246. if (mmc_card_blockaddr(card))
  247. card->ext_csd.enhanced_area_offset <<= 9;
  248. /*
  249. * calculate the enhanced data area size, in kilobytes
  250. */
  251. card->ext_csd.enhanced_area_size =
  252. (ext_csd[142] << 16) + (ext_csd[141] << 8) +
  253. ext_csd[140];
  254. card->ext_csd.enhanced_area_size *=
  255. (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
  256. card->ext_csd.enhanced_area_size <<= 9;
  257. } else {
  258. pr_warn("%s: defines enhanced area without partition setting complete\n",
  259. mmc_hostname(card->host));
  260. }
  261. }
  262. }
  263. static void mmc_part_add(struct mmc_card *card, u64 size,
  264. unsigned int part_cfg, char *name, int idx, bool ro,
  265. int area_type)
  266. {
  267. card->part[card->nr_parts].size = size;
  268. card->part[card->nr_parts].part_cfg = part_cfg;
  269. sprintf(card->part[card->nr_parts].name, name, idx);
  270. card->part[card->nr_parts].force_ro = ro;
  271. card->part[card->nr_parts].area_type = area_type;
  272. card->nr_parts++;
  273. }
  274. static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
  275. {
  276. int idx;
  277. u8 hc_erase_grp_sz, hc_wp_grp_sz;
  278. u64 part_size;
  279. /*
  280. * General purpose partition feature support --
  281. * If ext_csd has the size of general purpose partitions,
  282. * set size, part_cfg, partition name in mmc_part.
  283. */
  284. if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
  285. EXT_CSD_PART_SUPPORT_PART_EN) {
  286. hc_erase_grp_sz =
  287. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
  288. hc_wp_grp_sz =
  289. ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
  290. for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
  291. if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
  292. !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
  293. !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
  294. continue;
  295. if (card->ext_csd.partition_setting_completed == 0) {
  296. pr_warn("%s: has partition size defined without partition complete\n",
  297. mmc_hostname(card->host));
  298. break;
  299. }
  300. part_size =
  301. (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
  302. << 16) +
  303. (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
  304. << 8) +
  305. ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
  306. part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
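/* part_size is now in units of 512 KiB; shifting by 19 converts it to bytes */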
  307. mmc_part_add(card, part_size << 19,
  308. EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
  309. "gp%d", idx, false,
  310. MMC_BLK_DATA_AREA_GP);
  311. }
  312. }
  313. }
  314. /* Minimum partition switch timeout in milliseconds */
  315. #define MMC_MIN_PART_SWITCH_TIME 300
  316. /*
  317. * Decode extended CSD.
  318. */
  319. static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
  320. {
  321. int err = 0, idx;
  322. u64 part_size;
  323. struct device_node *np;
  324. bool broken_hpi = false;
  325. /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
  326. card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
  327. if (card->csd.structure == 3) {
  328. if (card->ext_csd.raw_ext_csd_structure > 2) {
  329. pr_err("%s: unrecognised EXT_CSD structure "
  330. "version %d\n", mmc_hostname(card->host),
  331. card->ext_csd.raw_ext_csd_structure);
  332. err = -EINVAL;
  333. goto out;
  334. }
  335. }
  336. np = mmc_of_find_child_device(card->host, 0);
  337. if (np && of_device_is_compatible(np, "mmc-card"))
  338. broken_hpi = of_property_read_bool(np, "broken-hpi");
  339. of_node_put(np);
  340. /*
  341. * The EXT_CSD format is meant to be forward compatible. As long
  342. * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
  343. * are authorized, see JEDEC JESD84-B50 section B.8.
  344. */
  345. card->ext_csd.rev = ext_csd[EXT_CSD_REV];
  346. /* fixup device after ext_csd revision field is updated */
  347. mmc_fixup_device(card, mmc_ext_csd_fixups);
  348. card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
  349. card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
  350. card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
  351. card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
  352. if (card->ext_csd.rev >= 2) {
  353. card->ext_csd.sectors =
  354. ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
  355. ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
  356. ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
  357. ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
  358. /* Cards with density > 2GiB are sector addressed */
  359. if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
  360. mmc_card_set_blockaddr(card);
  361. }
  362. card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
  363. card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
  364. mmc_select_card_type(card);
  365. card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
  366. card->ext_csd.raw_erase_timeout_mult =
  367. ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
  368. card->ext_csd.raw_hc_erase_grp_size =
  369. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
  370. if (card->ext_csd.rev >= 3) {
  371. u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
  372. card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
  373. /* EXT_CSD value is in units of 10ms, but we store in ms */
  374. card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
  375. /* Sleep / awake timeout in 100ns units */
  376. if (sa_shift > 0 && sa_shift <= 0x17)
  377. card->ext_csd.sa_timeout =
  378. 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
  379. card->ext_csd.erase_group_def =
  380. ext_csd[EXT_CSD_ERASE_GROUP_DEF];
  381. card->ext_csd.hc_erase_timeout = 300 *
  382. ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
  383. card->ext_csd.hc_erase_size =
  384. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
  385. card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
  386. /*
  387. * There are two boot regions of equal size, defined in
  388. * multiples of 128K.
  389. */
  390. if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
  391. for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
  392. part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
  393. mmc_part_add(card, part_size,
  394. EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
  395. "boot%d", idx, true,
  396. MMC_BLK_DATA_AREA_BOOT);
  397. }
  398. }
  399. }
  400. card->ext_csd.raw_hc_erase_gap_size =
  401. ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
  402. card->ext_csd.raw_sec_trim_mult =
  403. ext_csd[EXT_CSD_SEC_TRIM_MULT];
  404. card->ext_csd.raw_sec_erase_mult =
  405. ext_csd[EXT_CSD_SEC_ERASE_MULT];
  406. card->ext_csd.raw_sec_feature_support =
  407. ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
  408. card->ext_csd.raw_trim_mult =
  409. ext_csd[EXT_CSD_TRIM_MULT];
  410. card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
  411. card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
  412. if (card->ext_csd.rev >= 4) {
  413. if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
  414. EXT_CSD_PART_SETTING_COMPLETED)
  415. card->ext_csd.partition_setting_completed = 1;
  416. else
  417. card->ext_csd.partition_setting_completed = 0;
  418. mmc_manage_enhanced_area(card, ext_csd);
  419. mmc_manage_gp_partitions(card, ext_csd);
  420. card->ext_csd.sec_trim_mult =
  421. ext_csd[EXT_CSD_SEC_TRIM_MULT];
  422. card->ext_csd.sec_erase_mult =
  423. ext_csd[EXT_CSD_SEC_ERASE_MULT];
  424. card->ext_csd.sec_feature_support =
  425. ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
  426. card->ext_csd.trim_timeout = 300 *
  427. ext_csd[EXT_CSD_TRIM_MULT];
  428. /*
  429. * Note that the call to mmc_part_add above defaults to read
  430. * only. If this default assumption is changed, the call must
  431. * take into account the value of boot_locked below.
  432. */
  433. card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
  434. card->ext_csd.boot_ro_lockable = true;
  435. /* Save power class values */
  436. card->ext_csd.raw_pwr_cl_52_195 =
  437. ext_csd[EXT_CSD_PWR_CL_52_195];
  438. card->ext_csd.raw_pwr_cl_26_195 =
  439. ext_csd[EXT_CSD_PWR_CL_26_195];
  440. card->ext_csd.raw_pwr_cl_52_360 =
  441. ext_csd[EXT_CSD_PWR_CL_52_360];
  442. card->ext_csd.raw_pwr_cl_26_360 =
  443. ext_csd[EXT_CSD_PWR_CL_26_360];
  444. card->ext_csd.raw_pwr_cl_200_195 =
  445. ext_csd[EXT_CSD_PWR_CL_200_195];
  446. card->ext_csd.raw_pwr_cl_200_360 =
  447. ext_csd[EXT_CSD_PWR_CL_200_360];
  448. card->ext_csd.raw_pwr_cl_ddr_52_195 =
  449. ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
  450. card->ext_csd.raw_pwr_cl_ddr_52_360 =
  451. ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
  452. card->ext_csd.raw_pwr_cl_ddr_200_360 =
  453. ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
  454. }
  455. if (card->ext_csd.rev >= 5) {
  456. /* Adjust production date as per JEDEC JESD84-B451 */
  457. if (card->cid.year < 2010)
  458. card->cid.year += 16;
  459. /* check whether the eMMC card supports BKOPS */
  460. if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
  461. card->ext_csd.bkops = 1;
  462. card->ext_csd.man_bkops_en =
  463. (ext_csd[EXT_CSD_BKOPS_EN] &
  464. EXT_CSD_MANUAL_BKOPS_MASK);
  465. card->ext_csd.raw_bkops_status =
  466. ext_csd[EXT_CSD_BKOPS_STATUS];
  467. if (card->ext_csd.man_bkops_en)
  468. pr_debug("%s: MAN_BKOPS_EN bit is set\n",
  469. mmc_hostname(card->host));
  470. card->ext_csd.auto_bkops_en =
  471. (ext_csd[EXT_CSD_BKOPS_EN] &
  472. EXT_CSD_AUTO_BKOPS_MASK);
  473. if (card->ext_csd.auto_bkops_en)
  474. pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
  475. mmc_hostname(card->host));
  476. }
  477. /* check whether the eMMC card supports HPI */
  478. if (!mmc_card_broken_hpi(card) &&
  479. !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
  480. card->ext_csd.hpi = 1;
  481. if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
  482. card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
  483. else
  484. card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
  485. /*
  486. * Indicate the maximum timeout to close
  487. * a command interrupted by HPI
  488. */
  489. card->ext_csd.out_of_int_time =
  490. ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
  491. }
  492. card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
  493. card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
  494. /*
  495. * RPMB regions are defined in multiples of 128K.
  496. */
  497. card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
  498. if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
  499. mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
  500. EXT_CSD_PART_CONFIG_ACC_RPMB,
  501. "rpmb", 0, false,
  502. MMC_BLK_DATA_AREA_RPMB);
  503. }
  504. }
  505. card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
  506. if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
  507. card->erased_byte = 0xFF;
  508. else
  509. card->erased_byte = 0x0;
  510. /* eMMC v4.5 or later */
  511. card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
  512. if (card->ext_csd.rev >= 6) {
  513. card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
  514. card->ext_csd.generic_cmd6_time = 10 *
  515. ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
  516. card->ext_csd.power_off_longtime = 10 *
  517. ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
  518. card->ext_csd.cache_size =
  519. ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
  520. ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
  521. ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
  522. ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
  523. if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
  524. card->ext_csd.data_sector_size = 4096;
  525. else
  526. card->ext_csd.data_sector_size = 512;
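/* The data tag unit size is 2^TAG_UNIT_SIZE data sectors, stored here in bytes */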
  527. if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
  528. (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
  529. card->ext_csd.data_tag_unit_size =
  530. ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
  531. (card->ext_csd.data_sector_size);
  532. } else {
  533. card->ext_csd.data_tag_unit_size = 0;
  534. }
  535. card->ext_csd.max_packed_writes =
  536. ext_csd[EXT_CSD_MAX_PACKED_WRITES];
  537. card->ext_csd.max_packed_reads =
  538. ext_csd[EXT_CSD_MAX_PACKED_READS];
  539. } else {
  540. card->ext_csd.data_sector_size = 512;
  541. }
  542. /*
  543. * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
  544. * when accessing a specific field", so use it here if there is no
  545. * PARTITION_SWITCH_TIME.
  546. */
  547. if (!card->ext_csd.part_time)
  548. card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
  549. /* Some eMMC set the value too low so set a minimum */
  550. if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
  551. card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
  552. /* eMMC v5 or later */
  553. if (card->ext_csd.rev >= 7) {
  554. memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
  555. MMC_FIRMWARE_LEN);
  556. card->ext_csd.ffu_capable =
  557. (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
  558. !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
  559. card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
  560. card->ext_csd.device_life_time_est_typ_a =
  561. ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
  562. card->ext_csd.device_life_time_est_typ_b =
  563. ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
  564. }
  565. /* eMMC v5.1 or later */
  566. if (card->ext_csd.rev >= 8) {
  567. card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
  568. EXT_CSD_CMDQ_SUPPORTED;
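/* The CMDQ_DEPTH field encodes the queue depth minus one */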
  569. card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
  570. EXT_CSD_CMDQ_DEPTH_MASK) + 1;
  571. /* Exclude inefficiently small queue depths */
  572. if (card->ext_csd.cmdq_depth <= 2) {
  573. card->ext_csd.cmdq_support = false;
  574. card->ext_csd.cmdq_depth = 0;
  575. }
  576. if (card->ext_csd.cmdq_support) {
  577. pr_debug("%s: Command Queue supported depth %u\n",
  578. mmc_hostname(card->host),
  579. card->ext_csd.cmdq_depth);
  580. }
  581. }
  582. out:
  583. return err;
  584. }
  585. static int mmc_read_ext_csd(struct mmc_card *card)
  586. {
  587. u8 *ext_csd;
  588. int err;
  589. if (!mmc_can_ext_csd(card))
  590. return 0;
  591. err = mmc_get_ext_csd(card, &ext_csd);
  592. if (err) {
  593. /* If the host or the card can't do the switch,
  594. * fail more gracefully. */
  595. if ((err != -EINVAL)
  596. && (err != -ENOSYS)
  597. && (err != -EFAULT))
  598. return err;
  599. /*
  600. * High capacity cards should have this "magic" size
  601. * stored in their CSD.
  602. */
  603. if (card->csd.capacity == (4096 * 512)) {
  604. pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
  605. mmc_hostname(card->host));
  606. } else {
  607. pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
  608. mmc_hostname(card->host));
  609. err = 0;
  610. }
  611. return err;
  612. }
  613. err = mmc_decode_ext_csd(card, ext_csd);
  614. kfree(ext_csd);
  615. return err;
  616. }
  617. static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
  618. {
  619. u8 *bw_ext_csd;
  620. int err;
  621. if (bus_width == MMC_BUS_WIDTH_1)
  622. return 0;
  623. err = mmc_get_ext_csd(card, &bw_ext_csd);
  624. if (err)
  625. return err;
  626. /* only compare read only fields */
  627. err = !((card->ext_csd.raw_partition_support ==
  628. bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
  629. (card->ext_csd.raw_erased_mem_count ==
  630. bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
  631. (card->ext_csd.rev ==
  632. bw_ext_csd[EXT_CSD_REV]) &&
  633. (card->ext_csd.raw_ext_csd_structure ==
  634. bw_ext_csd[EXT_CSD_STRUCTURE]) &&
  635. (card->ext_csd.raw_card_type ==
  636. bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
  637. (card->ext_csd.raw_s_a_timeout ==
  638. bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
  639. (card->ext_csd.raw_hc_erase_gap_size ==
  640. bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
  641. (card->ext_csd.raw_erase_timeout_mult ==
  642. bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
  643. (card->ext_csd.raw_hc_erase_grp_size ==
  644. bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
  645. (card->ext_csd.raw_sec_trim_mult ==
  646. bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
  647. (card->ext_csd.raw_sec_erase_mult ==
  648. bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
  649. (card->ext_csd.raw_sec_feature_support ==
  650. bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
  651. (card->ext_csd.raw_trim_mult ==
  652. bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
  653. (card->ext_csd.raw_sectors[0] ==
  654. bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
  655. (card->ext_csd.raw_sectors[1] ==
  656. bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
  657. (card->ext_csd.raw_sectors[2] ==
  658. bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
  659. (card->ext_csd.raw_sectors[3] ==
  660. bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
  661. (card->ext_csd.raw_pwr_cl_52_195 ==
  662. bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
  663. (card->ext_csd.raw_pwr_cl_26_195 ==
  664. bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
  665. (card->ext_csd.raw_pwr_cl_52_360 ==
  666. bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
  667. (card->ext_csd.raw_pwr_cl_26_360 ==
  668. bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
  669. (card->ext_csd.raw_pwr_cl_200_195 ==
  670. bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
  671. (card->ext_csd.raw_pwr_cl_200_360 ==
  672. bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
  673. (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
  674. bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
  675. (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
  676. bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
  677. (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
  678. bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
  679. if (err)
  680. err = -EINVAL;
  681. kfree(bw_ext_csd);
  682. return err;
  683. }
  684. MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
  685. card->raw_cid[2], card->raw_cid[3]);
  686. MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
  687. card->raw_csd[2], card->raw_csd[3]);
  688. MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
  689. MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
  690. MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
  691. MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
  692. MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
  693. MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
  694. MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
  695. MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
  696. MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
  697. MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
  698. MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
  699. MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
  700. card->ext_csd.device_life_time_est_typ_a,
  701. card->ext_csd.device_life_time_est_typ_b);
  702. MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
  703. MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
  704. card->ext_csd.enhanced_area_offset);
  705. MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
  706. MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
  707. MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
  708. MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
  709. MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
  710. MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
  711. static ssize_t mmc_fwrev_show(struct device *dev,
  712. struct device_attribute *attr,
  713. char *buf)
  714. {
  715. struct mmc_card *card = mmc_dev_to_card(dev);
  716. if (card->ext_csd.rev < 7) {
  717. return sprintf(buf, "0x%x\n", card->cid.fwrev);
  718. } else {
  719. return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
  720. card->ext_csd.fwrev);
  721. }
  722. }
  723. static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
  724. static ssize_t mmc_dsr_show(struct device *dev,
  725. struct device_attribute *attr,
  726. char *buf)
  727. {
  728. struct mmc_card *card = mmc_dev_to_card(dev);
  729. struct mmc_host *host = card->host;
  730. if (card->csd.dsr_imp && host->dsr_req)
  731. return sprintf(buf, "0x%x\n", host->dsr);
  732. else
  733. /* return default DSR value */
  734. return sprintf(buf, "0x%x\n", 0x404);
  735. }
  736. static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
  737. static struct attribute *mmc_std_attrs[] = {
  738. &dev_attr_cid.attr,
  739. &dev_attr_csd.attr,
  740. &dev_attr_date.attr,
  741. &dev_attr_erase_size.attr,
  742. &dev_attr_preferred_erase_size.attr,
  743. &dev_attr_fwrev.attr,
  744. &dev_attr_ffu_capable.attr,
  745. &dev_attr_hwrev.attr,
  746. &dev_attr_manfid.attr,
  747. &dev_attr_name.attr,
  748. &dev_attr_oemid.attr,
  749. &dev_attr_prv.attr,
  750. &dev_attr_rev.attr,
  751. &dev_attr_pre_eol_info.attr,
  752. &dev_attr_life_time.attr,
  753. &dev_attr_serial.attr,
  754. &dev_attr_enhanced_area_offset.attr,
  755. &dev_attr_enhanced_area_size.attr,
  756. &dev_attr_raw_rpmb_size_mult.attr,
  757. &dev_attr_rel_sectors.attr,
  758. &dev_attr_ocr.attr,
  759. &dev_attr_rca.attr,
  760. &dev_attr_dsr.attr,
  761. &dev_attr_cmdq_en.attr,
  762. NULL,
  763. };
  764. ATTRIBUTE_GROUPS(mmc_std);
  765. static struct device_type mmc_type = {
  766. .groups = mmc_std_groups,
  767. };
  768. /*
  769. * Select the PowerClass for the current bus width
  770. * If power class is defined for 4/8 bit bus in the
  771. * extended CSD register, select it by executing the
  772. * mmc_switch command.
  773. */
  774. static int __mmc_select_powerclass(struct mmc_card *card,
  775. unsigned int bus_width)
  776. {
  777. struct mmc_host *host = card->host;
  778. struct mmc_ext_csd *ext_csd = &card->ext_csd;
  779. unsigned int pwrclass_val = 0;
  780. int err = 0;
  781. switch (1 << host->ios.vdd) {
  782. case MMC_VDD_165_195:
  783. if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
  784. pwrclass_val = ext_csd->raw_pwr_cl_26_195;
  785. else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
  786. pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
  787. ext_csd->raw_pwr_cl_52_195 :
  788. ext_csd->raw_pwr_cl_ddr_52_195;
  789. else if (host->ios.clock <= MMC_HS200_MAX_DTR)
  790. pwrclass_val = ext_csd->raw_pwr_cl_200_195;
  791. break;
  792. case MMC_VDD_27_28:
  793. case MMC_VDD_28_29:
  794. case MMC_VDD_29_30:
  795. case MMC_VDD_30_31:
  796. case MMC_VDD_31_32:
  797. case MMC_VDD_32_33:
  798. case MMC_VDD_33_34:
  799. case MMC_VDD_34_35:
  800. case MMC_VDD_35_36:
  801. if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
  802. pwrclass_val = ext_csd->raw_pwr_cl_26_360;
  803. else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
  804. pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
  805. ext_csd->raw_pwr_cl_52_360 :
  806. ext_csd->raw_pwr_cl_ddr_52_360;
  807. else if (host->ios.clock <= MMC_HS200_MAX_DTR)
  808. pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
  809. ext_csd->raw_pwr_cl_ddr_200_360 :
  810. ext_csd->raw_pwr_cl_200_360;
  811. break;
  812. default:
  813. pr_warn("%s: Voltage range not supported for power class\n",
  814. mmc_hostname(host));
  815. return -EINVAL;
  816. }
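/* Each power class byte carries separate fields for the 8-bit and 4-bit bus widths */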
  817. if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
  818. pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
  819. EXT_CSD_PWR_CL_8BIT_SHIFT;
  820. else
  821. pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
  822. EXT_CSD_PWR_CL_4BIT_SHIFT;
  823. /* If the power class is different from the default value */
  824. if (pwrclass_val > 0) {
  825. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  826. EXT_CSD_POWER_CLASS,
  827. pwrclass_val,
  828. card->ext_csd.generic_cmd6_time);
  829. }
  830. return err;
  831. }
  832. static int mmc_select_powerclass(struct mmc_card *card)
  833. {
  834. struct mmc_host *host = card->host;
  835. u32 bus_width, ext_csd_bits;
  836. int err, ddr;
  837. /* Power class selection is supported for versions >= 4.0 */
  838. if (!mmc_can_ext_csd(card))
  839. return 0;
  840. bus_width = host->ios.bus_width;
  841. /* Power class values are defined only for 4/8 bit bus */
  842. if (bus_width == MMC_BUS_WIDTH_1)
  843. return 0;
  844. ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
  845. if (ddr)
  846. ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
  847. EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
  848. else
  849. ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
  850. EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
  851. err = __mmc_select_powerclass(card, ext_csd_bits);
  852. if (err)
  853. pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
  854. mmc_hostname(host), 1 << bus_width, ddr);
  855. return err;
  856. }
  857. /*
  858. * Set the bus speed for the selected speed mode.
  859. */
  860. static void mmc_set_bus_speed(struct mmc_card *card)
  861. {
  862. unsigned int max_dtr = (unsigned int)-1;
  863. if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
  864. max_dtr > card->ext_csd.hs200_max_dtr)
  865. max_dtr = card->ext_csd.hs200_max_dtr;
  866. else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
  867. max_dtr = card->ext_csd.hs_max_dtr;
  868. else if (max_dtr > card->csd.max_dtr)
  869. max_dtr = card->csd.max_dtr;
  870. mmc_set_clock(card->host, max_dtr);
  871. }
  872. /*
  873. * Select the bus width among 4-bit and 8-bit (SDR).
  874. * If the bus width is changed successfully, return the selected width value.
  875. * Zero is returned instead of an error value if the wide width is not supported.
  876. */
  877. static int mmc_select_bus_width(struct mmc_card *card)
  878. {
  879. static unsigned ext_csd_bits[] = {
  880. EXT_CSD_BUS_WIDTH_8,
  881. EXT_CSD_BUS_WIDTH_4,
  882. };
  883. static unsigned bus_widths[] = {
  884. MMC_BUS_WIDTH_8,
  885. MMC_BUS_WIDTH_4,
  886. };
  887. struct mmc_host *host = card->host;
  888. unsigned idx, bus_width = 0;
  889. int err = 0;
  890. if (!mmc_can_ext_csd(card) ||
  891. !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
  892. return 0;
  893. idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
  894. /*
  895. * Unlike SD, MMC cards don't have a configuration register to report
  896. * the supported bus widths, so either the bus test command is run to
  897. * identify the supported bus width, or the EXT_CSD read at the new
  898. * bus width is compared against the EXT_CSD read earlier in 1-bit mode.
  899. */
  900. for (; idx < ARRAY_SIZE(bus_widths); idx++) {
  901. /*
  902. * If the host is capable of 8-bit transfer, first switch
  903. * the device to 8-bit transfer mode. If the mmc_switch
  904. * command returns an error, fall back to 4-bit transfer
  905. * mode. On success, set the corresponding bus width on
  906. * the host.
  907. */
  908. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  909. EXT_CSD_BUS_WIDTH,
  910. ext_csd_bits[idx],
  911. card->ext_csd.generic_cmd6_time);
  912. if (err)
  913. continue;
  914. bus_width = bus_widths[idx];
  915. mmc_set_bus_width(host, bus_width);
  916. /*
  917. * If controller can't handle bus width test,
  918. * compare ext_csd previously read in 1 bit mode
  919. * against ext_csd at new bus width
  920. */
  921. if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
  922. err = mmc_compare_ext_csds(card, bus_width);
  923. else
  924. err = mmc_bus_test(card, bus_width);
  925. if (!err) {
  926. err = bus_width;
  927. break;
  928. } else {
  929. pr_warn("%s: switch to bus width %d failed\n",
  930. mmc_hostname(host), 1 << bus_width);
  931. }
  932. }
  933. return err;
  934. }
  935. /*
  936. * Switch to the high-speed mode
  937. */
  938. static int mmc_select_hs(struct mmc_card *card)
  939. {
  940. int err;
  941. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  942. EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
  943. card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
  944. true, true, true);
  945. if (err)
  946. pr_warn("%s: switch to high-speed failed, err:%d\n",
  947. mmc_hostname(card->host), err);
  948. return err;
  949. }
  950. /*
  951. * Activate wide bus and DDR if supported.
  952. */
  953. static int mmc_select_hs_ddr(struct mmc_card *card)
  954. {
  955. struct mmc_host *host = card->host;
  956. u32 bus_width, ext_csd_bits;
  957. int err = 0;
  958. if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
  959. return 0;
  960. bus_width = host->ios.bus_width;
  961. if (bus_width == MMC_BUS_WIDTH_1)
  962. return 0;
  963. ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
  964. EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
  965. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  966. EXT_CSD_BUS_WIDTH,
  967. ext_csd_bits,
  968. card->ext_csd.generic_cmd6_time,
  969. MMC_TIMING_MMC_DDR52,
  970. true, true, true);
  971. if (err) {
  972. pr_err("%s: switch to bus width %d ddr failed\n",
  973. mmc_hostname(host), 1 << bus_width);
  974. return err;
  975. }
  976. /*
  977. * eMMC cards can support 3.3V to 1.2V i/o (vccq)
  978. * signaling.
  979. *
  980. * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
  981. *
  982. * 1.8V vccq at 3.3V core voltage (vcc) is not required
  983. * in the JEDEC spec for DDR.
  984. *
  985. * Even though an (e)MMC card can support 3.3V to 1.2V vccq, not
  986. * every host controller can support this, e.g. some SDHCI
  987. * controllers that connect to an eMMC device. Some of these
  988. * host controllers still need to use 1.8V vccq in order to
  989. * support DDR mode.
  990. *
  991. * So the sequence will be:
  992. * if (host and device can both support 1.2v IO)
  993. * use 1.2v IO;
  994. * else if (host and device can both support 1.8v IO)
  995. * use 1.8v IO;
  996. * so if host and device can only support 3.3v IO, this is the
  997. * last choice.
  998. *
  999. * WARNING: eMMC rules are NOT the same as SD DDR
  1000. */
  1001. if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
  1002. err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
  1003. if (!err)
  1004. return 0;
  1005. }
  1006. if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
  1007. host->caps & MMC_CAP_1_8V_DDR)
  1008. err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
  1009. /* make sure vccq is 3.3v after switching disaster */
  1010. if (err)
  1011. err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
  1012. return err;
  1013. }
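/*
 * Switch from HS200 to HS400: drop back to HS timing and clock, move the
 * bus to 8-bit DDR, then switch the card and the host to HS400 timing.
 */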
  1014. static int mmc_select_hs400(struct mmc_card *card)
  1015. {
  1016. struct mmc_host *host = card->host;
  1017. unsigned int max_dtr;
  1018. int err = 0;
  1019. u8 val;
  1020. /*
  1021. * HS400 mode requires 8-bit bus width
  1022. */
  1023. if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
  1024. host->ios.bus_width == MMC_BUS_WIDTH_8))
  1025. return 0;
  1026. /* Switch card to HS mode */
  1027. val = EXT_CSD_TIMING_HS;
  1028. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1029. EXT_CSD_HS_TIMING, val,
  1030. card->ext_csd.generic_cmd6_time, 0,
  1031. true, false, true);
  1032. if (err) {
  1033. pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
  1034. mmc_hostname(host), err);
  1035. return err;
  1036. }
  1037. /* Set host controller to HS timing */
  1038. mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
  1039. /* Prepare host to downgrade to HS timing */
  1040. if (host->ops->hs400_downgrade)
  1041. host->ops->hs400_downgrade(host);
  1042. /* Reduce frequency to HS frequency */
  1043. max_dtr = card->ext_csd.hs_max_dtr;
  1044. mmc_set_clock(host, max_dtr);
  1045. err = mmc_switch_status(card);
  1046. if (err)
  1047. goto out_err;
  1048. /* Switch card to DDR */
  1049. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1050. EXT_CSD_BUS_WIDTH,
  1051. EXT_CSD_DDR_BUS_WIDTH_8,
  1052. card->ext_csd.generic_cmd6_time);
  1053. if (err) {
  1054. pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
  1055. mmc_hostname(host), err);
  1056. return err;
  1057. }
  1058. /* Switch card to HS400 */
  1059. val = EXT_CSD_TIMING_HS400 |
  1060. card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
  1061. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1062. EXT_CSD_HS_TIMING, val,
  1063. card->ext_csd.generic_cmd6_time, 0,
  1064. true, false, true);
  1065. if (err) {
  1066. pr_err("%s: switch to hs400 failed, err:%d\n",
  1067. mmc_hostname(host), err);
  1068. return err;
  1069. }
  1070. /* Set host controller to HS400 timing and frequency */
  1071. mmc_set_timing(host, MMC_TIMING_MMC_HS400);
  1072. mmc_set_bus_speed(card);
  1073. if (host->ops->hs400_complete)
  1074. host->ops->hs400_complete(host);
  1075. err = mmc_switch_status(card);
  1076. if (err)
  1077. goto out_err;
  1078. return 0;
  1079. out_err:
  1080. pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
  1081. __func__, err);
  1082. return err;
  1083. }
  1084. int mmc_hs200_to_hs400(struct mmc_card *card)
  1085. {
  1086. return mmc_select_hs400(card);
  1087. }
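/*
 * Step HS400 back down to HS200, the mode in which tuning is performed:
 * HS400 -> HS DDR -> HS -> HS200, checking the switch status at each step.
 */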
  1088. int mmc_hs400_to_hs200(struct mmc_card *card)
  1089. {
  1090. struct mmc_host *host = card->host;
  1091. unsigned int max_dtr;
  1092. int err;
  1093. u8 val;
  1094. /* Reduce frequency to HS */
  1095. max_dtr = card->ext_csd.hs_max_dtr;
  1096. mmc_set_clock(host, max_dtr);
  1097. /* Switch HS400 to HS DDR */
  1098. val = EXT_CSD_TIMING_HS;
  1099. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
  1100. val, card->ext_csd.generic_cmd6_time, 0,
  1101. true, false, true);
  1102. if (err)
  1103. goto out_err;
  1104. mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
  1105. err = mmc_switch_status(card);
  1106. if (err)
  1107. goto out_err;
  1108. /* Switch HS DDR to HS */
  1109. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
  1110. EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
  1111. 0, true, false, true);
  1112. if (err)
  1113. goto out_err;
  1114. mmc_set_timing(host, MMC_TIMING_MMC_HS);
  1115. if (host->ops->hs400_downgrade)
  1116. host->ops->hs400_downgrade(host);
  1117. err = mmc_switch_status(card);
  1118. if (err)
  1119. goto out_err;
  1120. /* Switch HS to HS200 */
  1121. val = EXT_CSD_TIMING_HS200 |
  1122. card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
  1123. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
  1124. val, card->ext_csd.generic_cmd6_time, 0,
  1125. true, false, true);
  1126. if (err)
  1127. goto out_err;
  1128. mmc_set_timing(host, MMC_TIMING_MMC_HS200);
  1129. /*
  1130. * For HS200, CRC errors are not a reliable way to know the switch
  1131. * failed. If there really is a problem, we would expect tuning will
  1132. * fail and the result ends up the same.
  1133. */
  1134. err = __mmc_switch_status(card, false);
  1135. if (err)
  1136. goto out_err;
  1137. mmc_set_bus_speed(card);
  1138. /* Prepare tuning for HS400 mode. */
  1139. if (host->ops->prepare_hs400_tuning)
  1140. host->ops->prepare_hs400_tuning(host, &host->ios);
  1141. return 0;
  1142. out_err:
  1143. pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
  1144. __func__, err);
  1145. return err;
  1146. }
  1147. static void mmc_select_driver_type(struct mmc_card *card)
  1148. {
  1149. int card_drv_type, drive_strength, drv_type = 0;
  1150. int fixed_drv_type = card->host->fixed_drv_type;
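/* Driver strength type 0 (the default) is always treated as supported */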
  1151. card_drv_type = card->ext_csd.raw_driver_strength |
  1152. mmc_driver_type_mask(0);
  1153. if (fixed_drv_type >= 0)
  1154. drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
  1155. ? fixed_drv_type : 0;
  1156. else
  1157. drive_strength = mmc_select_drive_strength(card,
  1158. card->ext_csd.hs200_max_dtr,
  1159. card_drv_type, &drv_type);
  1160. card->drive_strength = drive_strength;
  1161. if (drv_type)
  1162. mmc_set_driver_type(card->host, drv_type);
  1163. }
  1164. static int mmc_select_hs400es(struct mmc_card *card)
  1165. {
  1166. struct mmc_host *host = card->host;
  1167. int err = -EINVAL;
  1168. u8 val;
  1169. if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
  1170. err = -ENOTSUPP;
  1171. goto out_err;
  1172. }
  1173. if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
  1174. err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
  1175. if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
  1176. err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
  1177. /* If this fails, try again during the next card power cycle */
  1178. if (err)
  1179. goto out_err;
  1180. err = mmc_select_bus_width(card);
  1181. if (err != MMC_BUS_WIDTH_8) {
  1182. pr_err("%s: switch to 8bit bus width failed, err:%d\n",
  1183. mmc_hostname(host), err);
  1184. err = err < 0 ? err : -ENOTSUPP;
  1185. goto out_err;
  1186. }
  1187. /* Switch card to HS mode */
  1188. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1189. EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
  1190. card->ext_csd.generic_cmd6_time, 0,
  1191. true, false, true);
  1192. if (err) {
  1193. pr_err("%s: switch to hs for hs400es failed, err:%d\n",
  1194. mmc_hostname(host), err);
  1195. goto out_err;
  1196. }
  1197. mmc_set_timing(host, MMC_TIMING_MMC_HS);
  1198. err = mmc_switch_status(card);
  1199. if (err)
  1200. goto out_err;
  1201. mmc_set_clock(host, card->ext_csd.hs_max_dtr);
  1202. /* Switch card to DDR with strobe bit */
  1203. val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
  1204. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1205. EXT_CSD_BUS_WIDTH,
  1206. val,
  1207. card->ext_csd.generic_cmd6_time);
  1208. if (err) {
  1209. pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
  1210. mmc_hostname(host), err);
  1211. goto out_err;
  1212. }
  1213. mmc_select_driver_type(card);
  1214. /* Switch card to HS400 */
  1215. val = EXT_CSD_TIMING_HS400 |
  1216. card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
  1217. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1218. EXT_CSD_HS_TIMING, val,
  1219. card->ext_csd.generic_cmd6_time, 0,
  1220. true, false, true);
  1221. if (err) {
  1222. pr_err("%s: switch to hs400es failed, err:%d\n",
  1223. mmc_hostname(host), err);
  1224. goto out_err;
  1225. }
  1226. /* Set host controller to HS400 timing and frequency */
  1227. mmc_set_timing(host, MMC_TIMING_MMC_HS400);
  1228. /* Have the controller enable the enhanced strobe function */
  1229. host->ios.enhanced_strobe = true;
  1230. if (host->ops->hs400_enhanced_strobe)
  1231. host->ops->hs400_enhanced_strobe(host, &host->ios);
  1232. err = mmc_switch_status(card);
  1233. if (err)
  1234. goto out_err;
  1235. return 0;
  1236. out_err:
  1237. pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
  1238. __func__, err);
  1239. return err;
  1240. }
  1241. /*
  1242. * For device supporting HS200 mode, the following sequence
  1243. * should be done before executing the tuning process.
  1244. * 1. set the desired bus width (4-bit or 8-bit, 1-bit is not supported)
  1245. * 2. switch to HS200 mode
  1246. * 3. set the clock to > 52 MHz and <= 200 MHz
  1247. */
  1248. static int mmc_select_hs200(struct mmc_card *card)
  1249. {
  1250. struct mmc_host *host = card->host;
  1251. unsigned int old_timing, old_signal_voltage;
  1252. int err = -EINVAL;
  1253. u8 val;
  1254. old_signal_voltage = host->ios.signal_voltage;
  1255. if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
  1256. err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
  1257. if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
  1258. err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
  1259. /* If this fails, try again during the next card power cycle */
  1260. if (err)
  1261. return err;
  1262. mmc_select_driver_type(card);
  1263. /*
  1264. * Set the bus width (4 or 8) according to the host's support and
  1265. * switch to HS200 mode if the bus width is set successfully.
  1266. */
  1267. err = mmc_select_bus_width(card);
  1268. if (err > 0) {
  1269. val = EXT_CSD_TIMING_HS200 |
  1270. card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
  1271. err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1272. EXT_CSD_HS_TIMING, val,
  1273. card->ext_csd.generic_cmd6_time, 0,
  1274. true, false, true);
  1275. if (err)
  1276. goto err;
  1277. old_timing = host->ios.timing;
  1278. mmc_set_timing(host, MMC_TIMING_MMC_HS200);
  1279. /*
  1280. * For HS200, CRC errors are not a reliable way to know the
  1281. * switch failed. If there really is a problem, we would expect
  1282. * tuning will fail and the result ends up the same.
  1283. */
  1284. err = __mmc_switch_status(card, false);
  1285. /*
  1286. * mmc_select_timing() assumes timing has not changed if
  1287. * it is a switch error.
  1288. */
  1289. if (err == -EBADMSG)
  1290. mmc_set_timing(host, old_timing);
  1291. }
  1292. err:
  1293. if (err) {
  1294. /* fall back to the old signal voltage, if fails report error */
  1295. if (mmc_set_signal_voltage(host, old_signal_voltage))
  1296. err = -EIO;
  1297. pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
  1298. __func__, err);
  1299. }
  1300. return err;
  1301. }
/*
 * Activate High Speed, HS200 or HS400ES mode if supported.
 */
static int mmc_select_timing(struct mmc_card *card)
{
	int err = 0;

	if (!mmc_can_ext_csd(card))
		goto bus_speed;

	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
		err = mmc_select_hs400es(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
		err = mmc_select_hs200(card);
	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
		err = mmc_select_hs(card);

	if (err && err != -EBADMSG)
		return err;

bus_speed:
	/*
	 * Set the bus speed to the selected bus timing.
	 * If no timing was selected, backward-compatible (legacy) timing is
	 * the default.
	 */
	mmc_set_bus_speed(card);
	return 0;
}
/*
 * Execute the tuning sequence to seek the proper bus operating
 * conditions for HS200 and HS400, which sends CMD21 to the device.
 */
static int mmc_hs200_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	/*
	 * Timing should be adjusted to the HS400 target
	 * operation frequency for the tuning process.
	 */
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	    host->ios.bus_width == MMC_BUS_WIDTH_8)
		if (host->ops->prepare_hs400_tuning)
			host->ops->prepare_hs400_tuning(host, &host->ios);

	return mmc_execute_tuning(card);
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	u32 rocr;

	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle() is also needed for eMMC devices that are asleep.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	err = mmc_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->ocr = ocr;
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * Call the optional HC's init_card function to handle quirks.
	 */
	if (host->ops->init_card)
		host->ops->init_card(host, card);

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}
	/*
	 * Handle DSR configuration only for cards supporting DSR and hosts
	 * requesting a DSR configuration.
	 */
	if (card->csd.dsr_imp && host->dsr_req)
		mmc_set_dsr(host);

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/* Read extended CSD. */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/*
		 * If doing byte addressing, check if required to do sector
		 * addressing. Handle the case of <2GB cards needing sector
		 * addressing. See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (rocr & BIT(30))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
	if (card->ext_csd.rev >= 3) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable the enhanced area offset & size; we
			 * will try to enable ERASE_GROUP_DEF again during the
			 * next reinit.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * ERASE_GRP_DEF was enabled successfully. This affects
			 * the erase size, so the erase size needs to be reset
			 * here.
			 */
			mmc_set_erase_size(card);
		}
	}
	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Enable power_off_notification byte in the ext_csd register
	 */
	if (card->ext_csd.rev >= 6) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * The err can be -EBADMSG or 0,
		 * so check for success and update the flag
		 */
		if (!err)
			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
	}

	/*
	 * Select timing interface
	 */
	err = mmc_select_timing(card);
	if (err)
		goto free_card;

	if (mmc_card_hs200(card)) {
		err = mmc_hs200_tuning(card);
		if (err)
			goto free_card;

		err = mmc_select_hs400(card);
		if (err)
			goto free_card;
	} else if (!mmc_card_hs400es(card)) {
		/* Select the desired bus width optionally */
		err = mmc_select_bus_width(card);
		if (err > 0 && mmc_card_hs(card)) {
			err = mmc_select_hs_ddr(card);
			if (err)
				goto free_card;
		}
	}

	/*
	 * Choose the power class with selected bus interface
	 */
	mmc_select_powerclass(card);

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling HPI failed\n",
				mmc_hostname(card->host));
			card->ext_csd.hpi_en = 0;
			err = 0;
		} else {
			card->ext_csd.hpi_en = 1;
		}
	}
	/*
	 * If the cache size is higher than 0, this indicates the existence of
	 * a cache and it can be turned on. Note that some eMMCs from Micron
	 * have been reported to need a ~800 ms timeout while enabling the
	 * cache after sudden power failure tests. Let's extend the timeout to
	 * a minimum of MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
	 */
	if (card->ext_csd.cache_size > 0) {
		unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;

		timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_CACHE_CTRL, 1, timeout_ms);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if there was no error has the cache been turned on
		 * successfully.
		 */
		if (err) {
			pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
				mmc_hostname(card->host), err);
			card->ext_csd.cache_ctrl = 0;
			err = 0;
		} else {
			card->ext_csd.cache_ctrl = 1;
		}
	}

	/*
	 * Enable Command Queue if supported. Note that Packed Commands cannot
	 * be used with Command Queue.
	 */
	card->ext_csd.cmdq_en = false;
	if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
		err = mmc_cmdq_enable(card);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling CMDQ failed\n",
				mmc_hostname(card->host));
			card->ext_csd.cmdq_support = false;
			card->ext_csd.cmdq_depth = 0;
			err = 0;
		}
	}
	/*
	 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
	 * disabled temporarily, so a flag is needed to indicate when to
	 * re-enable the Command Queue.
	 */
	card->reenable_cmdq = card->ext_csd.cmdq_en;
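
	/*
	 * With CMDQ enabled on the card, also bring up the host's Command
	 * Queue Engine, unless it is already running.
	 */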
	if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
		err = host->cqe_ops->cqe_enable(host, card);
		if (err) {
			pr_err("%s: Failed to enable CQE, error %d\n",
			       mmc_hostname(host), err);
		} else {
			host->cqe_enabled = true;
			pr_info("%s: Command Queue Engine enabled\n",
				mmc_hostname(host));
		}
	}
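
	/*
	 * Hosts that cannot tolerate 3.3V signalling must have negotiated a
	 * lower signal voltage by this point; otherwise the init fails.
	 */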
	if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
	    host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		pr_err("%s: Host failed to negotiate down from 3.3V\n",
		       mmc_hostname(host));
		err = -EINVAL;
		goto free_card;
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	return err;
}
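
/*
 * Sleep/Awake (CMD5) is assumed to be usable from EXT_CSD revision 3
 * (eMMC 4.3) onward, which is what this check reflects.
 */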
static int mmc_can_sleep(struct mmc_card *card)
{
	return (card && card->ext_csd.rev >= 3);
}

static int mmc_sleep(struct mmc_host *host)
{
	struct mmc_command cmd = {};
	struct mmc_card *card = host->card;
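	/* sa_timeout is given in 100ns units; convert it to ms, rounding up. */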
	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
	int err;

	/* Re-tuning can't be done once the card is deselected */
	mmc_retune_hold(host);

	err = mmc_deselect_cards(host);
	if (err)
		goto out_release;
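
	/* CMD5 with bit 15 of the argument set requests the Sleep state. */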
	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	cmd.arg |= 1 << 15;

	/*
	 * If the max_busy_timeout of the host is specified, validate it against
	 * the sleep cmd timeout. A failure means we need to prevent the host
	 * from doing hw busy detection, which is done by converting to a R1
	 * response instead of a R1B. Note, some hosts require R1B, which also
	 * means they are on their own when it comes to dealing with the busy
	 * timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto out_release;

	/*
	 * If the host does not wait while the card signals busy, then we will
	 * have to wait the sleep/awake timeout. Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and
	 * most others) is invalid while the card sleeps.
	 */
	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		mmc_delay(timeout_ms);

out_release:
	mmc_retune_release(host);
	return err;
}
static int mmc_can_poweroff_notify(const struct mmc_card *card)
{
	return card &&
		mmc_card_mmc(card) &&
		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}

static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
	unsigned int timeout = card->ext_csd.generic_cmd6_time;
	int err;

	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
	if (notify_type == EXT_CSD_POWER_OFF_LONG)
		timeout = card->ext_csd.power_off_longtime;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_POWER_OFF_NOTIFICATION,
			   notify_type, timeout, 0, true, false, false);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);

	/* Disable the power off notification after the switch operation. */
	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;

	return err;
}

/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	mmc_remove_card(host->card);
	host->card = NULL;
}

/*
 * Card detection - card is alive.
 */
static int mmc_alive(struct mmc_host *host)
{
	return mmc_send_status(host->card, NULL);
}

/*
 * Card detection callback from host.
 */
static void mmc_detect(struct mmc_host *host)
{
	int err;

	mmc_get_card(host->card, NULL);

	/*
	 * Just check if our card has been removed.
	 */
	err = _mmc_detect_card_removed(host);

	mmc_put_card(host->card, NULL);

	if (err) {
		mmc_remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
	}
}
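
/*
 * The card's volatile cache counts as enabled when the card reports a cache
 * and CACHE_CTRL is currently on.
 */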
static bool _mmc_cache_enabled(struct mmc_host *host)
{
	return host->card->ext_csd.cache_size > 0 &&
	       host->card->ext_csd.cache_ctrl & 1;
}
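
/*
 * Common suspend path shared by system suspend, runtime suspend and shutdown:
 * stop any running BKOPS, flush the cache, then either send a power-off
 * notification, put the card to sleep or simply deselect it before the power
 * is cut.
 */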
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
	int err = 0;
	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
					EXT_CSD_POWER_OFF_LONG;

	mmc_claim_host(host);

	if (mmc_card_suspended(host->card))
		goto out;

	if (mmc_card_doing_bkops(host->card)) {
		err = mmc_stop_bkops(host->card);
		if (err)
			goto out;
	}

	err = mmc_flush_cache(host->card);
	if (err)
		goto out;

	if (mmc_can_poweroff_notify(host->card) &&
	    ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
		err = mmc_poweroff_notify(host->card, notify_type);
	else if (mmc_can_sleep(host->card))
		err = mmc_sleep(host);
	else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);

	if (!err) {
		mmc_power_off(host);
		mmc_card_set_suspended(host->card);
	}
out:
	mmc_release_host(host);
	return err;
}
/*
 * Suspend callback
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err;

	err = _mmc_suspend(host, true);
	if (!err) {
		pm_runtime_disable(&host->card->dev);
		pm_runtime_set_suspended(&host->card->dev);
	}

	return err;
}

/*
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 */
static int _mmc_resume(struct mmc_host *host)
{
	int err = 0;

	mmc_claim_host(host);

	if (!mmc_card_suspended(host->card))
		goto out;

	mmc_power_up(host, host->card->ocr);
	err = mmc_init_card(host, host->card->ocr, host->card);
	mmc_card_clr_suspended(host->card);

out:
	mmc_release_host(host);
	return err;
}

/*
 * Shutdown callback
 */
static int mmc_shutdown(struct mmc_host *host)
{
	int err = 0;

	/*
	 * In a specific case for poweroff notify, we need to resume the card
	 * before we can shut it down properly.
	 */
	if (mmc_can_poweroff_notify(host->card) &&
	    !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
		err = _mmc_resume(host);

	if (!err)
		err = _mmc_suspend(host, false);

	return err;
}

/*
 * Callback for resume.
 */
static int mmc_resume(struct mmc_host *host)
{
	pm_runtime_enable(&host->card->dev);
	return 0;
}

/*
 * Callback for runtime_suspend.
 */
static int mmc_runtime_suspend(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
		return 0;

	err = _mmc_suspend(host, true);
	if (err)
		pr_err("%s: error %d doing aggressive suspend\n",
		       mmc_hostname(host), err);

	return err;
}

/*
 * Callback for runtime_resume.
 */
static int mmc_runtime_resume(struct mmc_host *host)
{
	int err;

	err = _mmc_resume(host);
	if (err && err != -ENOMEDIUM)
		pr_err("%s: error %d doing runtime resume\n",
		       mmc_hostname(host), err);

	return 0;
}
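
/*
 * The RST_n signal may only be used once it has been enabled in
 * EXT_CSD_RST_N_FUNCTION.
 */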
static int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}

static int _mmc_hw_reset(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	/*
	 * In the case of recovery, we can't expect flushing the cache to
	 * always work, but we have a go and ignore any errors.
	 */
	mmc_flush_cache(host->card);

	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
	     mmc_can_reset(card)) {
		/* If the card accepts the RST_n signal, send it. */
		mmc_set_clock(host, host->f_init);
		host->ops->hw_reset(host);
		/* Set initial state and call mmc_set_ios */
		mmc_set_initial_state(host);
	} else {
		/* Do a brute force power cycle */
		mmc_power_cycle(host, card->ocr);
		mmc_pwrseq_reset(host);
	}
	return mmc_init_card(host, card->ocr, card);
}
static const struct mmc_bus_ops mmc_ops = {
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.runtime_suspend = mmc_runtime_suspend,
	.runtime_resume = mmc_runtime_resume,
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
	.hw_reset = _mmc_hw_reset,
	.cache_enabled = _mmc_cache_enabled,
};
/*
 * Starting point for MMC card init.
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr, rocr;

	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus(host, &mmc_ops);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	rocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!rocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, rocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);
	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	mmc_claim_host(host);
	return 0;

remove_card:
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
	       mmc_hostname(host), err);

	return err;
}