ms_block.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ms_block.c - Sony MemoryStick (legacy) storage support
 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
 *
 * Minor portions of the driver were copied from mspro_block.c which is
 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
 */
#define DRIVER_NAME "ms_block"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/memstick.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/bitmap.h>
#include <linux/scatterlist.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include "ms_block.h"

static int debug;
static int cache_flush_timeout = 1000;
static bool verify_writes;

/*
 * Copies a section of 'sg_from', starting at offset 'offset' and of length
 * 'len', to another scatterlist of 'to_nents' entries
 */
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	size_t copied = 0;

	while (offset > 0) {
		if (offset >= sg_from->length) {
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
				sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	sg_mark_end(sg_to);
	return copied;
}
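
/*
 * Usage sketch (illustrative, not part of the driver): how the handlers
 * below carve one page out of a request's scatterlist. 'req_sg', 'offset'
 * and 'page_size' are hypothetical stand-ins for msb->current_sg,
 * msb->current_sg_offset and msb->page_size.
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	if (msb_sg_copy(req_sg, sg, ARRAY_SIZE(sg), offset, page_size) <
 *				page_size)
 *		return -EIO;	// source sg too short to cover a whole page
 */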

/*
 * Compares a section of 'sg', starting at offset 'offset' and of length
 * 'len', to a linear buffer of length 'len' at address 'buffer'
 * Returns 0 if equal and -1 otherwise
 */
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
					size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_nents(sg),
					SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}

/* Get the zone at which the block with logical address 'lba' lives
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, out of which 494 are used in the
 * first zone and 496 are used in all following zones.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}
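
/*
 * Worked example: lba 493 -> zone 0; lba 494 -> (0 / 496) + 1 = zone 1;
 * lba 989 -> (495 / 496) + 1 = zone 1; lba 990 -> (496 / 496) + 1 = zone 2.
 */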

/* Get zone of physical block. Trivial */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}

/* Debug test to validate free block counts */
static int msb_validate_used_block_bitmap(struct msb_data *msb)
{
	int total_free_blocks = 0;
	int i;

	if (!debug)
		return 0;

	for (i = 0; i < msb->zone_count; i++)
		total_free_blocks += msb->free_block_count[i];

	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
					msb->block_count) == total_free_blocks)
		return 0;

	pr_err("BUG: free block counts don't match the bitmap");
	msb->read_only = true;
	return -EINVAL;
}

/* Mark physical block as used */
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already used pba %d as used", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}

/* Mark physical block as free */
static void msb_mark_block_unused(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__clear_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]++;
}

/* Invalidate current register window */
static void msb_invalidate_reg_window(struct msb_data *msb)
{
	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
	msb->reg_addr.w_length = sizeof(struct ms_id_register);
	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
	msb->reg_addr.r_length = sizeof(struct ms_id_register);
	msb->addr_valid = false;
}

/* Start a state machine */
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}
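
/*
 * Contract used by all h_msb_* handlers below: msb->state holds the current
 * step; a handler fills card->current_mrq and returns 0 to have the core
 * submit that request and call the handler again with the result, or it
 * finishes by calling msb_exit_state_machine(), which records the exit code
 * and completes card->mrq_complete so msb_run_state_machine() can return it.
 */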

/* State machines call this to exit */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}

/* read INT register */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}

/* Read a register */
static int msb_read_regs(struct msb_data *msb, int offset, int len)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.r_offset != offset ||
	    msb->reg_addr.r_length != len || !msb->addr_valid) {

		msb->reg_addr.r_offset = offset;
		msb->reg_addr.r_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
	return 1;
}

/* Write a card register */
static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.w_offset != offset ||
	    msb->reg_addr.w_length != len || !msb->addr_valid) {

		msb->reg_addr.w_offset = offset;
		msb->reg_addr.w_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
	return 1;
}
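
/*
 * Note the calling pattern both register helpers rely on: when the register
 * window must be (re)programmed they queue MS_TPC_SET_RW_REG_ADRS and
 * return 0, so a state machine stays in its current state and retries; once
 * the window matches they queue the actual MS_TPC_READ_REG/MS_TPC_WRITE_REG
 * and return 1. Sketch of a caller (SOME_STATE/NEXT_STATE are hypothetical):
 *
 *	case SOME_STATE:
 *		if (!msb_write_regs(msb, offset, len, buf))
 *			return 0;	// window was updated, retry state
 *		msb->state = NEXT_STATE;
 *		return 0;
 */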

/* Handler for absence of IO */
static int h_msb_default_bad(struct memstick_dev *card,
						struct memstick_request **mrq)
{
	return -ENXIO;
}

/*
 * This function is a handler for reads of one page from device.
 * Writes output to msb->current_sg, takes sector address from msb->regs.param
 * Can also be used to read extra data only. Set params accordingly.
 */
static int h_msb_read_page(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		 * the reg window, and thus it returns a request for that.
		 * Then we stay in this state and retry
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If we don't actually need to send the int read request
		 * (only in serial mode), then just fall through
		 */
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* read the status register to understand source of the INT_ERR */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		fallthrough;

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		fallthrough;

	case MSB_RP_SEND_READ_DATA:
		/* Skip that state if we only read the oob */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			dbg("read_page: correctable error");
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	BUG();
}

/*
 * Handler for writes of exactly one block.
 * Takes the address from msb->regs.param.
 * Writes the same extra data to all pages of the block, taken
 * from msb->regs.extra_data.
 * Returns -EBADMSG if the write fails due to an uncorrectable error, or
 * -EIO if the device refuses to take the command or something else goes wrong
 */
static int h_msb_write_block(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/* HACK: JMicron handling of TPCs between 8 and
	 * sizeof(memstick_request.data) is broken due to a hardware
	 * bug in PIO mode that is used for these TPCs
	 * Therefore split the write
	 */
	case MSB_WB_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;

	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean out of here, and fast... */
		if (intreg & (MEMSTICK_INT_CMDNAK))
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* for last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		/* for non-last page we need BREQ before writing next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		fallthrough;

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;

	default:
		BUG();
	}

	return 0;
}

/*
 * This function is used to send simple IO requests to the device that
 * consist of a register write + command
 */
static int h_msb_send_command(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {

	/* HACK: see h_msb_write_block */
	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

/* Small handler for card reset */
static int h_msb_reset(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}

/* This handler is used to do serial->parallel switch */
static int h_msb_parallel_switch(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/* Set parallel interface on our side + send a dummy request
		 * to see if card responds
		 */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

static int msb_switch_to_parallel(struct msb_data *msb);

/* Reset the card, to guard against hw errors being treated as bad blocks */
static int msb_reset(struct msb_data *msb, bool full)
{
	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}

/* Attempts to switch interface to parallel mode */
static int msb_switch_to_parallel(struct msb_data *msb)
{
	int error;

	error = msb_run_state_machine(msb, h_msb_parallel_switch);
	if (error) {
		pr_err("Switch to parallel failed");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		msb_reset(msb, true);
		return -EFAULT;
	}

	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
	return 0;
}

/* Changes overwrite flag on a page */
static int msb_set_overwrite_flag(struct msb_data *msb,
						u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;
	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;

	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
							flag, pba, page);
	return msb_run_state_machine(msb, h_msb_send_command);
}

static int msb_mark_bad(struct msb_data *msb, int pba)
{
	pr_notice("marking pba %d as bad", pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(
			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
}

static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
{
	dbg("marking page %d of pba %d as bad", page, pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(msb,
		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
}

/* Erases one physical block */
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;

	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;

		error = msb_run_state_machine(msb, h_msb_send_command);
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
	}

	dbg_verbose("erase success, marking pba %d as unused", pba);
	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}
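
/*
 * Blocks set in erased_blocks_bitmap are trusted to be blank: as the comment
 * in msb_write_block() notes, writes to them are not read back for
 * verification unless the 'verify_writes' module parameter is set.
 */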

/* Reads one page from device */
static int msb_read_page(struct msb_data *msb,
		u16 pba, u8 page, struct ms_extra_data_register *extra,
		struct scatterlist *sg, int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));

		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);

		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		if (!error || msb_reset(msb, true))
			break;
	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}

/* Reads oob of page only */
static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
	struct ms_extra_data_register *extra)
{
	int error;

	BUG_ON(!extra);
	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_EXTRA;

	if (pba > msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
		return -EINVAL;
	}

	error = msb_run_state_machine(msb, h_msb_read_page);
	*extra = msb->regs.extra_data;

	if (error == -EUCLEAN) {
		pr_notice("correctable error on pba %d, page %d",
			pba, page);
		return 0;
	}

	return error;
}

/* Reads a block and compares it with data contained in scatterlist orig_sg */
static int msb_verify_block(struct msb_data *msb, u16 pba,
				struct scatterlist *orig_sg, int offset)
{
	struct scatterlist sg;
	int page = 0, error;

	sg_init_one(&sg, msb->block_buffer, msb->block_size);

	while (page < msb->pages_in_block) {

		error = msb_read_page(msb, pba, page,
				NULL, &sg, page * msb->page_size);
		if (error)
			return error;
		page++;
	}

	if (msb_sg_compare_to_buffer(orig_sg, offset,
				msb->block_buffer, msb->block_size))
		return -EIO;
	return 0;
}

/* Writes exactly one block + oob */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err("BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err("BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* Sector we just wrote to is assumed erased since its pba
		 * was erased. If it wasn't erased, the write will succeed
		 * and will just clear the bits that were set in the block,
		 * thus test that what we have written
		 * matches what we expect.
		 * We do trust the blocks that we erased
		 */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}

/* Finds a free block for write replacement */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("no free blocks in zone %d to use for a write (media is worn out), switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	pba = find_next_zero_bit(msb->used_blocks_bitmap,
							msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
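
/*
 * Selection sketch: with, say, 3 free blocks left in the zone and a random
 * pos of 7, 'pos %= 3' leaves 1, so the scan takes the second zero bit at or
 * after the zone's first pba. Random placement spreads wear over the zone's
 * remaining free blocks (a simple form of wear leveling).
 */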

static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries, switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}
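
/*
 * Update sequence, for reference: clear UDST in the old block's overwrite
 * flag, write the complete new copy to a freshly picked pba, erase the old
 * block, then repoint lba_to_pba_table. If power is lost mid-update, two
 * blocks share one lba, and msb_ftl_scan() below resolves such collisions
 * using the saved UDST bits.
 */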

/* Converts endianness in the boot block for easy use */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}

static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	if (!msb->boot_page) {
		page = kmalloc_array(2, sizeof(struct ms_boot_page),
				     GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn't contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}

static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err("bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad block table */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}

static int msb_ftl_initialize(struct msb_data *msb)
{
	int i;

	if (msb->ftl_initialized)
		return 0;

	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
	msb->logical_block_count = msb->zone_count * 496 - 2;

	msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
	msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
	msb->lba_to_pba_table =
		kmalloc_array(msb->logical_block_count, sizeof(u16),
					GFP_KERNEL);

	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
						!msb->erased_blocks_bitmap) {
		bitmap_free(msb->used_blocks_bitmap);
		bitmap_free(msb->erased_blocks_bitmap);
		kfree(msb->lba_to_pba_table);
		return -ENOMEM;
	}

	for (i = 0; i < msb->zone_count; i++)
		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;

	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
			msb->logical_block_count * sizeof(u16));

	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
		msb->zone_count, msb->logical_block_count);

	msb->ftl_initialized = true;
	return 0;
}
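
/*
 * Worked sizing example (illustrative, assuming a 64 MB card with 16 KiB
 * blocks): block_count = 4096, so zone_count = 4096 / 512 = 8 and
 * logical_block_count = 8 * 496 - 2 = 3966; the two bitmaps then cover
 * 4096 bits each and lba_to_pba_table takes 3966 * 2 bytes.
 */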

static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice("oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has LBA not according to zoning */
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}

static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);

	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}

static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	del_timer_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}

static int msb_cache_init(struct msb_data *msb)
{
	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);

	if (!msb->cache)
		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->cache)
		return -ENOMEM;

	msb_cache_discard(msb);
	return 0;
}

static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache, msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
							MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}
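
/*
 * The write cache holds at most one logical block. Flushing is
 * read-modify-write: pages the host never wrote are read back from the old
 * pba, the assembled block goes out through msb_update_block(), and any
 * page that could not be read back is afterwards marked damaged in its
 * overwrite flag.
 */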

static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write different block */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
		msb->cache_block_lba = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);

	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}

static int msb_cache_read(struct msb_data *msb, int lba,
				int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
			test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
							lba, pba, page);

		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
			msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
							lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}
/* Emulated geometry table
 * The exact contents of this table don't matter much: different values
 * would work too, as long as they still cover the whole disk.
 * The 64 MB entry is what Windows reports for my 64M memstick.
 */
static const struct chs_entry chs_table[] = {
/*        size sectors cylinders heads */
	{  4,   16,    247,      2 },
	{  8,   16,    495,      2 },
	{ 16,   16,    495,      4 },
	{ 32,   16,    991,      4 },
	{ 64,   16,    991,      8 },
	{128,   16,    991,     16 },
	{ 0 }
};
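
/*
 * Worked example: the 64 MB entry advertises 16 sectors * 991 cylinders *
 * 8 heads = 126848 sectors of 512 bytes, i.e. roughly 62 MiB.  The
 * emulated geometry only has to cover the logical capacity exported by
 * the FTL, which sits below the raw media size.
 */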
/* Load information about the card */
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;
	/* ROM, RO and WP class cards are all read-only */
	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_WP)
		msb->read_only = true;
	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/* Due to a bug in the JMicron driver written by Alex Dubov,
	 * its serial mode barely works,
	 * so we switch to parallel mode right away
	 */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save interesting attributes from the boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if ((size_t)msb->page_size > PAGE_SIZE) {
		/* this isn't supported by Linux at all, anyway */
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}
	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	for (i = 0; chs_table[i].size; i++) {
		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);
	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;
}
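
/*
 * Write request: block-aligned runs that span a whole block bypass the
 * page cache and go through msb_update_block() directly (after discarding
 * the cache if it shadows that LBA); everything else is staged one page at
 * a time in the write cache.
 */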
static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *successfully_written)
{
	int error = 0;
	off_t offset = 0;
	*successfully_written = 0;

	while (offset < len) {
		if (page == 0 && len - offset >= msb->block_size) {

			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*successfully_written += msb->block_size;
			lba++;
			continue;
		}

		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_written += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}
static int msb_do_read_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, int len, int *successfully_read)
{
	int error = 0;
	int offset = 0;
	*successfully_read = 0;

	while (offset < len) {

		error = msb_cache_read(msb, lba, page, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_read += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}
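
/*
 * Request sectors are converted to (lba, page) with two divisions, e.g.
 * with 2048-byte pages and 16 pages per block, start sector 36 becomes
 * page unit 36 / (2048 / 512) = 9, then page = 9 % 16 = 9 inside
 * lba = 9 / 16 = 0.
 */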
static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	struct scatterlist *sg = msb->prealloc_sg;
	struct request *req;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irq(&msb->q_lock);

		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irq(&msb->q_lock);
			msb_cache_flush(msb);
			continue;
		}

		req = msb->req;
		if (!req) {
			dbg_verbose("IO: no more requests exiting");
			spin_unlock_irq(&msb->q_lock);
			return;
		}

		spin_unlock_irq(&msb->q_lock);

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, req, sg);

		lba = blk_rq_pos(req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);

		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
			__blk_mq_end_request(req, BLK_STS_OK);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			blk_mq_end_request(req, ret);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");
	}
}
static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */

static void msb_data_clear(struct msb_data *msb)
{
	kfree(msb->boot_page);
	bitmap_free(msb->used_blocks_bitmap);
	bitmap_free(msb->erased_blocks_bitmap);
	kfree(msb->lba_to_pba_table);
	kfree(msb->cache);
	msb->card = NULL;
}

static int msb_bd_getgeo(struct block_device *bdev,
				 struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;
	*geo = msb->geometry;
	return 0;
}

static void msb_bd_free_disk(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);

	kfree(msb);
}
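
/*
 * blk-mq entry point: the driver keeps a single request in flight
 * (msb->req).  If one is already queued, BLK_STS_DEV_RESOURCE asks blk-mq
 * to redispatch once the current request completes; the actual transfer
 * runs from the ordered workqueue via msb_io_work().
 */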
static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct memstick_dev *card = hctx->queue->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = bd->rq;

	dbg_verbose("Submit request");

	spin_lock_irq(&msb->q_lock);

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		spin_unlock_irq(&msb->q_lock);
		blk_mq_start_request(req);
		return BLK_STS_IOERR;
	}

	if (msb->req) {
		spin_unlock_irq(&msb->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(req);
	msb->req = req;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);

	spin_unlock_irq(&msb->q_lock);

	return BLK_STS_OK;
}

static int msb_check_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);

	return (msb->card_dead == 0);
}
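
/*
 * Quiesce IO: stop the hardware queues so nothing new is dispatched,
 * cancel the cache flush timer, drain the worker, and hand any request
 * that was still in flight back to blk-mq for later requeueing.
 */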
static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	blk_mq_stop_hw_queues(msb->queue);
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (msb->req) {
		blk_mq_requeue_request(msb->req, false);
		msb->req = NULL;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);
}

static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick cache flush anyway, it's harmless */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	blk_mq_start_hw_queues(msb->queue);

	queue_work(msb->io_queue, &msb->io_work);
}
static const struct block_device_operations msb_bdops = {
	.owner		= THIS_MODULE,
	.getgeo		= msb_bd_getgeo,
	.free_disk	= msb_bd_free_disk,
};

static const struct blk_mq_ops msb_mq_ops = {
	.queue_rq	= msb_queue_rq,
};
/* Registers the block device */
static int msb_init_disk(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct queue_limits lim = {
		.logical_block_size	= msb->page_size,
		.max_hw_sectors		= MS_BLOCK_MAX_PAGES,
		.max_segments		= MS_BLOCK_MAX_SEGS,
		.max_segment_size	= MS_BLOCK_MAX_PAGES * msb->page_size,
	};
	int rc;
	unsigned long capacity;

	mutex_lock(&msb_disk_lock);
	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
	mutex_unlock(&msb_disk_lock);

	if (msb->disk_id < 0)
		return msb->disk_id;

	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
				     BLK_MQ_F_SHOULD_MERGE);
	if (rc)
		goto out_release_id;

	msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card);
	if (IS_ERR(msb->disk)) {
		rc = PTR_ERR(msb->disk);
		goto out_free_tag_set;
	}
	msb->queue = msb->disk->queue;

	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
	msb->disk->fops = &msb_bdops;
	msb->disk->private_data = msb;

	capacity = msb->pages_in_block * msb->logical_block_count;
	capacity *= (msb->page_size / 512);
	set_capacity(msb->disk, capacity);
	dbg("Set total disk size to %lu sectors", capacity);

	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_disk;
	}

	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);

	rc = device_add_disk(&card->dev, msb->disk, NULL);
	if (rc)
		goto out_destroy_workqueue;
	dbg("Disk added");
	return 0;

out_destroy_workqueue:
	destroy_workqueue(msb->io_queue);
out_cleanup_disk:
	put_disk(msb->disk);
out_free_tag_set:
	blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}
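
/*
 * Driver lifecycle: probe allocates the per-card state, reads the card
 * (msb_init_card) and registers the disk (msb_init_disk); the
 * check/stop/start callbacks are installed only once the disk is up.
 */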
static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}
static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);
	blk_mq_start_hw_queues(msb->queue);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	put_disk(msb->disk);
	memstick_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM

static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}
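
/*
 * Resume: unless CONFIG_MEMSTICK_UNSAFE_RESUME is set, the card is assumed
 * to have been swapped while suspended and is marked dead.  Otherwise the
 * media is re-read into a scratch msb_data and compared against the
 * pre-suspend state (boot page, FTL tables, used-block bitmap); only a
 * full match lets IO continue on the old state.
 */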
static int msb_resume(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct msb_data *new_msb = NULL;
	bool card_dead = true;

#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
	msb->card_dead = true;
	return 0;
#endif
	mutex_lock(&card->host->lock);

	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!new_msb)
		goto out;

	new_msb->card = card;
	memstick_set_drvdata(card, new_msb);
	spin_lock_init(&new_msb->q_lock);
	sg_init_table(new_msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb_init_card(card))
		goto out;

	if (msb->block_size != new_msb->block_size)
		goto out;

	if (memcmp(msb->boot_page, new_msb->boot_page,
					sizeof(struct ms_boot_page)))
		goto out;

	if (msb->logical_block_count != new_msb->logical_block_count ||
		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
						msb->logical_block_count))
		goto out;

	if (msb->block_count != new_msb->block_count ||
		!bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
							msb->block_count))
		goto out;

	card_dead = false;
out:
	if (card_dead)
		dbg("Card was removed/replaced during suspend");

	msb->card_dead = card_dead;
	memstick_set_drvdata(card, msb);

	if (new_msb) {
		msb_data_clear(new_msb);
		kfree(new_msb);
	}

	msb_start(card);
	mutex_unlock(&card->host->lock);
	return 0;
}

#else

#define msb_suspend NULL
#define msb_resume NULL

#endif /* CONFIG_PM */
static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);

static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};
static int __init msb_init(void)
{
	int rc = memstick_register_driver(&msb_driver);

	if (rc)
		pr_err("failed to register memstick driver (error %d)\n", rc);

	return rc;
}

static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}

module_init(msb_init);
module_exit(msb_exit);

module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
				"Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");