eba.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) sub-system.
 *
 * This sub-system is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which implies poor scalability, it might be (partially) maintained on
 * flash in future implementations.
 *
 * The EBA sub-system implements per-logical eraseblock locking. Before
 * accessing a logical eraseblock it is locked for reading or writing. The
 * per-logical eraseblock locking is implemented by means of the lock tree. The
 * lock tree is an RB-tree which refers to all the currently locked logical
 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
 * They are indexed by (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only increased and we
 * assume 64 bits is enough for it never to overflow.
 */

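/*
 * Illustrative sketch (not part of this file): the locking primitives defined
 * below are meant to be paired around LEB I/O, keyed by the (@vol_id, @lnum)
 * pair described above:
 *
 *	err = leb_write_lock(ubi, vol_id, lnum);   // creates or reuses the
 *	if (err)                                   // lock tree entry
 *		return err;
 *	// ... modify the LEB mapping or write data ...
 *	leb_write_unlock(ubi, vol_id, lnum);       // frees the entry once the
 *	                                           // last user drops it
 */
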
#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/crc32.h>
#else
#include <ubi_uboot.h>
#endif
#include <linux/err.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * ubi_next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}

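/*
 * Illustrative sketch, mirroring the write paths later in this file: every
 * time a LEB is (re)mapped, the fresh sequence number is stored big-endian in
 * the VID header:
 *
 *	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 */
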
/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}

/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
 * lock tree. If such entry is already there, its usage counter is increased.
 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
 * failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	kfree(le_free);
	return le;
}

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_read(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_write(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

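/*
 * Illustrative sketch: the non-waiting variant is used by the wear-levelling
 * move path further down ('ubi_eba_copy_leb()'), which must not sleep on the
 * LEB lock:
 *
 *	err = leb_write_trylock(ubi, vol_id, lnum);
 *	if (err)		// contention (%1) or an error - do not wait
 *		return MOVE_RETRY;
 */
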
/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules corresponding
 * physical eraseblock for erasure. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	down_read(&ubi->fm_eba_sem);
	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
	up_read(&ubi->fm_eba_sem);
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

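/*
 * Illustrative sketch: un-mapping followed by a zero-length write re-maps a
 * LEB to a fresh PEB; this exact pattern is used by
 * 'ubi_eba_atomic_leb_change()' below for the @len == 0 special case:
 *
 *	err = ubi_eba_unmap_leb(ubi, vol, lnum);
 *	if (err)
 *		return err;
 *	return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
 */
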
/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
 */
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t uninitialized_var(crc);

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
		if (!vid_hdr) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_HDR_EBADMSG ||
				    err == UBI_IO_BAD_HDR) {
					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
						 pnum, vol_id, lnum);
					err = -EBADMSG;
				} else {
					err = -EINVAL;
					ubi_ro_mode(ubi);
				}
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_hdr(ubi, vid_hdr);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS)
			scrub = 1;
		else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg(ubi, "force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

		if (crc1 != crc) {
			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_hdr(ubi, vid_hdr);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}

#ifndef __UBOOT__
/**
 * ubi_eba_read_leb_sg - read data into a scatter gather list.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @sgl: UBI scatter gather list to store the read data
 * @lnum: logical eraseblock number
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * This function works exactly like ubi_eba_read_leb(). But instead of
 * storing the read data into a buffer it writes to an UBI scatter gather
 * list.
 */
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
			struct ubi_sgl *sgl, int lnum, int offset, int len,
			int check)
{
	int to_read;
	int ret;
	struct scatterlist *sg;

	for (;;) {
		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
		sg = &sgl->sg[sgl->list_pos];
		if (len < sg->length - sgl->page_pos)
			to_read = len;
		else
			to_read = sg->length - sgl->page_pos;

		ret = ubi_eba_read_leb(ubi, vol, lnum,
				       sg_virt(sg) + sgl->page_pos, offset,
				       to_read, check);
		if (ret < 0)
			return ret;

		offset += to_read;
		len -= to_read;
		if (!len) {
			sgl->page_pos += to_read;
			if (sgl->page_pos == sg->length) {
				sgl->list_pos++;
				sgl->page_pos = 0;
			}

			break;
		}

		sgl->list_pos++;
		sgl->page_pos = 0;
	}

	return ret;
}
#endif

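/*
 * Illustrative sketch (hypothetical caller; @buf is an assumed buffer of at
 * least @vol->usable_leb_size bytes): a whole-LEB read into a plain buffer.
 * @check is only honoured for static volumes:
 *
 *	err = ubi_eba_read_leb(ubi, vol, lnum, buf, 0,
 *			       vol->usable_leb_size, 0);
 *	if (err == -EBADMSG)
 *		// data CRC mismatch or an ECC error reported by MTD
 */
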
/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns new physical eraseblock number in case of success, and a negative
 * error code in case of failure.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

retry:
	new_pnum = ubi_wl_get_peb(ubi);
	if (new_pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		up_read(&ubi->fm_eba_sem);
		return new_pnum;
	}

	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
		pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		up_read(&ubi->fm_eba_sem);
		goto out_put;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
	if (err) {
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	data_size = offset + len;
	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS) {
			up_read(&ubi->fm_eba_sem);
			goto out_unlock;
		}
	}

	memcpy(ubi->peb_buf + offset, buf, len);

	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
	if (err) {
		mutex_unlock(&ubi->buf_mutex);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	mutex_unlock(&ubi->buf_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);

	vol->eba_tbl[lnum] = new_pnum;
	up_read(&ubi->fm_eba_sem);
	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);

	ubi_msg(ubi, "data was successfully recovered");
	return 0;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_put:
	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	/*
	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
	 * get another one.
	 */
	ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
	if (++tries > UBI_IO_RETRIES) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}
	ubi_msg(ubi, "try again");
	goto retry;
}

/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be garbage.
 */
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
			if (err)
				ubi_ro_mode(ubi);
		}
		leb_write_unlock(ubi, vol_id, lnum);
		return err;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		up_read(&ubi->fm_eba_sem);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
				 len, offset, vol_id, lnum, pnum);
			up_read(&ubi->fm_eba_sem);
			goto write_error;
		}
	}

	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_eba_sem);

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Fortunately, this is the first write operation to this physical
	 * eraseblock, so just put it and request a new one. We assume that if
	 * this physical eraseblock went bad, the erase code will handle that.
	 */
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg(ubi, "try another PEB");
	goto retry;
}

/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
 * the real data size, although the @buf buffer has to contain the alignment
 * padding. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int used_ebs)
{
	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		up_read(&ubi->fm_eba_sem);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
		len, vol_id, lnum, pnum, used_ebs);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
			 len, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	ubi_assert(vol->eba_tbl[lnum] < 0);
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_eba_sem);

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg(ubi, "try another PEB");
	goto retry;
}

/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one PEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
	}

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		err = pnum;
		up_read(&ubi->fm_eba_sem);
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
			 len, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	old_pnum = vol->eba_tbl[lnum];
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_eba_sem);

	if (old_pnum >= 0) {
		err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
		if (err)
			goto out_leb_unlock;
	}

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg(ubi, "try another PEB");
	goto retry;
}

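/*
 * Illustrative sketch (hypothetical caller; @data and @aligned_len are
 * assumed to satisfy the alignment rule documented above): atomically replace
 * a LEB's contents, so an unclean reboot leaves either the old or the new
 * data:
 *
 *	err = ubi_eba_atomic_leb_change(ubi, vol, lnum, data, aligned_len);
 */
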
/**
 * is_error_sane - check whether a read error is sane.
 * @err: code of the error that happened during reading
 *
 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
 * cannot read data from the target PEB (an error @err happened). If the error
 * code is sane, then we treat this error as non-fatal. Otherwise the error is
 * fatal and UBI will be switched to R/O mode later.
 *
 * The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
 * mode, simply because we do not know what happened at the MTD level, and we
 * cannot handle this. E.g., the underlying driver may have become crazy, and
 * it is safer to switch to R/O mode to preserve the data.
 *
 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
 * which we have just written.
 */
static int is_error_sane(int err)
{
	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
		return 0;
	return 1;
}

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vid_hdr: VID header of the @from physical eraseblock
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 *   o %0 in case of success;
 *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
 *   o a negative error code in case of failure.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_hdr *vid_hdr)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_volume *vol;
	uint32_t crc;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since the
	 * volume deletion un-maps all the volume's logical eraseblocks, it
	 * will be locked in 'ubi_wl_put_peb()' and wait for the WL worker to
	 * finish.
	 */
	vol = ubi->volumes[idx];
	spin_unlock(&ubi->volumes_lock);
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_wl("volume %d is being removed, cancel", vol_id);
		return MOVE_CANCEL_RACE;
	}

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes to sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and would go to sleep on the LEB lock. So,
	 * if the LEB is already locked, we just do not move it and return
	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
	 * we do not know the reason for the contention - it may be just
	 * normal I/O on this LEB, so we want to re-try.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
		return MOVE_RETRY;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl[lnum] != from) {
		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
		       vol_id, lnum, from, vol->eba_tbl[lnum]);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_wl("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn(ubi, "error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because later they may be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

	/*
	 * It may turn out to be that the whole @from physical eraseblock
	 * contains only 0xFF bytes. Then we have to only write the VID header
	 * and do not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
				 err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		cond_resched();

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		memset(ubi->peb_buf, 0xFF, aldata_size);
		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS) {
				ubi_warn(ubi, "error %d while reading data back from PEB %d",
					 err, to);
				if (is_error_sane(err))
					err = MOVE_TARGET_RD_ERR;
			} else
				err = MOVE_TARGET_BITFLIPS;
			goto out_unlock_buf;
		}

		cond_resched();

		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
			ubi_warn(ubi, "read data back from PEB %d and it is different",
				 to);
			err = -EINVAL;
			goto out_unlock_buf;
		}
	}

	ubi_assert(vol->eba_tbl[lnum] == from);
	down_read(&ubi->fm_eba_sem);
	vol->eba_tbl[lnum] = to;
	up_read(&ubi->fm_eba_sem);

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. This function makes a
 * decision whether we have to print a warning or not. The algorithm is as
 * follows:
 *   o if this is a new UBI image, then just print the warning
 *   o if this is an UBI image which has already been used for some time,
 *     print a warning only if we can reserve less than 10% of the expected
 *     number of reserved PEBs.
 *
 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
 * of PEBs becomes smaller, which is normal and we do not want to scare users
 * with a warning every time they attach the MTD device. This was an issue
 * reported by real users.
 */
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_attach_info *ai)
{
	/*
	 * The 1 << 18 (256K) threshold is picked arbitrarily, just a
	 * reasonably large number to distinguish between newly flashed and
	 * used images.
	 */
	if (ai->max_sqnum > (1 << 18)) {
		int min = ubi->beb_rsvd_level / 10;

		if (!min)
			min = 1;
		if (ubi->beb_rsvd_pebs > min)
			return;
	}

	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn(ubi, "%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}

/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry was found it will be printed out and
 * ubi_assert() triggers.
 */
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
		   struct ubi_attach_info *ai_scan)
{
	int i, j, num_volumes, ret = 0;
	int **scan_eba, **fm_eba;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
	if (!scan_eba)
		return -ENOMEM;

	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
	if (!fm_eba) {
		kfree(scan_eba);
		return -ENOMEM;
	}

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
				      GFP_KERNEL);
		if (!scan_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
				    GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			scan_eba[i][aeb->lnum] = aeb->pnum;

		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			fm_eba[i][aeb->lnum] = aeb->pnum;

		for (j = 0; j < vol->reserved_pebs; j++) {
			if (scan_eba[i][j] != fm_eba[i][j]) {
				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
					continue;

				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, i, fm_eba[i][j],
					scan_eba[i][j]);
				ubi_assert(0);
			}
		}
	}

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;

		kfree(scan_eba[i]);
		kfree(fm_eba[i]);
	}

	kfree(scan_eba);
	kfree(fm_eba);
	return ret;
}

/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int i, j, err, num_volumes;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = ai->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
			if (aeb->lnum >= vol->reserved_pebs)
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_move_aeb_to_list(av, aeb, &ai->erase);
			else
				vol->eba_tbl[aeb->lnum] = aeb->pnum;
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, ai);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
		ubi->volumes[i]->eba_tbl = NULL;
	}
	return err;
}
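
/*
 * Illustrative sketch (not part of this file): after initialization, the
 * per-volume EBA table maps LEB numbers to PEB numbers, with
 * %UBI_LEB_UNMAPPED marking holes - the same convention the I/O paths above
 * rely on:
 *
 *	int pnum = vol->eba_tbl[lnum];
 *	if (pnum < 0)
 *		// the LEB is not mapped to any physical eraseblock
 */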