  1. // SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
  2. /*
  3. * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
  4. *
  5. * The parts taken from the kernel implementation are:
  6. *
  7. * Copyright (c) International Business Machines Corp., 2006
  8. */
  9. #include <common.h>
  10. #include <errno.h>
  11. #include <ubispl.h>
  12. #include <linux/crc32.h>
  13. #include "ubispl.h"
  14. /**
  15. * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
  16. * @ubi: UBI device description object
  17. */
  18. static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
  19. {
  20. size_t size;
  21. size = sizeof(struct ubi_fm_sb) +
  22. sizeof(struct ubi_fm_hdr) +
  23. sizeof(struct ubi_fm_scan_pool) +
  24. sizeof(struct ubi_fm_scan_pool) +
  25. (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
  26. (sizeof(struct ubi_fm_eba) +
  27. (ubi->peb_count * sizeof(__be32))) +
  28. sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
  29. return roundup(size, ubi->leb_size);
  30. }
  31. static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
  32. unsigned long from, unsigned long len)
  33. {
  34. return ubi->read(pnum + ubi->peb_offset, from, len, buf);
  35. }
  36. static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
  37. {
  38. return peb >= ubi->peb_count || peb < 0;
  39. }
/**
 * ubi_io_read_vid_hdr - read and validate the VID header of a PEB.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number
 * @vh: buffer the header is read into
 * @unused: kept for signature compatibility with the kernel code
 *
 * Results are cached via the @scanned and @corrupt bitmaps: each PEB is
 * read from flash at most once, later calls return from the cache.
 *
 * Returns 0 on success, UBI_IO_FF for an erased block, UBI_IO_BAD_HDR
 * for a corrupted header, or the error code of the flash read.
 */
static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
			       struct ubi_vid_hdr *vh, int unused)
{
	u32 magic;
	int res;

	/* No point in rescanning a corrupt block */
	if (test_bit(pnum, ubi->corrupt))
		return UBI_IO_BAD_HDR;

	/*
	 * If the block has been scanned already, no need to rescan.
	 * test_and_set_bit() marks it scanned as a side effect.
	 */
	if (test_and_set_bit(pnum, ubi->scanned))
		return 0;

	res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));

	/*
	 * Bad block, unrecoverable ECC error, skip the block
	 */
	if (res) {
		ubi_dbg("Skipping bad or unreadable block %d", pnum);
		/* Invalidate the cached header so it can never match */
		vh->magic = 0;
		generic_set_bit(pnum, ubi->corrupt);
		return res;
	}

	/* Magic number available ? */
	magic = be32_to_cpu(vh->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		generic_set_bit(pnum, ubi->corrupt);
		/* All 0xff means the block is erased / empty */
		if (magic == 0xffffffff)
			return UBI_IO_FF;
		ubi_msg("Bad magic in block 0%d %08x", pnum, magic);
		return UBI_IO_BAD_HDR;
	}

	/* Header CRC correct ? */
	if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
	    be32_to_cpu(vh->hdr_crc)) {
		ubi_msg("Bad CRC in block 0%d", pnum);
		generic_set_bit(pnum, ubi->corrupt);
		return UBI_IO_BAD_HDR;
	}

	ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));

	return 0;
}
/**
 * ubi_rescan_fm_vid_hdr - reread a VID header and check it against fastmap.
 * @ubi: UBI device description object
 * @vh: buffer the header is read into
 * @fm_pnum: PEB number according to the fastmap
 * @fm_vol_id: volume id according to the fastmap
 * @fm_lnum: LEB number according to the fastmap
 *
 * Returns 0 when the on-flash header matches the fastmap information,
 * otherwise a negative or UBI_IO_* error code from the header read.
 *
 * NOTE(review): when the header reads back fine but its contents do NOT
 * match the fastmap info, the function still returns @res == 0, i.e.
 * success. Callers then fall back to sqnum arbitration or CRC checks -
 * confirm this is the intended behavior.
 */
static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
				 struct ubi_vid_hdr *vh,
				 u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
{
	int res;

	if (ubi_io_is_bad(ubi, fm_pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
	if (!res) {
		/* Check volume id, volume type and lnum */
		if (be32_to_cpu(vh->vol_id) == fm_vol_id &&
		    vh->vol_type == UBI_VID_STATIC &&
		    be32_to_cpu(vh->lnum) == fm_lnum)
			return 0;

		/* Mismatch: the fastmap information was stale */
		ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
			fm_pnum, fm_vol_id, vh->vol_type,
			be32_to_cpu(vh->vol_id),
			fm_lnum, be32_to_cpu(vh->lnum));
	}
	return res;
}
/*
 * Insert the logic block into the volume info.
 *
 * Records the LEB -> PEB mapping of a scanned block. If the LEB is
 * already mapped, the copy with the higher sequence number wins; blocks
 * which have not been scanned yet are rescanned first so a stale
 * fastmap entry cannot shadow valid data.
 */
static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
			      struct ubi_vid_hdr *vh, u32 vol_id,
			      u32 pnum, u32 lnum)
{
	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
	u32 *ltp;

	/*
	 * If the volume is larger than expected, yell and give up :(
	 */
	if (lnum >= UBI_MAX_VOL_LEBS) {
		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
		return -EINVAL;
	}

	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
		!!test_bit(pnum, ubi->scanned));

	/* Points to the translation entry */
	ltp = vi->lebs_to_pebs + lnum;

	/* If the block is already assigned, check sqnum */
	if (__test_and_set_bit(lnum, vi->found)) {
		u32 cur_pnum = *ltp;
		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;

		/*
		 * If the current block has not yet been scanned, we
		 * need to do that. The other block might be stale or
		 * the current block corrupted and the FM not yet
		 * updated.
		 */
		if (!test_bit(cur_pnum, ubi->scanned)) {
			/*
			 * If the scan fails, we use the valid block
			 */
			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
						  lnum)) {
				*ltp = pnum;
				return 0;
			}
		}

		/*
		 * Should not happen ....
		 */
		if (test_bit(cur_pnum, ubi->corrupt)) {
			*ltp = pnum;
			return 0;
		}

		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
			be64_to_cpu(vh->sqnum));

		/*
		 * Compare sqnum and take the newer one
		 */
		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
			*ltp = pnum;
	} else {
		/* First mapping of this LEB: take it, track the volume size */
		*ltp = pnum;
		if (lnum > vi->last_block)
			vi->last_block = lnum;
	}

	return 0;
}
  164. static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
  165. u32 pnum)
  166. {
  167. u32 vol_id, lnum;
  168. int res;
  169. if (ubi_io_is_bad(ubi, pnum))
  170. return -EINVAL;
  171. res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
  172. if (res)
  173. return res;
  174. /* Get volume id */
  175. vol_id = be32_to_cpu(vh->vol_id);
  176. /* If this is the fastmap anchor, return right away */
  177. if (vol_id == UBI_FM_SB_VOLUME_ID)
  178. return ubi->fm_enabled ? UBI_FASTMAP_ANCHOR : 0;
  179. /* We only care about static volumes with an id < UBI_SPL_VOL_IDS */
  180. if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC)
  181. return 0;
  182. /* We are only interested in the volumes to load */
  183. if (!test_bit(vol_id, ubi->toload))
  184. return 0;
  185. lnum = be32_to_cpu(vh->lnum);
  186. return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum);
  187. }
  188. static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
  189. u32 vol_id, u32 vol_type, u32 used)
  190. {
  191. struct ubi_vid_hdr *vh;
  192. if (ubi_io_is_bad(ubi, pnum))
  193. return -EINVAL;
  194. ubi->fastmap_pebs++;
  195. if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME)
  196. return 0;
  197. /* We are only interested in the volumes to load */
  198. if (!test_bit(vol_id, ubi->toload))
  199. return 0;
  200. vh = ubi->blockinfo + pnum;
  201. return ubi_scan_vid_hdr(ubi, vh, pnum);
  202. }
  203. static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
  204. {
  205. struct ubi_vid_hdr *vh;
  206. u32 pnum;
  207. int i;
  208. ubi_dbg("Scanning pool size: %d", pool_size);
  209. for (i = 0; i < pool_size; i++) {
  210. pnum = be32_to_cpu(pebs[i]);
  211. if (ubi_io_is_bad(ubi, pnum)) {
  212. ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
  213. return UBI_BAD_FASTMAP;
  214. }
  215. vh = ubi->blockinfo + pnum;
  216. /*
  217. * We allow the scan to fail here. The loader will notice
  218. * and look for a replacement.
  219. */
  220. ubi_scan_vid_hdr(ubi, vh, pnum);
  221. }
  222. return 0;
  223. }
/*
 * Fastmap code is stolen from Linux kernel and this stub structure is used
 * to make it happy.
 */
struct ubi_attach_info {
	int i;	/* unused placeholder member */
};
/**
 * ubi_attach_fastmap - parse the raw fastmap and populate the volume info.
 * @ubi: UBI device description object
 * @ai: attach info stub (unused, kept for kernel code compatibility)
 * @fm: fastmap layout; the pool limits are filled in here
 *
 * Walks the fastmap image in ubi->fm_buf: super block, header, the two
 * scan pools, the free/used/scrub/erase EC lists and finally the
 * per-volume EBA tables. PEBs from the used list are assigned to their
 * volumes via assign_aeb_to_av(), then both pools are scanned on flash.
 *
 * Returns 0 on success, UBI_BAD_FASTMAP when the image is inconsistent.
 */
static int ubi_attach_fastmap(struct ubi_scan_info *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	void *fm_raw = ubi->fm_buf;

	memset(ubi->fm_used, 0, sizeof(ubi->fm_used));

	/* Skip the super block; the caller validated it already */
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	/* Sanity-check all pool sizes against the compile time limit */
	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		/* Mark the PEB used; cleared again during the EBA walk */
		generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		u32 vol_id, vol_type, used, reserved;

		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		vol_id = be32_to_cpu(fmvhdr->vol_id);
		vol_type = fmvhdr->vol_type;
		used = be32_to_cpu(fmvhdr->used_ebs);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		/*
		 * NOTE(review): reserved_pebs is dereferenced before the
		 * bounds check below confirms the EBA header itself fits
		 * into the buffer - verify this is acceptable.
		 */
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		reserved = be32_to_cpu(fm_eba->reserved_pebs);
		ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved);

		for (j = 0; j < reserved; j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			/* Negative entry: LEB is unmapped */
			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			/* Skip PEBs which were not on the used list */
			if (!__test_and_clear_bit(pnum, ubi->fm_used))
				continue;

			/*
			 * We only handle static volumes so used_ebs
			 * needs to be handed in. And we do not assign
			 * the reserved blocks
			 */
			if (j >= used)
				continue;

			ret = assign_aeb_to_av(ubi, pnum, j, vol_id,
					       vol_type, used);
			if (!ret)
				continue;

			/*
			 * Nasty: The fastmap claims that the volume
			 * has one block more than it, but that block
			 * is always empty and the other blocks have
			 * the correct number of total LEBs in the
			 * headers. Deal with it.
			 */
			if (ret != UBI_IO_FF && j != used - 1)
				goto fail_bad;
			ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d",
				vol_id, j, used);
		}
	}

	/* Blocks in the pools are not covered by the EBA tables, scan them */
	ret = scan_pool(ubi, fmpl1->pebs, pool_size);
	if (ret)
		goto fail;
	ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size);
	if (ret)
		goto fail;

#ifdef CHECKME
	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;
#endif

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	return ret;
}
  407. static int ubi_scan_fastmap(struct ubi_scan_info *ubi,
  408. struct ubi_attach_info *ai,
  409. int fm_anchor)
  410. {
  411. struct ubi_fm_sb *fmsb, *fmsb2;
  412. struct ubi_vid_hdr *vh;
  413. struct ubi_fastmap_layout *fm;
  414. int i, used_blocks, pnum, ret = 0;
  415. size_t fm_size;
  416. __be32 crc, tmp_crc;
  417. unsigned long long sqnum = 0;
  418. fmsb = &ubi->fm_sb;
  419. fm = &ubi->fm_layout;
  420. ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
  421. if (ret && ret != UBI_IO_BITFLIPS)
  422. goto free_fm_sb;
  423. else if (ret == UBI_IO_BITFLIPS)
  424. fm->to_be_tortured[0] = 1;
  425. if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
  426. ubi_err("bad super block magic: 0x%x, expected: 0x%x",
  427. be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
  428. ret = UBI_BAD_FASTMAP;
  429. goto free_fm_sb;
  430. }
  431. if (fmsb->version != UBI_FM_FMT_VERSION) {
  432. ubi_err("bad fastmap version: %i, expected: %i",
  433. fmsb->version, UBI_FM_FMT_VERSION);
  434. ret = UBI_BAD_FASTMAP;
  435. goto free_fm_sb;
  436. }
  437. used_blocks = be32_to_cpu(fmsb->used_blocks);
  438. if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
  439. ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
  440. ret = UBI_BAD_FASTMAP;
  441. goto free_fm_sb;
  442. }
  443. fm_size = ubi->leb_size * used_blocks;
  444. if (fm_size != ubi->fm_size) {
  445. ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
  446. ubi->fm_size);
  447. ret = UBI_BAD_FASTMAP;
  448. goto free_fm_sb;
  449. }
  450. vh = &ubi->fm_vh;
  451. for (i = 0; i < used_blocks; i++) {
  452. pnum = be32_to_cpu(fmsb->block_loc[i]);
  453. if (ubi_io_is_bad(ubi, pnum)) {
  454. ret = UBI_BAD_FASTMAP;
  455. goto free_hdr;
  456. }
  457. #ifdef LATER
  458. int image_seq;
  459. ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
  460. if (ret && ret != UBI_IO_BITFLIPS) {
  461. ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
  462. i, pnum);
  463. if (ret > 0)
  464. ret = UBI_BAD_FASTMAP;
  465. goto free_hdr;
  466. } else if (ret == UBI_IO_BITFLIPS)
  467. fm->to_be_tortured[i] = 1;
  468. image_seq = be32_to_cpu(ech->image_seq);
  469. if (!ubi->image_seq)
  470. ubi->image_seq = image_seq;
  471. /*
  472. * Older UBI implementations have image_seq set to zero, so
  473. * we shouldn't fail if image_seq == 0.
  474. */
  475. if (image_seq && (image_seq != ubi->image_seq)) {
  476. ubi_err("wrong image seq:%d instead of %d",
  477. be32_to_cpu(ech->image_seq), ubi->image_seq);
  478. ret = UBI_BAD_FASTMAP;
  479. goto free_hdr;
  480. }
  481. #endif
  482. ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
  483. if (ret && ret != UBI_IO_BITFLIPS) {
  484. ubi_err("unable to read fastmap block# %i (PEB: %i)",
  485. i, pnum);
  486. goto free_hdr;
  487. }
  488. /*
  489. * Mainline code rescans the anchor header. We've done
  490. * that already so we merily copy it over.
  491. */
  492. if (pnum == fm_anchor)
  493. memcpy(vh, ubi->blockinfo + pnum, sizeof(*fm));
  494. if (i == 0) {
  495. if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
  496. ubi_err("bad fastmap anchor vol_id: 0x%x," \
  497. " expected: 0x%x",
  498. be32_to_cpu(vh->vol_id),
  499. UBI_FM_SB_VOLUME_ID);
  500. ret = UBI_BAD_FASTMAP;
  501. goto free_hdr;
  502. }
  503. } else {
  504. if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
  505. ubi_err("bad fastmap data vol_id: 0x%x," \
  506. " expected: 0x%x",
  507. be32_to_cpu(vh->vol_id),
  508. UBI_FM_DATA_VOLUME_ID);
  509. ret = UBI_BAD_FASTMAP;
  510. goto free_hdr;
  511. }
  512. }
  513. if (sqnum < be64_to_cpu(vh->sqnum))
  514. sqnum = be64_to_cpu(vh->sqnum);
  515. ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
  516. ubi->leb_start, ubi->leb_size);
  517. if (ret && ret != UBI_IO_BITFLIPS) {
  518. ubi_err("unable to read fastmap block# %i (PEB: %i, " \
  519. "err: %i)", i, pnum, ret);
  520. goto free_hdr;
  521. }
  522. }
  523. fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
  524. tmp_crc = be32_to_cpu(fmsb2->data_crc);
  525. fmsb2->data_crc = 0;
  526. crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
  527. if (crc != tmp_crc) {
  528. ubi_err("fastmap data CRC is invalid");
  529. ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
  530. ret = UBI_BAD_FASTMAP;
  531. goto free_hdr;
  532. }
  533. fmsb2->sqnum = sqnum;
  534. fm->used_blocks = used_blocks;
  535. ret = ubi_attach_fastmap(ubi, ai, fm);
  536. if (ret) {
  537. if (ret > 0)
  538. ret = UBI_BAD_FASTMAP;
  539. goto free_hdr;
  540. }
  541. ubi->fm = fm;
  542. ubi->fm_pool.max_size = ubi->fm->max_pool_size;
  543. ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
  544. ubi_msg("attached by fastmap %uMB %u blocks",
  545. ubi->fsize_mb, ubi->peb_count);
  546. ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size);
  547. ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
  548. out:
  549. if (ret)
  550. ubi_err("Attach by fastmap failed, doing a full scan!");
  551. return ret;
  552. free_hdr:
  553. free_fm_sb:
  554. goto out;
  555. }
/*
 * Scan the flash and attempt to attach via fastmap.
 *
 * First the fastmap anchor is searched in the first UBI_FM_MAX_START
 * PEBs. If found and the attach succeeds, we are done. Otherwise the
 * whole device is scanned header by header.
 */
static void ipl_scan(struct ubi_scan_info *ubi)
{
	unsigned int pnum;
	int res;

	/*
	 * Scan first for the fastmap super block
	 */
	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
		/*
		 * We ignore errors here as we are merely scanning
		 * the headers.
		 */
		if (res != UBI_FASTMAP_ANCHOR)
			continue;

		/*
		 * If fastmap is disabled, continue scanning. This
		 * might happen because the previous attempt failed or
		 * the caller disabled it right away.
		 */
		if (!ubi->fm_enabled)
			continue;

		/*
		 * Try to attach the fastmap, if that fails continue
		 * scanning.
		 */
		if (!ubi_scan_fastmap(ubi, NULL, pnum))
			return;

		/*
		 * Fastmap failed. Clear everything we have and start
		 * over. We are paranoid and do not trust anything.
		 */
		memset(ubi->volinfo, 0, sizeof(ubi->volinfo));
		/* Restart the full scan below from PEB 0 */
		pnum = 0;
		break;
	}

	/*
	 * Continue scanning, ignore errors, we might find what we are
	 * looking for.
	 */
	for (; pnum < ubi->peb_count; pnum++)
		ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
}
/*
 * Load a logical block of a volume into memory.
 *
 * @laddr is the target RAM address, @vi the volume info, @lnum the LEB
 * to load and @last the expected total number of blocks of the volume.
 *
 * Returns the number of data bytes read, or -EINVAL when no usable copy
 * of the LEB could be found. When the mapped PEB turns out to be bad,
 * stale or CRC-broken, all PEBs are searched for the newest valid copy
 * (find_other) and the load is retried.
 */
static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr,
			  struct ubi_vol_info *vi, u32 vol_id, u32 lnum,
			  u32 last)
{
	struct ubi_vid_hdr *vh, *vrepl;
	u32 pnum, crc, dlen;

retry:
	/*
	 * If this is a fastmap run, we try to rescan full, otherwise
	 * we simply give up.
	 */
	if (!test_bit(lnum, vi->found)) {
		ubi_warn("LEB %d of %d is missing", lnum, last);
		return -EINVAL;
	}

	pnum = vi->lebs_to_pebs[lnum];

	ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum);

	if (ubi_io_is_bad(ubi, pnum)) {
		ubi_warn("Corrupted mapping block %d PB %d\n", lnum, pnum);
		return -EINVAL;
	}

	if (test_bit(pnum, ubi->corrupt))
		goto find_other;

	/*
	 * Lets try to read that block
	 */
	vh = ubi->blockinfo + pnum;

	if (!test_bit(pnum, ubi->scanned)) {
		ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id,
			 lnum, pnum);
		if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum))
			goto find_other;
	}

	/*
	 * Check, if the total number of blocks is correct
	 */
	if (be32_to_cpu(vh->used_ebs) != last) {
		ubi_dbg("Block count missmatch.");
		ubi_dbg("vh->used_ebs: %d nrblocks: %d",
			be32_to_cpu(vh->used_ebs), last);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/*
	 * Get the data length of this block.
	 */
	dlen = be32_to_cpu(vh->data_size);

	/*
	 * Read the data into RAM. We ignore the return value
	 * here as the only thing which might go wrong are
	 * bitflips. Try nevertheless.
	 */
	ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen);

	/* Calculate CRC over the data */
	crc = crc32(UBI_CRC32_INIT, laddr, dlen);

	if (crc != be32_to_cpu(vh->data_crc)) {
		ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id,
			 lnum, pnum);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/* We are good. Return the data length we read */
	return dlen;

find_other:
	/* The mapped PEB was unusable: search all PEBs for the newest copy */
	ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum);
	generic_clear_bit(lnum, vi->found);
	vrepl = NULL;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum;
		u32 t_vol_id = be32_to_cpu(tmp->vol_id);
		u32 t_lnum = be32_to_cpu(tmp->lnum);

		if (test_bit(pnum, ubi->corrupt))
			continue;

		if (t_vol_id != vol_id || t_lnum != lnum)
			continue;

		if (!test_bit(pnum, ubi->scanned)) {
			ubi_warn("Vol: %u LEB %u PEB %u not yet scanned",
				 vol_id, lnum, pnum);
			if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum))
				continue;
		}

		/*
		 * We found one. If its the first, assign it otherwise
		 * compare the sqnum
		 */
		generic_set_bit(lnum, vi->found);
		if (!vrepl) {
			vrepl = tmp;
			continue;
		}
		if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum))
			vrepl = tmp;
	}

	if (vrepl) {
		/* Update the vi table */
		pnum = vrepl - ubi->blockinfo;
		vi->lebs_to_pebs[lnum] = pnum;
		ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum);
		vh = vrepl;
	}
	/*
	 * No replacement found: the retry path fails via the
	 * !test_bit(lnum, vi->found) check at the top and returns -EINVAL.
	 */
	goto retry;
}
  707. /*
  708. * Load a volume into RAM
  709. */
  710. static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr)
  711. {
  712. struct ubi_vol_info *vi;
  713. u32 lnum, last, len;
  714. if (vol_id >= UBI_SPL_VOL_IDS)
  715. return -EINVAL;
  716. len = 0;
  717. vi = ubi->volinfo + vol_id;
  718. last = vi->last_block + 1;
  719. /* Read the blocks to RAM, check CRC */
  720. for (lnum = 0 ; lnum < last; lnum++) {
  721. int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last);
  722. if (res < 0) {
  723. ubi_warn("Failed to load volume %u", vol_id);
  724. return res;
  725. }
  726. /* res is the data length of the read block */
  727. laddr += res;
  728. len += res;
  729. }
  730. return len;
  731. }
  732. int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
  733. int nrvols)
  734. {
  735. struct ubi_scan_info *ubi = info->ubi;
  736. int res, i, fastmap = info->fastmap;
  737. u32 fsize;
  738. retry:
  739. /*
  740. * We do a partial initializiation of @ubi. Cleaning fm_buf is
  741. * not necessary.
  742. */
  743. memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));
  744. ubi->read = info->read;
  745. /* Precalculate the offsets */
  746. ubi->vid_offset = info->vid_offset;
  747. ubi->leb_start = info->leb_start;
  748. ubi->leb_size = info->peb_size - ubi->leb_start;
  749. ubi->peb_count = info->peb_count;
  750. ubi->peb_offset = info->peb_offset;
  751. fsize = info->peb_size * info->peb_count;
  752. ubi->fsize_mb = fsize >> 20;
  753. /* Fastmap init */
  754. ubi->fm_size = ubi_calc_fm_size(ubi);
  755. ubi->fm_enabled = fastmap;
  756. for (i = 0; i < nrvols; i++) {
  757. struct ubispl_load *lv = lvols + i;
  758. generic_set_bit(lv->vol_id, ubi->toload);
  759. }
  760. ipl_scan(ubi);
  761. for (i = 0; i < nrvols; i++) {
  762. struct ubispl_load *lv = lvols + i;
  763. ubi_msg("Loading VolId #%d", lv->vol_id);
  764. res = ipl_load(ubi, lv->vol_id, lv->load_addr);
  765. if (res < 0) {
  766. if (fastmap) {
  767. fastmap = 0;
  768. goto retry;
  769. }
  770. ubi_warn("Failed");
  771. return res;
  772. }
  773. }
  774. return 0;
  775. }