/* zmap.c — EROFS logical-to-physical extent mapping for compressed inodes */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. #include "internal.h"
  3. static int z_erofs_do_map_blocks(struct erofs_inode *vi,
  4. struct erofs_map_blocks *map,
  5. int flags);
  6. int z_erofs_fill_inode(struct erofs_inode *vi)
  7. {
  8. if (!erofs_sb_has_big_pcluster() &&
  9. !erofs_sb_has_ztailpacking() && !erofs_sb_has_fragments() &&
  10. vi->datalayout == EROFS_INODE_COMPRESSED_FULL) {
  11. vi->z_advise = 0;
  12. vi->z_algorithmtype[0] = 0;
  13. vi->z_algorithmtype[1] = 0;
  14. vi->z_logical_clusterbits = sbi.blkszbits;
  15. vi->flags |= EROFS_I_Z_INITED;
  16. }
  17. return 0;
  18. }
/*
 * Lazily parse the on-disk z_erofs map header for @vi on first use.
 *
 * Fills in z_advise, the algorithm types, the logical cluster bits and
 * (when present) tail-packing / fragment information, then sets
 * EROFS_I_Z_INITED so subsequent calls return immediately.
 * Returns 0 on success or a negative errno (-EIO, -EOPNOTSUPP,
 * -EFSCORRUPTED).
 */
static int z_erofs_fill_inode_lazy(struct erofs_inode *vi)
{
	int ret;
	erofs_off_t pos;
	struct z_erofs_map_header *h;
	char buf[sizeof(struct z_erofs_map_header)];

	if (vi->flags & EROFS_I_Z_INITED)
		return 0;

	/* the map header sits 8-byte aligned right after inode + xattrs */
	pos = round_up(iloc(vi->nid) + vi->inode_isize + vi->xattr_isize, 8);
	ret = erofs_dev_read(0, buf, pos, sizeof(buf));
	if (ret < 0)
		return -EIO;

	h = (struct z_erofs_map_header *)buf;
	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The rest bits keeps z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto out;
	}

	vi->z_advise = le16_to_cpu(h->h_advise);
	/* two 4-bit algorithm slots are packed into one byte */
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err("unknown compression format %u for nid %llu",
			  vi->z_algorithmtype[0], (unsigned long long)vi->nid);
		return -EOPNOTSUPP;
	}

	vi->z_logical_clusterbits = sbi.blkszbits + (h->h_clusterbits & 7);
	/* compact indexes require the BIG_PCLUSTER_1/2 bits to agree */
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err("big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid * 1ULL);
		return -EFSCORRUPTED;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = { .index = UINT_MAX };

		/* locate the tail extent to validate the inline pcluster */
		vi->idata_size = le16_to_cpu(h->h_idata_size);
		ret = z_erofs_do_map_blocks(vi, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		if (!map.m_plen ||
		    erofs_blkoff(map.m_pa) + map.m_plen > erofs_blksiz()) {
			erofs_err("invalid tail-packing pclustersize %llu",
				  map.m_plen | 0ULL);
			return -EFSCORRUPTED;
		}
		if (ret < 0)
			return ret;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = { .index = UINT_MAX };

		/* low 32 bits here; FINDTAIL fills the high half later */
		vi->fragmentoff = le32_to_cpu(h->h_fragmentoff);
		ret = z_erofs_do_map_blocks(vi, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		if (ret < 0)
			return ret;
	}
out:
	vi->flags |= EROFS_I_Z_INITED;
	return 0;
}
/* per-lookup state threaded through the lcluster decoding helpers */
struct z_erofs_maprecorder {
	struct erofs_inode *inode;	/* inode being mapped */
	struct erofs_map_blocks *map;	/* result being assembled */
	void *kaddr;			/* metadata buffer (map->mpage) */
	unsigned long lcn;		/* current logical cluster number */
	/* compression extent information gathered */
	u8 type, headtype;	/* current lcluster type / its HEAD's type */
	u16 clusterofs;		/* logical offset within the lcluster */
	u16 delta[2];		/* lookback / lookahead distances */
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;	/* offset right after this record */
	bool partialref;	/* extent carries Z_EROFS_LI_PARTIAL_REF */
};
  98. static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
  99. erofs_blk_t eblk)
  100. {
  101. int ret;
  102. struct erofs_map_blocks *const map = m->map;
  103. char *mpage = map->mpage;
  104. if (map->index == eblk)
  105. return 0;
  106. ret = erofs_blk_read(mpage, eblk, 1);
  107. if (ret < 0)
  108. return -EIO;
  109. map->index = eblk;
  110. return 0;
  111. }
/*
 * Read one full-layout (non-compact) lcluster index for @lcn and record
 * its fields into @m.
 *
 * Returns 0 on success, -EIO on a failed read, -EFSCORRUPTED on a bogus
 * CBLKCNT without big pcluster support, or -EOPNOTSUPP on an unknown
 * lcluster type.
 */
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct erofs_inode *const vi = m->inode;
	const erofs_off_t ibase = iloc(vi->nid);
	/* full indexes are a flat array after the aligned inode body */
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(ibase +
			vi->inode_isize + vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			/* delta[0] carries a pcluster block count instead */
			if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] &
				~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		if (advise & Z_EROFS_LI_PARTIAL_REF)
			m->partialref = true;
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}
  161. static unsigned int decode_compactedbits(unsigned int lobits,
  162. unsigned int lomask,
  163. u8 *in, unsigned int pos, u8 *type)
  164. {
  165. const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
  166. const unsigned int lo = v & lomask;
  167. *type = (v >> lobits) & 3;
  168. return lo;
  169. }
  170. static int get_compacted_la_distance(unsigned int lclusterbits,
  171. unsigned int encodebits,
  172. unsigned int vcnt, u8 *in, int i)
  173. {
  174. const unsigned int lomask = (1 << lclusterbits) - 1;
  175. unsigned int lo, d1 = 0;
  176. u8 type;
  177. DBG_BUGON(i >= vcnt);
  178. do {
  179. lo = decode_compactedbits(lclusterbits, lomask,
  180. in, encodebits * i, &type);
  181. if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
  182. return d1;
  183. ++d1;
  184. } while (++i < vcnt);
  185. /* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
  186. if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
  187. d1 += lo - 1;
  188. return d1;
  189. }
/*
 * Decode one lcluster record out of a compacted index pack into @m.
 *
 * A pack groups @vcnt lclusters into (vcnt << amortizedshift) bytes whose
 * trailing __le32 holds the pack's base blkaddr.  @pos is the byte
 * position of the wanted lcluster; @lookahead also computes delta[1] for
 * NONHEAD records.  Returns 0, -EOPNOTSUPP on an unsupported pack shape,
 * or -EFSCORRUPTED on inconsistent big-pcluster data.
 */
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	/* only 4B/2-lcluster and 2B/16-lcluster packs are defined */
	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/* it doesn't equal to round_up(..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	/* bits per record once the trailing blkaddr word is excluded */
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	/* slot index of the wanted lcluster inside this pack */
	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			/* lo carries the pcluster block count instead */
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * since the last lcluster in the pack is special,
		 * of which lo saves delta[1] rather than delta[0].
		 * Hence, get delta[0] by the previous lcluster indirectly.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figout out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		/* each preceding HEAD in the pack takes one block */
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;
			if (i >= 0)
				++nblk;
		}
	} else {
		/* big pcluster: sum up CBLKCNT block counts instead */
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				if (lo <= 1) {
					DBG_BUGON(1);
					/* --i; ++nblk; continue; */
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}
/*
 * Locate and decode the compacted index record for @lcn.
 *
 * The compacted index area starts 8-byte aligned after the map header:
 * an optional run of 4B-amortized records (to reach 32-byte alignment),
 * then an optional 2B-amortized run, then 4B records for the remainder.
 * Returns 0 or a negative errno.
 */
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = round_up(iloc(vi->nid) + vi->inode_isize +
					   vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = BLK_ROUND_UP(vi->i_size);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	/* compact mode only supports 12-bit logical clusters */
	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}
  340. static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
  341. unsigned int lcn, bool lookahead)
  342. {
  343. const unsigned int datamode = m->inode->datalayout;
  344. if (datamode == EROFS_INODE_COMPRESSED_FULL)
  345. return legacy_load_cluster_from_disk(m, lcn);
  346. if (datamode == EROFS_INODE_COMPRESSED_COMPACT)
  347. return compacted_load_cluster_from_disk(m, lcn, lookahead);
  348. return -EINVAL;
  349. }
/*
 * Walk back @lookback_distance lclusters from m->lcn to locate the HEAD
 * lcluster of the current extent, recursing across chained NONHEAD
 * records.  On success, m->headtype and map->m_la describe the head.
 * Returns 0, -EFSCORRUPTED on bogus distances, or -EOPNOTSUPP on an
 * unknown lcluster type.
 */
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err("bogus lookback distance @ nid %llu",
			  (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err("invalid lookback distance 0 @ nid %llu",
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		/* still not at the head; keep walking back */
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		m->headtype = m->type;
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err("unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * Determine the compressed (on-disk) length of the pcluster headed at
 * m->lcn and store it in map->m_plen.  Big pclusters need the CBLKCNT
 * kept in the 1st NONHEAD lcluster right after the head; otherwise the
 * pcluster is exactly one lcluster long.
 */
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1);
	/* non-big pclusters always occupy a single lcluster */
	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
		map->m_plen = 1 << lclusterbits;
		return 0;
	}

	lcn = m->lcn + 1;
	/* CBLKCNT may already be known from the initial lookup */
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially w/o
	 * valid compressedblks, which means at least it mustn't be CBLKCNT, or
	 * an internal implemenatation error is detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - sbi.blkszbits);
		break;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		/* fallthrough */
	default:
		erofs_err("cannot found CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid | 0ULL);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = m->compressedblks << sbi.blkszbits;
	return 0;
err_bonus_cblkcnt:
	erofs_err("bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid | 0ULL);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}
/*
 * Extend map->m_llen to the full decompressed extent length by scanning
 * forward (using the delta[1] lookahead distance) until the next HEAD
 * lcluster or EOF.  Used by the FIEMAP path to report whole extents.
 */
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= vi->i_size) {
			map->m_llen = vi->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* a mid-extent NONHEAD must provide a lookahead */
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err("unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn | 0ULL,
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}
/*
 * Core mapper: translate map->m_la (or the file tail when FINDTAIL is
 * set) into the physical extent of its pcluster.
 *
 * Fills map->m_la/m_llen/m_pa/m_plen/m_flags/m_algorithmformat; in
 * FINDTAIL mode it also records z_idataoff, z_tailextent_headlcn and the
 * fragmentoff high half into @vi.  Returns 0 or a negative errno.
 */
static int z_erofs_do_map_blocks(struct erofs_inode *vi,
				 struct erofs_map_blocks *map,
				 int flags)
{
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = vi,
		.map = map,
		.kaddr = map->mpage,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	/* FINDTAIL maps the very last byte instead of the requested one */
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? vi->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;
	switch (m.type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		if (endoff >= m.clusterofs) {
			/* the offset lies within this HEAD's own extent */
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err("invalid logical cluster 0 at nid %llu",
				  (unsigned long long)vi->nid);
			err = -EFSCORRUPTED;
			goto out;
		}
		/* otherwise it belongs to the previous extent: look back */
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		/* fallthrough */
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		/* get the correspoinding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto out;
		break;
	default:
		erofs_err("unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, (unsigned long long)vi->nid);
		err = -EOPNOTSUPP;
		goto out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		/* tail extent lives inline in the metadata area */
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		/* plain data can never be longer than its pcluster */
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto out;
		}
		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_INTERLACED;
		else
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_SHIFTED;
	} else {
		map->m_algorithmformat = vi->z_algorithmtype[0];
	}

	if (flags & EROFS_GET_BLOCKS_FIEMAP) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}
out:
	erofs_dbg("m_la %" PRIu64 " m_pa %" PRIu64 " m_llen %" PRIu64 " m_plen %" PRIu64 " m_flags 0%o",
		  map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);
	return err;
}
  593. int z_erofs_map_blocks_iter(struct erofs_inode *vi,
  594. struct erofs_map_blocks *map,
  595. int flags)
  596. {
  597. int err = 0;
  598. /* when trying to read beyond EOF, leave it unmapped */
  599. if (map->m_la >= vi->i_size) {
  600. map->m_llen = map->m_la + 1 - vi->i_size;
  601. map->m_la = vi->i_size;
  602. map->m_flags = 0;
  603. goto out;
  604. }
  605. err = z_erofs_fill_inode_lazy(vi);
  606. if (err)
  607. goto out;
  608. if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
  609. !vi->z_tailextent_headlcn) {
  610. map->m_la = 0;
  611. map->m_llen = vi->i_size;
  612. map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
  613. EROFS_MAP_FRAGMENT;
  614. goto out;
  615. }
  616. err = z_erofs_do_map_blocks(vi, map, flags);
  617. out:
  618. DBG_BUGON(err < 0 && err != -ENOMEM);
  619. return err;
  620. }