// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/mm.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

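/*
 * Recovery runs as two passes over the warm node chain written after the
 * last checkpoint: find_fsync_dnodes() below collects the fsync'd inodes,
 * then recover_data() replays their updates.
 */
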
static struct kmem_cache *fsync_entry_slab;

bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
        s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

        if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
                return false;

        if (NM_I(sbi)->max_rf_node_blocks &&
                percpu_counter_sum_positive(&sbi->rf_node_block_count) >=
                                        NM_I(sbi)->max_rf_node_blocks)
                return false;
        return true;
}

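/* Find the fsync inode entry for @ino in @head, or NULL if none exists. */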
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                nid_t ino)
{
        struct fsync_inode_entry *entry;

        list_for_each_entry(entry, head, list)
                if (entry->inode->i_ino == ino)
                        return entry;

        return NULL;
}

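/*
 * Grab the inode via f2fs_iget_retry() and queue it on the recovery list.
 * Quota is initialized up front so that blocks and inodes recovered later
 * are charged to the right user.
 */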
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
                        struct list_head *head, nid_t ino, bool quota_inode)
{
        struct inode *inode;
        struct fsync_inode_entry *entry;
        int err;

        inode = f2fs_iget_retry(sbi->sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        err = f2fs_dquot_initialize(inode);
        if (err)
                goto err_out;

        if (quota_inode) {
                err = dquot_alloc_inode(inode);
                if (err)
                        goto err_out;
        }

        entry = f2fs_kmem_cache_alloc(fsync_entry_slab,
                                        GFP_F2FS_ZERO, true, NULL);
        entry->inode = inode;
        list_add_tail(&entry->list, head);

        return entry;
err_out:
        iput(inode);
        return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
        if (drop) {
                /* inode should not be recovered, drop it */
                f2fs_inode_synced(entry->inode);
        }
        iput(entry->inode);
        list_del(&entry->list);
        kmem_cache_free(fsync_entry_slab, entry);
}

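/*
 * Build an f2fs_filename from the name stored in the raw on-disk inode so
 * that the dentry can be looked up in @dir during recovery.
 */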
static int init_recovered_filename(const struct inode *dir,
                                   struct f2fs_inode *raw_inode,
                                   struct f2fs_filename *fname,
                                   struct qstr *usr_fname)
{
        int err;

        memset(fname, 0, sizeof(*fname));
        fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
        fname->disk_name.name = raw_inode->i_name;

        if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
                return -ENAMETOOLONG;

        if (!IS_ENCRYPTED(dir)) {
                usr_fname->name = fname->disk_name.name;
                usr_fname->len = fname->disk_name.len;
                fname->usr_fname = usr_fname;
        }

        /* Compute the hash of the filename */
        if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
                /*
                 * In this case the hash isn't computable without the key,
                 * so it was saved on-disk.
                 */
                if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
                        return -EINVAL;
                fname->hash = get_unaligned((f2fs_hash_t *)
                                &raw_inode->i_name[fname->disk_name.len]);
        } else if (IS_CASEFOLDED(dir)) {
                err = f2fs_init_casefolded_name(dir, fname);
                if (err)
                        return err;
                f2fs_hash_filename(dir, fname);
                /* Case-sensitive match is fine for recovery */
                f2fs_free_casefolded_name(fname);
        } else {
                f2fs_hash_filename(dir, fname);
        }
        return 0;
}

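/*
 * Re-link the recovered inode into its parent directory: any stale dentry
 * that points at a different inode is deleted first, then the recovered
 * name is added back.
 */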
static int recover_dentry(struct inode *inode, struct page *ipage,
                                                struct list_head *dir_list)
{
        struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
        struct f2fs_filename fname;
        struct qstr usr_fname;
        struct page *page;
        struct inode *dir, *einode;
        struct fsync_inode_entry *entry;
        int err = 0;
        char *name;

        entry = get_fsync_inode(dir_list, pino);
        if (!entry) {
                entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
                                                        pino, false);
                if (IS_ERR(entry)) {
                        dir = ERR_CAST(entry);
                        err = PTR_ERR(entry);
                        goto out;
                }
        }

        dir = entry->inode;
        err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
        if (err)
                goto out;
retry:
        de = __f2fs_find_entry(dir, &fname, &page);
        if (de && inode->i_ino == le32_to_cpu(de->ino))
                goto out_put;

        if (de) {
                einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        WARN_ON(1);
                        err = PTR_ERR(einode);
                        if (err == -ENOENT)
                                err = -EEXIST;
                        goto out_put;
                }

                err = f2fs_dquot_initialize(einode);
                if (err) {
                        iput(einode);
                        goto out_put;
                }

                err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
                if (err) {
                        iput(einode);
                        goto out_put;
                }
                f2fs_delete_entry(de, page, dir, einode);
                iput(einode);
                goto retry;
        } else if (IS_ERR(page)) {
                err = PTR_ERR(page);
        } else {
                err = f2fs_add_dentry(dir, &fname, inode,
                                        inode->i_ino, inode->i_mode);
        }
        if (err == -ENOMEM)
                goto retry;
        goto out;

out_put:
        f2fs_put_page(page, 0);
out:
        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = raw_inode->i_name;
        f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
                    __func__, ino_of_node(ipage), name,
                    IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
}

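/*
 * Transfer quota charges when the recovered uid/gid differ from the ones
 * attached to the in-memory inode.
 */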
static int recover_quota_data(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);
        struct iattr attr;
        uid_t i_uid = le32_to_cpu(raw->i_uid);
        gid_t i_gid = le32_to_cpu(raw->i_gid);
        int err;

        memset(&attr, 0, sizeof(attr));

        attr.ia_vfsuid = VFSUIDT_INIT(make_kuid(inode->i_sb->s_user_ns, i_uid));
        attr.ia_vfsgid = VFSGIDT_INIT(make_kgid(inode->i_sb->s_user_ns, i_gid));

        if (!vfsuid_eq(attr.ia_vfsuid, i_uid_into_vfsuid(&nop_mnt_idmap, inode)))
                attr.ia_valid |= ATTR_UID;
        if (!vfsgid_eq(attr.ia_vfsgid, i_gid_into_vfsgid(&nop_mnt_idmap, inode)))
                attr.ia_valid |= ATTR_GID;

        if (!attr.ia_valid)
                return 0;

        err = dquot_transfer(&nop_mnt_idmap, inode, &attr);
        if (err)
                set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
        return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
        if (ri->i_inline & F2FS_PIN_FILE)
                set_inode_flag(inode, FI_PIN_FILE);
        else
                clear_inode_flag(inode, FI_PIN_FILE);
        if (ri->i_inline & F2FS_DATA_EXIST)
                set_inode_flag(inode, FI_DATA_EXIST);
        else
                clear_inode_flag(inode, FI_DATA_EXIST);
}

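/*
 * Copy the recoverable metadata (mode, uid/gid, project id, size,
 * timestamps, flags) from the on-disk inode block into the VFS inode.
 */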
static int recover_inode(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        char *name;
        int err;

        inode->i_mode = le16_to_cpu(raw->i_mode);

        err = recover_quota_data(inode, page);
        if (err)
                return err;

        i_uid_write(inode, le32_to_cpu(raw->i_uid));
        i_gid_write(inode, le32_to_cpu(raw->i_gid));

        if (raw->i_inline & F2FS_EXTRA_ATTR) {
                if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
                                                                i_projid)) {
                        projid_t i_projid;
                        kprojid_t kprojid;

                        i_projid = (projid_t)le32_to_cpu(raw->i_projid);
                        kprojid = make_kprojid(&init_user_ns, i_projid);

                        if (!projid_eq(kprojid, fi->i_projid)) {
                                err = f2fs_transfer_project_quota(inode,
                                                                kprojid);
                                if (err)
                                        return err;
                                fi->i_projid = kprojid;
                        }
                }
        }

        f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
        inode_set_atime(inode, le64_to_cpu(raw->i_atime),
                        le32_to_cpu(raw->i_atime_nsec));
        inode_set_ctime(inode, le64_to_cpu(raw->i_ctime),
                        le32_to_cpu(raw->i_ctime_nsec));
        inode_set_mtime(inode, le64_to_cpu(raw->i_mtime),
                        le32_to_cpu(raw->i_mtime_nsec));

        fi->i_advise = raw->i_advise;
        fi->i_flags = le32_to_cpu(raw->i_flags);
        f2fs_set_inode_flags(inode);
        fi->i_gc_failures = le16_to_cpu(raw->i_gc_failures);

        recover_inline_flags(inode, raw);

        f2fs_mark_inode_dirty_sync(inode, true);

        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = F2FS_INODE(page)->i_name;

        f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
                    ino_of_node(page), name, raw->i_inline);
        return 0;
}

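/*
 * Scale the readahead window while walking the node chain: grow it while
 * the chain stays contiguous, shrink it once the chain jumps around.
 */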
static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
                                unsigned int ra_blocks, unsigned int blkaddr,
                                unsigned int next_blkaddr)
{
        if (blkaddr + 1 == next_blkaddr)
                ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS,
                                                        ra_blocks * 2);
        else if (next_blkaddr % BLKS_PER_SEG(sbi))
                ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS,
                                                        ra_blocks / 2);
        return ra_blocks;
}

/* Detect looped node chain with Floyd's cycle detection algorithm. */
static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr,
                                block_t *blkaddr_fast, bool *is_detecting)
{
        unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
        struct page *page = NULL;
        int i;

        if (!*is_detecting)
                return 0;

        for (i = 0; i < 2; i++) {
                if (!f2fs_is_valid_blkaddr(sbi, *blkaddr_fast, META_POR)) {
                        *is_detecting = false;
                        return 0;
                }

                page = f2fs_get_tmp_page(sbi, *blkaddr_fast);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        *is_detecting = false;
                        return 0;
                }

                ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, *blkaddr_fast,
                                                next_blkaddr_of_node(page));

                *blkaddr_fast = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);

                f2fs_ra_meta_pages_cond(sbi, *blkaddr_fast, ra_blocks);
        }

        if (*blkaddr_fast == blkaddr) {
                f2fs_notice(sbi, "%s: Detect looped node chain on blkaddr:%u."
                                " Run fsck to fix it.", __func__, blkaddr);
                return -EINVAL;
        }
        return 0;
}

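/*
 * Phase 1 of recovery: walk the warm node chain written after the last
 * checkpoint and collect every inode that has a dnode carrying the
 * fsync mark.
 */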
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
                                bool check_only)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        block_t blkaddr, blkaddr_fast;
        bool is_detecting = true;
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
        blkaddr_fast = blkaddr;

        while (1) {
                struct fsync_inode_entry *entry;

                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        return 0;

                page = f2fs_get_tmp_page(sbi, blkaddr);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        break;
                }

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (!is_fsync_dnode(page))
                        goto next;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry) {
                        bool quota_inode = false;

                        if (!check_only &&
                                        IS_INODE(page) && is_dent_dnode(page)) {
                                err = f2fs_recover_inode_page(sbi, page);
                                if (err) {
                                        f2fs_put_page(page, 1);
                                        break;
                                }
                                quota_inode = true;
                        }

                        /*
                         * CP | dnode(F) | inode(DF)
                         * For this case, we should not give up now.
                         */
                        entry = add_fsync_inode(sbi, head, ino_of_node(page),
                                                                quota_inode);
                        if (IS_ERR(entry)) {
                                err = PTR_ERR(entry);
                                if (err == -ENOENT)
                                        goto next;
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                entry->blkaddr = blkaddr;

                if (IS_INODE(page) && is_dent_dnode(page))
                        entry->last_dentry = blkaddr;
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);

                err = sanity_check_node_chain(sbi, blkaddr, &blkaddr_fast,
                                &is_detecting);
                if (err)
                        break;
        }
        return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
        struct fsync_inode_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, list)
                del_fsync_inode(entry, drop);
}

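/*
 * The summary of @blkaddr tells which node and offset referenced the block
 * before the crash; look that owner up and truncate the stale index so the
 * block can be safely reused for the recovered data.
 */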
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                                block_t blkaddr, struct dnode_of_data *dn)
{
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
        struct f2fs_summary_block *sum_node;
        struct f2fs_summary sum;
        struct page *sum_page, *node_page;
        struct dnode_of_data tdn = *dn;
        nid_t ino, nid;
        struct inode *inode;
        unsigned int offset, ofs_in_node, max_addrs;
        block_t bidx;
        int i;

        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
                return 0;

        /* Get the previous summary */
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);

                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
                        goto got_it;
                }
        }

        sum_page = f2fs_get_sum_page(sbi, segno);
        if (IS_ERR(sum_page))
                return PTR_ERR(sum_page);
        sum_node = (struct f2fs_summary_block *)page_address(sum_page);
        sum = sum_node->entries[blkoff];
        f2fs_put_page(sum_page, 1);
got_it:
        /* Use the locked dnode page and inode */
        nid = le32_to_cpu(sum.nid);
        ofs_in_node = le16_to_cpu(sum.ofs_in_node);

        max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);
        if (ofs_in_node >= max_addrs) {
                f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
                        ofs_in_node, dn->inode->i_ino, nid, max_addrs);
                f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUMMARY);
                return -EFSCORRUPTED;
        }

        if (dn->inode->i_ino == nid) {
                tdn.nid = nid;
                if (!dn->inode_page_locked)
                        lock_page(dn->inode_page);
                tdn.node_page = dn->inode_page;
                tdn.ofs_in_node = ofs_in_node;
                goto truncate_out;
        } else if (dn->nid == nid) {
                tdn.ofs_in_node = ofs_in_node;
                goto truncate_out;
        }

        /* Get the node page */
        node_page = f2fs_get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        offset = ofs_of_node(node_page);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);

        if (ino != dn->inode->i_ino) {
                int ret;

                /* Deallocate previous index in the node page */
                inode = f2fs_iget_retry(sbi->sb, ino);
                if (IS_ERR(inode))
                        return PTR_ERR(inode);

                ret = f2fs_dquot_initialize(inode);
                if (ret) {
                        iput(inode);
                        return ret;
                }
        } else {
                inode = dn->inode;
        }

        bidx = f2fs_start_bidx_of_node(offset, inode) +
                                le16_to_cpu(sum.ofs_in_node);

        /*
         * if inode page is locked, unlock temporarily, but its reference
         * count keeps alive.
         */
        if (ino == dn->inode->i_ino && dn->inode_page_locked)
                unlock_page(dn->inode_page);

        set_new_dnode(&tdn, inode, NULL, NULL, 0);
        if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
                goto out;

        if (tdn.data_blkaddr == blkaddr)
                f2fs_truncate_data_blocks_range(&tdn, 1);

        f2fs_put_dnode(&tdn);
out:
        if (ino != dn->inode->i_ino)
                iput(inode);
        else if (dn->inode_page_locked)
                lock_page(dn->inode_page);
        return 0;

truncate_out:
        if (f2fs_data_blkaddr(&tdn) == blkaddr)
                f2fs_truncate_data_blocks_range(&tdn, 1);
        if (dn->inode->i_ino == nid && !dn->inode_page_locked)
                unlock_page(dn->inode_page);
        return 0;
}

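/* Retry block reservation a bounded number of times on transient failure. */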
static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
{
        int i, err = 0;

        for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
                err = f2fs_reserve_new_block(dn);
                if (!err)
                        break;
        }

        return err;
}

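/*
 * Replay a single fsync'd dnode: restore xattrs first, then inline data,
 * then every data block index the dnode covers.
 */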
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page)
{
        struct dnode_of_data dn;
        struct node_info ni;
        unsigned int start, end;
        int err = 0, recovered = 0;

        /* step 1: recover xattr */
        if (IS_INODE(page)) {
                err = f2fs_recover_inline_xattr(inode, page);
                if (err)
                        goto out;
        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
                err = f2fs_recover_xattr_data(inode, page);
                if (!err)
                        recovered++;
                goto out;
        }

        /* step 2: recover inline data */
        err = f2fs_recover_inline_data(inode, page);
        if (err) {
                if (err == 1)
                        err = 0;
                goto out;
        }

        /* step 3: recover data indices */
        start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
        end = start + ADDRS_PER_PAGE(page, inode);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
        err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                if (err == -ENOMEM) {
                        memalloc_retry_wait(GFP_NOFS);
                        goto retry_dn;
                }
                goto out;
        }

        f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

        err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
        if (err)
                goto err;

        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

        if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
                f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
                          inode->i_ino, ofs_of_node(dn.node_page),
                          ofs_of_node(page));
                err = -EFSCORRUPTED;
                f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
                goto err;
        }

        for (; start < end; start++, dn.ofs_in_node++) {
                block_t src, dest;

                src = f2fs_data_blkaddr(&dn);
                dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

                if (__is_valid_data_blkaddr(src) &&
                        !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
                        err = -EFSCORRUPTED;
                        goto err;
                }

                if (__is_valid_data_blkaddr(dest) &&
                        !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
                        err = -EFSCORRUPTED;
                        goto err;
                }

                /* skip recovering if dest is the same as src */
                if (src == dest)
                        continue;

                /* dest is invalid, just invalidate src block */
                if (dest == NULL_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);
                        continue;
                }

                if (!file_keep_isize(inode) &&
                        (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
                        f2fs_i_size_write(inode,
                                (loff_t)(start + 1) << PAGE_SHIFT);

                /*
                 * dest is reserved block, invalidate src block
                 * and then reserve one new block in dnode page.
                 */
                if (dest == NEW_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);

                        err = f2fs_reserve_new_block_retry(&dn);
                        if (err)
                                goto err;
                        continue;
                }

                /* dest is valid block, try to recover from src to dest */
                if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
                        if (src == NULL_ADDR) {
                                err = f2fs_reserve_new_block_retry(&dn);
                                if (err)
                                        goto err;
                        }
retry_prev:
                        /* Check the previous node page having this index */
                        err = check_index_in_prev_nodes(sbi, dest, &dn);
                        if (err) {
                                if (err == -ENOMEM) {
                                        memalloc_retry_wait(GFP_NOFS);
                                        goto retry_prev;
                                }
                                goto err;
                        }

                        if (f2fs_is_valid_blkaddr(sbi, dest,
                                        DATA_GENERIC_ENHANCE_UPDATE)) {
                                f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u",
                                        dest, inode->i_ino, dn.ofs_in_node);
                                err = -EFSCORRUPTED;
                                goto err;
                        }

                        /* write dummy data page */
                        f2fs_replace_block(sbi, &dn, src, dest,
                                                ni.version, false, false);
                        recovered++;
                }
        }

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
err:
        f2fs_put_dnode(&dn);
out:
        f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
                    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
                    recovered, err);
        return err;
}

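/*
 * Phase 2 of recovery: walk the node chain again and, for every collected
 * fsync inode, replay inode updates, dentry updates and data indices.
 */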
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
                struct list_head *tmp_inode_list, struct list_head *dir_list)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        int err = 0;
        block_t blkaddr;
        unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        break;

                page = f2fs_get_tmp_page(sbi, blkaddr);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        break;
                }

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                entry = get_fsync_inode(inode_list, ino_of_node(page));
                if (!entry)
                        goto next;
                /*
                 * inode(x) | CP | inode(x) | dnode(F)
                 * In this case, we can lose the latest inode(x).
                 * So, call recover_inode for the inode update.
                 */
                if (IS_INODE(page)) {
                        err = recover_inode(entry->inode, page);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                if (entry->last_dentry == blkaddr) {
                        err = recover_dentry(entry->inode, page, dir_list);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                err = do_recover_data(sbi, entry->inode, page);
                if (err) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (entry->blkaddr == blkaddr)
                        list_move_tail(&entry->list, tmp_inode_list);
next:
                ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
                                next_blkaddr_of_node(page));

                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);

                f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
        }
        if (!err)
                err = f2fs_allocate_new_segments(sbi);
        return err;
}

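/*
 * Entry point for roll forward recovery, called at mount time. With
 * @check_only set, only report whether there is any fsync data to recover.
 */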
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
        struct list_head inode_list, tmp_inode_list;
        struct list_head dir_list;
        int err;
        int ret = 0;
        unsigned long s_flags = sbi->sb->s_flags;
        bool need_writecp = false;

        if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
                f2fs_info(sbi, "recover fsync data on readonly fs");

        INIT_LIST_HEAD(&inode_list);
        INIT_LIST_HEAD(&tmp_inode_list);
        INIT_LIST_HEAD(&dir_list);

        /* prevent checkpoint */
        f2fs_down_write(&sbi->cp_global_sem);

        /* step #1: find fsynced inode numbers */
        err = find_fsync_dnodes(sbi, &inode_list, check_only);
        if (err || list_empty(&inode_list))
                goto skip;

        if (check_only) {
                ret = 1;
                goto skip;
        }

        need_writecp = true;

        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
        if (!err)
                f2fs_bug_on(sbi, !list_empty(&inode_list));
        else
                f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
        destroy_fsync_dnodes(&inode_list, err);
        destroy_fsync_dnodes(&tmp_inode_list, err);

        /* truncate meta pages to be used by the recovery */
        truncate_inode_pages_range(META_MAPPING(sbi),
                        (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

        if (err) {
                truncate_inode_pages_final(NODE_MAPPING(sbi));
                truncate_inode_pages_final(META_MAPPING(sbi));
        }

        /*
         * If fsync data succeeds or there is no fsync data to recover,
         * and the f2fs is not read only, check and fix zoned block devices'
         * write pointer consistency.
         */
        if (f2fs_sb_has_blkzoned(sbi) && !f2fs_readonly(sbi->sb)) {
                int err2 = f2fs_fix_curseg_write_pointer(sbi);

                if (!err2)
                        err2 = f2fs_check_write_pointer(sbi);
                if (err2)
                        err = err2;
                ret = err;
        }

        if (!err)
                clear_sbi_flag(sbi, SBI_POR_DOING);

        f2fs_up_write(&sbi->cp_global_sem);

        /* let's drop all the directory inodes for clean checkpoint */
        destroy_fsync_dnodes(&dir_list, err);

        if (need_writecp) {
                set_sbi_flag(sbi, SBI_IS_RECOVERED);

                if (!err) {
                        struct cp_control cpc = {
                                .reason = CP_RECOVERY,
                        };
                        stat_inc_cp_call_count(sbi, TOTAL_CALL);
                        err = f2fs_write_checkpoint(sbi, &cpc);
                }
        }

        sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

        return ret ? ret : err;
}

int __init f2fs_create_recovery_cache(void)
{
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                                        sizeof(struct fsync_inode_entry));
        return fsync_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_recovery_cache(void)
{
        kmem_cache_destroy(fsync_entry_slab);
}