// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
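
/*
 * Inline data is only supported for regular files and symlinks whose
 * size still fits within the inode block, and never for inodes that
 * are involved in an atomic write.
 */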
static bool support_inline_data(struct inode *inode)
{
	if (f2fs_used_in_atomic_write(inode))
		return false;
	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;
	if (i_size_read(inode) > MAX_INLINE_DATA(inode))
		return false;
	return true;
}

bool f2fs_may_inline_data(struct inode *inode)
{
	if (!support_inline_data(inode))
		return false;

	return !f2fs_post_read_required(inode);
}

static bool inode_has_blocks(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	int i;

	if (F2FS_HAS_BLOCKS(inode))
		return true;

	for (i = 0; i < DEF_NIDS_PER_INODE; i++) {
		if (ri->i_nid[i])
			return true;
	}
	return false;
}

bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage)
{
	if (!f2fs_has_inline_data(inode))
		return false;

	if (inode_has_blocks(inode, ipage))
		return false;

	if (!support_inline_data(inode))
		return true;

	/*
	 * used by sanity_check_inode(), when disk layout fields have not
	 * been synchronized to in-memory fields.
	 */
	return (S_ISREG(inode->i_mode) &&
		(file_is_encrypt(inode) || file_is_verity(inode) ||
		(F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
}

bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}
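
/*
 * Copy the inline data stored in @ipage into @folio, zeroing the tail
 * of the folio beyond MAX_INLINE_DATA() bytes, and mark it uptodate.
 */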
void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage)
{
	struct inode *inode = folio_file_mapping(folio)->host;

	if (folio_test_uptodate(folio))
		return;

	f2fs_bug_on(F2FS_I_SB(inode), folio_index(folio));

	folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));

	/* Copy the whole inline data block */
	memcpy_to_folio(folio, 0, inline_data_addr(inode, ipage),
			MAX_INLINE_DATA(inode));
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
}

void f2fs_truncate_inline_inode(struct inode *inode,
					struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA(inode))
		return;

	addr = inline_data_addr(inode, ipage);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
	set_page_dirty(ipage);

	if (from == 0)
		clear_inode_flag(inode, FI_DATA_EXIST);
}
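
/*
 * Serve a read of @folio directly from the inline data in the inode
 * page; folios beyond index 0 are simply zeroed.  Returns -EAGAIN if
 * the inode lost its inline data in the meantime, so the caller can
 * fall back to the regular read path.
 */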
int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
{
	struct page *ipage;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		folio_unlock(folio);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (folio_index(folio))
		folio_zero_segment(folio, 0, folio_size(folio));
	else
		f2fs_do_read_inline_data(folio, ipage);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	f2fs_put_page(ipage, 1);
	folio_unlock(folio);
	return 0;
}
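
/*
 * Move the inline data of dn->inode out to the regular data page @page:
 * reserve block 0, copy the inline payload into the page, write it back
 * synchronously, and only then clear the inline area and the
 * FI_INLINE_DATA flag.
 */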
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.ino = dn->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
		.io_type = FS_DATA_IO,
	};
	struct node_info ni;
	int dirty, err;

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
	if (err) {
		f2fs_truncate_data_blocks_range(dn, 1);
		f2fs_put_dnode(dn);
		return err;
	}

	fio.version = ni.version;

	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(dn);
		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
		f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
			  __func__, dn->inode->i_ino, dn->data_blkaddr);
		f2fs_handle_error(fio.sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	f2fs_bug_on(F2FS_P_SB(page), folio_test_writeback(page_folio(page)));

	f2fs_do_read_inline_data(page_folio(page), dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	f2fs_outplace_write_data(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		f2fs_remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_page_private_inline(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}
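
/*
 * Lock-taking wrapper around f2fs_convert_inline_page(): grabs page 0
 * of the inode's mapping and the inode page under f2fs_lock_op(), and
 * rebalances dirty node pages afterwards on success.
 */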
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
		return -EROFS;

	if (!f2fs_has_inline_data(inode))
		return 0;

	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	if (!err)
		f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}
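
/*
 * Write @folio back into the inode's inline data area instead of to a
 * data block.  Returns -EAGAIN if the inode no longer carries inline
 * data, so writeback falls back to the normal path.
 */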
int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), folio->index);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy_from_folio(inline_data_addr(inode, ipage),
			  folio, 0, MAX_INLINE_DATA(inode));
	set_page_dirty(ipage);

	f2fs_clear_page_cache_dirty_tag(folio);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_page_private_inline(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
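
/*
 * Roll-forward recovery of inline data from the fsynced node page
 * @npage.  Returns 1 if inline data was recovered and the inode page
 * dirtied, 0 if nothing had to be done, or a negative errno.
 */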
int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove data blocks, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);

		f2fs_wait_on_page_writeback(ipage, NODE, true, true);

		src_addr = inline_data_addr(inode, npage);
		dst_addr = inline_data_addr(inode, ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));

		set_inode_flag(inode, FI_INLINE_DATA);
		set_inode_flag(inode, FI_DATA_EXIST);

		set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		return 1;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);
		f2fs_truncate_inline_inode(inode, ipage, 0);
		stat_dec_inline_inode(inode);
		clear_inode_flag(inode, FI_INLINE_DATA);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		int ret;

		ret = f2fs_truncate_blocks(inode, 0, false);
		if (ret)
			return ret;
		stat_inc_inline_inode(inode);
		goto process_inline;
	}
	return 0;
}
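
/*
 * Look up @fname in an inline directory.  On a hit, *res_page holds the
 * (unlocked) inode page pinning the returned dentry; on failure it
 * carries the error and NULL is returned.
 */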
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
					       const struct f2fs_filename *fname,
					       struct page **res_page,
					       bool use_hash)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	void *inline_dentry;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage)) {
		*res_page = ipage;
		return NULL;
	}

	inline_dentry = inline_data_addr(dir, ipage);

	make_dentry_ptr_inline(dir, &d, inline_dentry);
	de = f2fs_find_target_dentry(&d, fname, NULL, use_hash);
	unlock_page(ipage);
	if (IS_ERR(de)) {
		*res_page = ERR_CAST(de);
		de = NULL;
	}
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	return de;
}

int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_dentry_ptr d;
	void *inline_dentry;

	inline_dentry = inline_data_addr(inode, ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);
	f2fs_do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA(inode))
		f2fs_i_size_write(inode, MAX_INLINE_DATA(inode));
	return 0;
}

/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr src, dst;
	int err;

	page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(&dn);
		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
		f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
			  __func__, dir->i_ino, dn.data_blkaddr);
		f2fs_handle_error(F2FS_P_SB(page), ERROR_INVALID_BLKADDR);
		err = -EFSCORRUPTED;
		goto out;
	}

	f2fs_wait_on_page_writeback(page, DATA, true, true);

	dentry_blk = page_address(page);

	/*
	 * Start by zeroing the full block, to ensure that all unused space is
	 * zeroed and no uninitialized memory is leaked to disk.
	 */
	memset(dentry_blk, 0, F2FS_BLKSIZE);

	make_dentry_ptr_inline(dir, &src, inline_dentry);
	make_dentry_ptr_block(dir, &dst, dentry_blk);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
	memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
	memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	f2fs_truncate_inline_inode(dir, ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/*
	 * should retrieve reserved space which was used to keep
	 * inline_dentry's structure for backward compatibility.
	 */
	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
	    !f2fs_has_inline_xattr(dir))
		F2FS_I(dir)->i_inline_xattr_size = 0;

	f2fs_i_depth_write(dir, 1);
	if (i_size_read(dir) < PAGE_SIZE)
		f2fs_i_size_write(dir, PAGE_SIZE);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
{
	struct f2fs_dentry_ptr d;
	unsigned long bit_pos = 0;
	int err = 0;

	make_dentry_ptr_inline(dir, &d, inline_dentry);

	while (bit_pos < d.max) {
		struct f2fs_dir_entry *de;
		struct f2fs_filename fname;
		nid_t ino;
		umode_t fake_mode;

		if (!test_bit_le(bit_pos, d.bitmap)) {
			bit_pos++;
			continue;
		}

		de = &d.dentry[bit_pos];

		if (unlikely(!de->name_len)) {
			bit_pos++;
			continue;
		}

		/*
		 * We only need the disk_name and hash to move the dentry.
		 * We don't need the original or casefolded filenames.
		 */
		memset(&fname, 0, sizeof(fname));
		fname.disk_name.name = d.filename[bit_pos];
		fname.disk_name.len = le16_to_cpu(de->name_len);
		fname.hash = de->hash_code;

		ino = le32_to_cpu(de->ino);
		fake_mode = fs_ftype_to_dtype(de->file_type) << S_DT_SHIFT;

		err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode);
		if (err)
			goto punch_dentry_pages;

		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
	return 0;
punch_dentry_pages:
	truncate_inode_pages(&dir->i_data, 0);
	f2fs_truncate_blocks(dir, 0, false);
	f2fs_remove_dirty_inode(dir);
	return err;
}
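
/*
 * Convert an inline directory whose entries must be re-hashed into
 * multi-level hash buckets (i_dir_level != 0), so a flat block copy is
 * not enough: back up the inline dentries, truncate the inline area,
 * and re-add every entry through the regular path, restoring the
 * backup if that fails.
 */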
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	void *backup_dentry;
	int err;

	backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
				     MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
	if (!backup_dentry) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
	f2fs_truncate_inline_inode(dir, ipage, 0);

	unlock_page(ipage);

	err = f2fs_add_inline_entries(dir, backup_dentry);
	if (err)
		goto recover;

	lock_page(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/*
	 * should retrieve reserved space which was used to keep
	 * inline_dentry's structure for backward compatibility.
	 */
	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
	    !f2fs_has_inline_xattr(dir))
		F2FS_I(dir)->i_inline_xattr_size = 0;

	kfree(backup_dentry);
	return 0;
recover:
	lock_page(ipage);
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
	f2fs_i_depth_write(dir, 0);
	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	kfree(backup_dentry);
	return err;
}

static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	if (!F2FS_I(dir)->i_dir_level)
		return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
	else
		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
}

int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_filename fname;
	void *inline_dentry = NULL;
	int err = 0;

	if (!f2fs_has_inline_dentry(dir))
		return 0;

	f2fs_lock_op(sbi);

	err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto out;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_fname;
	}

	if (f2fs_has_enough_room(dir, ipage, &fname)) {
		f2fs_put_page(ipage, 1);
		goto out_fname;
	}

	inline_dentry = inline_data_addr(dir, ipage);

	err = do_convert_inline_dir(dir, ipage, inline_dentry);
	if (!err)
		f2fs_put_page(ipage, 1);
out_fname:
	f2fs_free_filename(&fname);
out:
	f2fs_unlock_op(sbi);
	return err;
}
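
/*
 * Add a dentry to an inline directory.  If the inline area has no free
 * slots, convert the directory first and return -EAGAIN so the caller
 * can retry through the regular (non-inline) dentry path.
 */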
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			  struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	void *inline_dentry = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
	struct page *page = NULL;
	int err = 0;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(dir, ipage);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
	if (bit_pos >= d.max) {
		err = do_convert_inline_dir(dir, ipage, inline_dentry);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
				       SINGLE_DEPTH_NESTING);
		page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);

	f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
			   bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);

		/* synchronize inode page's data from inode cache */
		if (is_inode_flag_set(inode, FI_NEW_INODE))
			f2fs_update_inode(inode, page);

		f2fs_put_page(page, 1);
	}

	f2fs_update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		f2fs_up_write(&F2FS_I(inode)->i_sem);
out:
	f2fs_put_page(ipage, 1);
	return err;
}
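
/*
 * Remove @dentry from an inline directory by clearing its slot bits in
 * the inline bitmap; @page is the inode page returned by the lookup.
 */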
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
			      struct inode *dir, struct inode *inode)
{
	struct f2fs_dentry_ptr d;
	void *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE, true, true);

	inline_dentry = inline_data_addr(dir, page);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = dentry - d.dentry;
	for (i = 0; i < slots; i++)
		__clear_bit_le(bit_pos + i, d.bitmap);

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);

	if (inode)
		f2fs_drop_nlink(dir, inode);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	void *inline_dentry;
	struct f2fs_dentry_ptr d;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	inline_dentry = inline_data_addr(dir, ipage);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < d.max)
		return false;

	return true;
}
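
/*
 * readdir for inline directories: emit the in-use dentries from the
 * inline area, then push ctx->pos to d.max so the iteration ends.
 */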
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			 struct fscrypt_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;
	void *inline_dentry = NULL;
	int err;

	make_dentry_ptr_inline(inode, &d, inline_dentry);

	if (ctx->pos == d.max)
		return 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	/*
	 * f2fs_readdir() is protected by inode->i_rwsem, so it is safe to
	 * access ipage without the page lock held.
	 */
	unlock_page(ipage);

	inline_dentry = inline_data_addr(inode, ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);

	err = f2fs_fill_dentries(ctx, &d, 0, fstr);
	if (!err)
		ctx->pos = d.max;

	f2fs_put_page(ipage, 0);
	return err < 0 ? err : 0;
}
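
/*
 * FIEMAP for inline data: report a single DATA_INLINE|NOT_ALIGNED|LAST
 * extent whose byte address points into the on-disk inode block itself.
 */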
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
	    !f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	if (S_ISDIR(inode->i_mode) && !f2fs_has_inline_dentry(inode)) {
		err = -EAGAIN;
		goto out;
	}

	ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false);
	if (err)
		goto out;

	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(inode, ipage) -
		    (char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
	trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
out:
	f2fs_put_page(ipage, 1);
	return err;
}