fscache.c
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2022, Alibaba Cloud
  4. * Copyright (C) 2022, Bytedance Inc. All rights reserved.
  5. */
  6. #include <linux/pseudo_fs.h>
  7. #include <linux/fscache.h>
  8. #include "internal.h"
  9. static DEFINE_MUTEX(erofs_domain_list_lock);
  10. static DEFINE_MUTEX(erofs_domain_cookies_lock);
  11. static LIST_HEAD(erofs_domain_list);
  12. static LIST_HEAD(erofs_domain_cookies_list);
  13. static struct vfsmount *erofs_pseudo_mnt;
  14. static int erofs_anon_init_fs_context(struct fs_context *fc)
  15. {
  16. return init_pseudo(fc, EROFS_SUPER_MAGIC) ? 0 : -ENOMEM;
  17. }
/*
 * Pseudo filesystem type used only to host anonymous inodes for blobs that
 * are shared across erofs instances (mounted once as erofs_pseudo_mnt).
 */
static struct file_system_type erofs_anon_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "pseudo_erofs",
	.init_fs_context = erofs_anon_init_fs_context,
	.kill_sb	= kill_anon_super,
};
/* State of one in-flight read issued against the cache backend. */
struct erofs_fscache_io {
	struct netfs_cache_resources cres;	/* cache resources for this read */
	struct iov_iter iter;			/* destination buffer of the read */
	netfs_io_terminated_t end_io;		/* completion callback */
	void *private;				/* opaque data passed to end_io */
	refcount_t ref;
};
/* A page-cache read request; may be served by several erofs_fscache_io's. */
struct erofs_fscache_rq {
	struct address_space *mapping;	/* The mapping being accessed */
	loff_t start;			/* Start position */
	size_t len;			/* Length of the request */
	size_t submitted;		/* Length of submitted */
	short error;			/* 0 or error that occurred */
	refcount_t ref;
};
  39. static bool erofs_fscache_io_put(struct erofs_fscache_io *io)
  40. {
  41. if (!refcount_dec_and_test(&io->ref))
  42. return false;
  43. if (io->cres.ops)
  44. io->cres.ops->end_operation(&io->cres);
  45. kfree(io);
  46. return true;
  47. }
/*
 * Finish a request: walk every folio in [start, start + len) of the mapping,
 * mark it uptodate unless an error was recorded, and unlock it.
 */
static void erofs_fscache_req_complete(struct erofs_fscache_rq *req)
{
	struct folio *folio;
	bool failed = req->error;
	pgoff_t start_page = req->start / PAGE_SIZE;
	pgoff_t last_page = ((req->start + req->len) / PAGE_SIZE) - 1;

	XA_STATE(xas, &req->mapping->i_pages, start_page);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		if (xas_retry(&xas, folio))
			continue;
		if (!failed)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	rcu_read_unlock();
}
  65. static void erofs_fscache_req_put(struct erofs_fscache_rq *req)
  66. {
  67. if (!refcount_dec_and_test(&req->ref))
  68. return;
  69. erofs_fscache_req_complete(req);
  70. kfree(req);
  71. }
  72. static struct erofs_fscache_rq *erofs_fscache_req_alloc(struct address_space *mapping,
  73. loff_t start, size_t len)
  74. {
  75. struct erofs_fscache_rq *req = kzalloc(sizeof(*req), GFP_KERNEL);
  76. if (!req)
  77. return NULL;
  78. req->mapping = mapping;
  79. req->start = start;
  80. req->len = len;
  81. refcount_set(&req->ref, 1);
  82. return req;
  83. }
  84. static void erofs_fscache_req_io_put(struct erofs_fscache_io *io)
  85. {
  86. struct erofs_fscache_rq *req = io->private;
  87. if (erofs_fscache_io_put(io))
  88. erofs_fscache_req_put(req);
  89. }
/* I/O termination callback: record any error on the request, drop io refs. */
static void erofs_fscache_req_end_io(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct erofs_fscache_io *io = priv;
	struct erofs_fscache_rq *req = io->private;

	if (IS_ERR_VALUE(transferred_or_error))
		req->error = transferred_or_error;
	erofs_fscache_req_io_put(io);
}
  99. static struct erofs_fscache_io *erofs_fscache_req_io_alloc(struct erofs_fscache_rq *req)
  100. {
  101. struct erofs_fscache_io *io = kzalloc(sizeof(*io), GFP_KERNEL);
  102. if (!io)
  103. return NULL;
  104. io->end_io = erofs_fscache_req_end_io;
  105. io->private = req;
  106. refcount_inc(&req->ref);
  107. refcount_set(&io->ref, 1);
  108. return io;
  109. }
/*
 * Read data from fscache described by cookie at pstart physical address
 * offset, and fill the read data into buffer described by io->iter.
 */
static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
		loff_t pstart, struct erofs_fscache_io *io)
{
	enum netfs_io_source source;
	struct netfs_cache_resources *cres = &io->cres;
	struct iov_iter *iter = &io->iter;
	int ret;

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		return ret;

	/* issue one fscache_read() per chunk the cache can serve at a time */
	while (iov_iter_count(iter)) {
		size_t orig_count = iov_iter_count(iter), len = orig_count;
		unsigned long flags = 1 << NETFS_SREQ_ONDEMAND;

		source = cres->ops->prepare_ondemand_read(cres,
				pstart, &len, LLONG_MAX, &flags, 0);
		if (WARN_ON(len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source);
			return -EIO;
		}

		/* restrict the iter to this chunk; remainder re-exposed below */
		iov_iter_truncate(iter, len);
		/* extra io ref for the async completion, dropped by end_io */
		refcount_inc(&io->ref);
		ret = fscache_read(cres, pstart, iter, NETFS_READ_HOLE_FAIL,
				   io->end_io, io);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(NULL, "fscache_read failed (ret %d)", ret);
			return ret;
		}
		/* fscache_read() should have consumed the truncated iter */
		if (WARN_ON(iov_iter_count(iter)))
			return -EIO;

		iov_iter_reexpand(iter, orig_count - len);
		pstart += len;
	}
	return 0;
}
/* A pseudo bio backed by an erofs_fscache_io; `io` MUST stay the first field. */
struct erofs_fscache_bio {
	struct erofs_fscache_io io;
	struct bio bio;			/* w/o bdev to share bio_add_page/endio() */
	struct bio_vec bvecs[BIO_MAX_VECS];
};
/*
 * Completion for pseudo-bio reads: propagate any error into the bio status,
 * invoke the original bi_end_io, then drop the embedded io reference.
 */
static void erofs_fscache_bio_endio(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct erofs_fscache_bio *io = priv;

	if (IS_ERR_VALUE(transferred_or_error))
		io->bio.bi_status = errno_to_blk_status(transferred_or_error);
	io->bio.bi_end_io(&io->bio);
	/* io must be the first member so freeing &io->io frees the container */
	BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0);
	erofs_fscache_io_put(&io->io);
}
  167. struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
  168. {
  169. struct erofs_fscache_bio *io;
  170. io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
  171. bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
  172. io->io.private = mdev->m_dif->fscache->cookie;
  173. io->io.end_io = erofs_fscache_bio_endio;
  174. refcount_set(&io->io.ref, 1);
  175. return &io->bio;
  176. }
/*
 * Submit a pseudo bio: read from the cookie stashed in io.private at byte
 * offset bi_sector << 9.  On synchronous failure the error is reported
 * through bi_end_io; otherwise completion happens in erofs_fscache_bio_endio.
 */
void erofs_fscache_submit_bio(struct bio *bio)
{
	struct erofs_fscache_bio *io = container_of(bio,
			struct erofs_fscache_bio, bio);
	int ret;

	iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt,
		      bio->bi_iter.bi_size);
	ret = erofs_fscache_read_io_async(io->io.private,
			bio->bi_iter.bi_sector << 9, &io->io);
	/* drop the submitter's ref; in-flight chunks still hold their own */
	erofs_fscache_io_put(&io->io);
	if (!ret)
		return;
	bio->bi_status = errno_to_blk_status(ret);
	bio->bi_end_io(bio);
}
/*
 * ->read_folio() for meta inodes: read one folio straight from the fscache
 * cookie stored in the inode's i_private.  The folio is unlocked (and marked
 * uptodate on success) by the request completion.
 */
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
	struct erofs_fscache *ctx = folio->mapping->host->i_private;
	int ret = -ENOMEM;
	struct erofs_fscache_rq *req;
	struct erofs_fscache_io *io;

	req = erofs_fscache_req_alloc(folio->mapping,
			folio_pos(folio), folio_size(folio));
	if (!req) {
		/* no request, so nobody else will unlock the folio */
		folio_unlock(folio);
		return ret;
	}
	io = erofs_fscache_req_io_alloc(req);
	if (!io) {
		req->error = ret;
		goto out;
	}
	iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages,
			folio_pos(folio), folio_size(folio));

	ret = erofs_fscache_read_io_async(ctx->cookie, folio_pos(folio), io);
	if (ret)
		req->error = ret;

	erofs_fscache_req_io_put(io);
out:
	erofs_fscache_req_put(req);
	return ret;
}
/*
 * Submit the next slice of @req, advancing req->submitted.  Three cases:
 * inline metadata is copied synchronously from the metabuf, holes are
 * zero-filled, and mapped extents are read asynchronously from the device's
 * fscache cookie.
 */
static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
{
	struct address_space *mapping = req->mapping;
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct erofs_fscache_io *io;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	loff_t pos = req->start + req->submitted;
	size_t count;
	int ret;

	map.m_la = pos;
	ret = erofs_map_blocks(inode, &map);
	if (ret)
		return ret;

	if (map.m_flags & EROFS_MAP_META) {
		/* inline data: copy from the metadata buffer, zero-pad the page */
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
		struct iov_iter iter;
		size_t size = map.m_llen;
		void *src;

		src = erofs_read_metabuf(&buf, sb, map.m_pa, EROFS_KMAP);
		if (IS_ERR(src))
			return PTR_ERR(src);

		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
		if (copy_to_iter(src, size, &iter) != size) {
			erofs_put_metabuf(&buf);
			return -EFAULT;
		}
		iov_iter_zero(PAGE_SIZE - size, &iter);
		erofs_put_metabuf(&buf);
		req->submitted += PAGE_SIZE;
		return 0;
	}

	count = req->len - req->submitted;
	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		/* hole: fill the remaining bytes with zeroes */
		struct iov_iter iter;

		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
		iov_iter_zero(count, &iter);
		req->submitted += count;
		return 0;
	}

	/* clamp the slice to the extent that contains pos */
	count = min_t(size_t, map.m_llen - (pos - map.m_la), count);
	DBG_BUGON(!count || count % PAGE_SIZE);

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	io = erofs_fscache_req_io_alloc(req);
	if (!io)
		return -ENOMEM;
	iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
	ret = erofs_fscache_read_io_async(mdev.m_dif->fscache->cookie,
			mdev.m_pa + (pos - map.m_la), io);
	erofs_fscache_req_io_put(io);

	req->submitted += count;
	return ret;
}
  279. static int erofs_fscache_data_read(struct erofs_fscache_rq *req)
  280. {
  281. int ret;
  282. do {
  283. ret = erofs_fscache_data_read_slice(req);
  284. if (ret)
  285. req->error = ret;
  286. } while (!ret && req->submitted < req->len);
  287. return ret;
  288. }
  289. static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
  290. {
  291. struct erofs_fscache_rq *req;
  292. int ret;
  293. req = erofs_fscache_req_alloc(folio->mapping,
  294. folio_pos(folio), folio_size(folio));
  295. if (!req) {
  296. folio_unlock(folio);
  297. return -ENOMEM;
  298. }
  299. ret = erofs_fscache_data_read(req);
  300. erofs_fscache_req_put(req);
  301. return ret;
  302. }
  303. static void erofs_fscache_readahead(struct readahead_control *rac)
  304. {
  305. struct erofs_fscache_rq *req;
  306. if (!readahead_count(rac))
  307. return;
  308. req = erofs_fscache_req_alloc(rac->mapping,
  309. readahead_pos(rac), readahead_length(rac));
  310. if (!req)
  311. return;
  312. /* The request completion will drop refs on the folios. */
  313. while (readahead_folio(rac))
  314. ;
  315. erofs_fscache_data_read(req);
  316. erofs_fscache_req_put(req);
  317. }
/* aops for the anonymous meta inodes created in erofs_fscache_acquire_cookie(). */
static const struct address_space_operations erofs_fscache_meta_aops = {
	.read_folio = erofs_fscache_meta_read_folio,
};
/* aops for regular data inodes when erofs runs in fscache mode. */
const struct address_space_operations erofs_fscache_access_aops = {
	.read_folio = erofs_fscache_read_folio,
	.readahead = erofs_fscache_readahead,
};
/*
 * Drop a reference on @domain under erofs_domain_list_lock.  On the final
 * put: unlink it, tear down the pseudo mount if no domains remain, and
 * relinquish its volume; the frees happen after the lock is released.
 */
static void erofs_fscache_domain_put(struct erofs_domain *domain)
{
	mutex_lock(&erofs_domain_list_lock);
	if (refcount_dec_and_test(&domain->ref)) {
		list_del(&domain->list);
		if (list_empty(&erofs_domain_list)) {
			kern_unmount(erofs_pseudo_mnt);
			erofs_pseudo_mnt = NULL;
		}
		fscache_relinquish_volume(domain->volume, NULL, false);
		mutex_unlock(&erofs_domain_list_lock);
		kfree(domain->domain_id);
		kfree(domain);
		return;
	}
	mutex_unlock(&erofs_domain_list_lock);
}
  342. static int erofs_fscache_register_volume(struct super_block *sb)
  343. {
  344. struct erofs_sb_info *sbi = EROFS_SB(sb);
  345. char *domain_id = sbi->domain_id;
  346. struct fscache_volume *volume;
  347. char *name;
  348. int ret = 0;
  349. name = kasprintf(GFP_KERNEL, "erofs,%s",
  350. domain_id ? domain_id : sbi->fsid);
  351. if (!name)
  352. return -ENOMEM;
  353. volume = fscache_acquire_volume(name, NULL, NULL, 0);
  354. if (IS_ERR_OR_NULL(volume)) {
  355. erofs_err(sb, "failed to register volume for %s", name);
  356. ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
  357. volume = NULL;
  358. }
  359. sbi->volume = volume;
  360. kfree(name);
  361. return ret;
  362. }
/*
 * Create a new domain for sbi->domain_id: register its volume, mount the
 * global pseudo fs on first use, and add the domain to erofs_domain_list.
 * Caller holds erofs_domain_list_lock.
 */
static int erofs_fscache_init_domain(struct super_block *sb)
{
	int err;
	struct erofs_domain *domain;
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	domain = kzalloc(sizeof(struct erofs_domain), GFP_KERNEL);
	if (!domain)
		return -ENOMEM;

	domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
	if (!domain->domain_id) {
		kfree(domain);
		return -ENOMEM;
	}

	err = erofs_fscache_register_volume(sb);
	if (err)
		goto out;

	/* lazily create the shared pseudo mount for anonymous blob inodes */
	if (!erofs_pseudo_mnt) {
		struct vfsmount *mnt = kern_mount(&erofs_anon_fs_type);
		if (IS_ERR(mnt)) {
			err = PTR_ERR(mnt);
			goto out;
		}
		erofs_pseudo_mnt = mnt;
	}

	domain->volume = sbi->volume;
	refcount_set(&domain->ref, 1);
	list_add(&domain->list, &erofs_domain_list);
	sbi->domain = domain;
	return 0;
out:
	kfree(domain->domain_id);
	kfree(domain);
	return err;
}
  397. static int erofs_fscache_register_domain(struct super_block *sb)
  398. {
  399. int err;
  400. struct erofs_domain *domain;
  401. struct erofs_sb_info *sbi = EROFS_SB(sb);
  402. mutex_lock(&erofs_domain_list_lock);
  403. list_for_each_entry(domain, &erofs_domain_list, list) {
  404. if (!strcmp(domain->domain_id, sbi->domain_id)) {
  405. sbi->domain = domain;
  406. sbi->volume = domain->volume;
  407. refcount_inc(&domain->ref);
  408. mutex_unlock(&erofs_domain_list_lock);
  409. return 0;
  410. }
  411. }
  412. err = erofs_fscache_init_domain(sb);
  413. mutex_unlock(&erofs_domain_list_lock);
  414. return err;
  415. }
/*
 * Acquire an fscache cookie named @name on the sb's volume and wrap it in an
 * erofs_fscache context with a backing meta inode.  With
 * EROFS_REG_COOKIE_SHARE the inode lives on the global pseudo mount so it is
 * reachable from any erofs instance; otherwise it belongs to @sb.
 * Returns the context or an ERR_PTR.
 */
static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
						char *name, unsigned int flags)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	struct super_block *isb;
	struct inode *inode;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->node);
	refcount_set(&ctx->ref, 1);

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}
	/* read-only use: no write access to the cache object is needed */
	fscache_use_cookie(cookie, false);

	/*
	 * Allocate anonymous inode in global pseudo mount for shareable blobs,
	 * so that they are accessible among erofs fs instances.
	 */
	isb = flags & EROFS_REG_COOKIE_SHARE ? erofs_pseudo_mnt->mnt_sb : sb;
	inode = new_inode(isb);
	if (!inode) {
		erofs_err(sb, "failed to get anon inode for %s", name);
		ret = -ENOMEM;
		goto err_cookie;
	}

	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
	inode->i_blkbits = EROFS_SB(sb)->blkszbits;
	inode->i_private = ctx;

	ctx->cookie = cookie;
	ctx->inode = inode;
	return ctx;

err_cookie:
	fscache_unuse_cookie(cookie, NULL, NULL);
	fscache_relinquish_cookie(cookie, false);
err:
	kfree(ctx);
	return ERR_PTR(ret);
}
/* Undo erofs_fscache_acquire_cookie(): release the cookie, inode and ctx. */
static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
{
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	iput(ctx->inode);
	kfree(ctx->name);
	kfree(ctx);
}
/*
 * Create a new shared cookie in the sb's domain and link it on
 * erofs_domain_cookies_list.  Caller holds erofs_domain_cookies_lock.
 */
static struct erofs_fscache *erofs_domain_init_cookie(struct super_block *sb,
		char *name, unsigned int flags)
{
	struct erofs_fscache *ctx;
	struct erofs_domain *domain = EROFS_SB(sb)->domain;

	ctx = erofs_fscache_acquire_cookie(sb, name, flags);
	if (IS_ERR(ctx))
		return ctx;

	/* keep the name so erofs_domain_register_cookie() can match it */
	ctx->name = kstrdup(name, GFP_KERNEL);
	if (!ctx->name) {
		erofs_fscache_relinquish_cookie(ctx);
		return ERR_PTR(-ENOMEM);
	}

	/* the cookie pins its domain until erofs_fscache_unregister_cookie() */
	refcount_inc(&domain->ref);
	ctx->domain = domain;
	list_add(&ctx->node, &erofs_domain_cookies_list);
	return ctx;
}
/*
 * Look up (or create) the shared cookie @name within the sb's domain.  With
 * EROFS_REG_COOKIE_NEED_NOEXIST an existing cookie is an -EEXIST error
 * instead of being shared.
 */
static struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
		char *name, unsigned int flags)
{
	struct erofs_fscache *ctx;
	struct erofs_domain *domain = EROFS_SB(sb)->domain;

	/* domain cookies always live on the shared pseudo mount */
	flags |= EROFS_REG_COOKIE_SHARE;
	mutex_lock(&erofs_domain_cookies_lock);
	list_for_each_entry(ctx, &erofs_domain_cookies_list, node) {
		if (ctx->domain != domain || strcmp(ctx->name, name))
			continue;
		if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
			refcount_inc(&ctx->ref);
		} else {
			erofs_err(sb, "%s already exists in domain %s", name,
				  domain->domain_id);
			ctx = ERR_PTR(-EEXIST);
		}
		mutex_unlock(&erofs_domain_cookies_lock);
		return ctx;
	}
	ctx = erofs_domain_init_cookie(sb, name, flags);
	mutex_unlock(&erofs_domain_cookies_lock);
	return ctx;
}
  514. struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
  515. char *name,
  516. unsigned int flags)
  517. {
  518. if (EROFS_SB(sb)->domain_id)
  519. return erofs_domain_register_cookie(sb, name, flags);
  520. return erofs_fscache_acquire_cookie(sb, name, flags);
  521. }
/*
 * Drop a cookie reference.  Non-domain cookies are released directly; domain
 * cookies are unlinked and released on the last put, and the domain
 * reference they held is dropped afterwards, outside the cookies lock.
 */
void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
{
	struct erofs_domain *domain = NULL;

	if (!ctx)
		return;
	if (!ctx->domain)
		return erofs_fscache_relinquish_cookie(ctx);

	mutex_lock(&erofs_domain_cookies_lock);
	if (refcount_dec_and_test(&ctx->ref)) {
		domain = ctx->domain;
		list_del(&ctx->node);
		erofs_fscache_relinquish_cookie(ctx);
	}
	mutex_unlock(&erofs_domain_cookies_lock);
	if (domain)
		erofs_fscache_domain_put(domain);
}
/*
 * Mount-time setup for fscache mode: register the domain or volume, then the
 * cookie for the primary data blob (sbi->fsid) into sbi->dif0.fscache.
 */
int erofs_fscache_register_fs(struct super_block *sb)
{
	int ret;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	unsigned int flags = 0;

	if (sbi->domain_id)
		ret = erofs_fscache_register_domain(sb);
	else
		ret = erofs_fscache_register_volume(sb);
	if (ret)
		return ret;

	/*
	 * When shared domain is enabled, using NEED_NOEXIST to guarantee
	 * the primary data blob (aka fsid) is unique in the shared domain.
	 *
	 * For non-shared-domain case, fscache_acquire_volume() invoked by
	 * erofs_fscache_register_volume() has already guaranteed
	 * the uniqueness of primary data blob.
	 *
	 * Acquired domain/volume will be relinquished in kill_sb() on error.
	 */
	if (sbi->domain_id)
		flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
	fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
	if (IS_ERR(fscache))
		return PTR_ERR(fscache);

	sbi->dif0.fscache = fscache;
	return 0;
}
/*
 * Umount-time teardown: release the primary blob cookie, then either drop
 * the domain reference (which relinquishes the shared volume on last put)
 * or relinquish the sb-private volume directly.
 */
void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	erofs_fscache_unregister_cookie(sbi->dif0.fscache);

	if (sbi->domain)
		erofs_fscache_domain_put(sbi->domain);
	else
		fscache_relinquish_volume(sbi->volume, NULL, false);

	sbi->dif0.fscache = NULL;
	sbi->volume = NULL;
	sbi->domain = NULL;
}