inode.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169
  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU GPL.
  5. See the file COPYING.
  6. */
  7. #include "fuse_i.h"
  8. #include <linux/pagemap.h>
  9. #include <linux/slab.h>
  10. #include <linux/file.h>
  11. #include <linux/seq_file.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/moduleparam.h>
  15. #include <linux/fs_context.h>
  16. #include <linux/fs_parser.h>
  17. #include <linux/statfs.h>
  18. #include <linux/random.h>
  19. #include <linux/sched.h>
  20. #include <linux/exportfs.h>
  21. #include <linux/posix_acl.h>
  22. #include <linux/pid_namespace.h>
  23. #include <uapi/linux/magic.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

/* Slab cache backing all struct fuse_inode allocations. */
static struct kmem_cache *fuse_inode_cachep;
/* List of all fuse connections; protected by fuse_mutex. */
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, const struct kernel_param *kp);

/* Writable via module parameter; validated by set_global_limit(). */
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

/* Writable via module parameter; validated by set_global_limit(). */
unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)

#ifdef CONFIG_BLOCK
static struct file_system_type fuseblk_fs_type;
#endif
  53. struct fuse_forget_link *fuse_alloc_forget(void)
  54. {
  55. return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
  56. }
  57. static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
  58. {
  59. struct fuse_submount_lookup *sl;
  60. sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
  61. if (!sl)
  62. return NULL;
  63. sl->forget = fuse_alloc_forget();
  64. if (!sl->forget)
  65. goto out_free;
  66. return sl;
  67. out_free:
  68. kfree(sl);
  69. return NULL;
  70. }
/*
 * ->alloc_inode: allocate and initialize a fuse_inode for @sb.
 * Returns the embedded VFS inode, or NULL on allocation failure.
 */
static struct inode *fuse_alloc_inode(struct super_block *sb)
{
	struct fuse_inode *fi;

	fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;

	fi->i_time = 0;
	fi->inval_mask = ~0;	/* all cached attributes start out invalid */
	fi->nodeid = 0;
	fi->nlookup = 0;
	fi->attr_version = 0;
	fi->orig_ino = 0;
	fi->state = 0;
	fi->submount_lookup = NULL;
	mutex_init(&fi->mutex);
	spin_lock_init(&fi->lock);

	/* Preallocated so eviction never has to allocate to send FORGET. */
	fi->forget = fuse_alloc_forget();
	if (!fi->forget)
		goto out_free;

	if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi))
		goto out_free_forget;

	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_inode_backing_set(fi, NULL);

	return &fi->inode;

out_free_forget:
	kfree(fi->forget);
out_free:
	kmem_cache_free(fuse_inode_cachep, fi);
	return NULL;
}
/*
 * ->free_inode: release everything fuse_alloc_inode() set up.
 */
static void fuse_free_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	mutex_destroy(&fi->mutex);
	kfree(fi->forget);	/* may be NULL if handed to the forget queue */
#ifdef CONFIG_FUSE_DAX
	kfree(fi->dax);
#endif
	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_backing_put(fuse_inode_backing(fi));
	kmem_cache_free(fuse_inode_cachep, fi);
}
/*
 * Drop one reference on @sl; on the final put, queue a FORGET for the
 * submount's nodeid and free the lookup structure.
 */
static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
					 struct fuse_submount_lookup *sl)
{
	if (!refcount_dec_and_test(&sl->count))
		return;

	fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
	sl->forget = NULL;	/* ownership passed to the forget queue */
	kfree(sl);
}
/*
 * ->evict_inode: final teardown of an in-core inode.  Drops the page
 * cache, tells the server to forget remaining lookups, and releases
 * submount state (only while the superblock is still active).
 */
static void fuse_evict_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Will write inode on close/munmap and in all other dirtiers */
	WARN_ON(inode->i_state & I_DIRTY_INODE);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (FUSE_IS_DAX(inode))
			fuse_dax_inode_cleanup(inode);
		if (fi->nlookup) {
			/* Hand the preallocated forget link to the queue. */
			fuse_queue_forget(fc, fi->forget, fi->nodeid,
					  fi->nlookup);
			fi->forget = NULL;
		}
		if (fi->submount_lookup) {
			fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
			fi->submount_lookup = NULL;
		}
	}
	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
		/* All write state should be gone by eviction time. */
		WARN_ON(fi->iocachectr != 0);
		WARN_ON(!list_empty(&fi->write_files));
		WARN_ON(!list_empty(&fi->queued_writes));
	}
}
  149. static int fuse_reconfigure(struct fs_context *fsc)
  150. {
  151. struct super_block *sb = fsc->root->d_sb;
  152. sync_filesystem(sb);
  153. if (fsc->sb_flags & SB_MANDLOCK)
  154. return -EINVAL;
  155. return 0;
  156. }
  157. /*
  158. * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
  159. * so that it will fit.
  160. */
  161. static ino_t fuse_squash_ino(u64 ino64)
  162. {
  163. ino_t ino = (ino_t) ino64;
  164. if (sizeof(ino_t) < sizeof(u64))
  165. ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
  166. return ino;
  167. }
/*
 * Refresh cached attributes from a server reply.  Caller must hold
 * fi->lock.  Bits set in @cache_mask name attributes owned by the local
 * cache (writeback mode) whose server-supplied values are skipped here.
 * @sx, when non-NULL, carries statx extensions (btime).
 */
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   struct fuse_statx *sx,
				   u64 attr_valid, u32 cache_mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	lockdep_assert_held(&fi->lock);

	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	fi->i_time = attr_valid;
	/* Clear basic stats from invalid mask */
	set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);

	inode->i_ino = fuse_squash_ino(attr->ino);
	inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid = make_kuid(fc->user_ns, attr->uid);
	inode->i_gid = make_kgid(fc->user_ns, attr->gid);
	inode->i_blocks = attr->blocks;

	/* Sanitize nsecs */
	attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
	attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
	attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);

	inode_set_atime(inode, attr->atime, attr->atimensec);
	/* mtime from server may be stale due to local buffered write */
	if (!(cache_mask & STATX_MTIME)) {
		inode_set_mtime(inode, attr->mtime, attr->mtimensec);
	}
	if (!(cache_mask & STATX_CTIME)) {
		inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	}

	if (sx) {
		/* Sanitize nsecs */
		sx->btime.tv_nsec =
			min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);

		/*
		 * Btime has been queried, cache is valid (whether or not btime
		 * is available or not) so clear STATX_BTIME from inval_mask.
		 *
		 * Availability of the btime attribute is indicated in
		 * FUSE_I_BTIME
		 */
		set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
		if (sx->mask & STATX_BTIME) {
			set_bit(FUSE_I_BTIME, &fi->state);
			fi->i_btime.tv_sec = sx->btime.tv_sec;
			fi->i_btime.tv_nsec = sx->btime.tv_nsec;
		}
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions. This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;

	/*
	 * We are refreshing inode data and it is possible that another
	 * client set suid/sgid or security.capability xattr. So clear
	 * S_NOSEC. Ideally, we could have cleared it only if suid/sgid
	 * was set or if security.capability xattr was set. But we don't
	 * know if security.capability has been set or not. So clear it
	 * anyway. Its less efficient but should be safe.
	 */
	inode->i_flags &= ~S_NOSEC;
}
  238. u32 fuse_get_cache_mask(struct inode *inode)
  239. {
  240. struct fuse_conn *fc = get_fuse_conn(inode);
  241. if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
  242. return 0;
  243. return STATX_MTIME | STATX_CTIME | STATX_SIZE;
  244. }
/*
 * Apply a server attribute reply to the inode, then invalidate the page
 * cache when the data may have changed behind our back.  Stale replies
 * (older attr_version, or size currently unstable) are discarded.
 */
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_statx *sx,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	u32 cache_mask;
	loff_t oldsize;
	struct timespec64 old_mtime;

	spin_lock(&fi->lock);
	/*
	 * In case of writeback_cache enabled, writes update mtime, ctime and
	 * may update i_size. In these cases trust the cached value in the
	 * inode.
	 */
	cache_mask = fuse_get_cache_mask(inode);
	if (cache_mask & STATX_SIZE)
		attr->size = i_size_read(inode);
	if (cache_mask & STATX_MTIME) {
		attr->mtime = inode_get_mtime_sec(inode);
		attr->mtimensec = inode_get_mtime_nsec(inode);
	}
	if (cache_mask & STATX_CTIME) {
		attr->ctime = inode_get_ctime_sec(inode);
		attr->ctimensec = inode_get_ctime_nsec(inode);
	}

	/* Drop stale replies: a newer update already landed, or a
	 * size-changing operation is in flight. */
	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fi->lock);
		return;
	}

	old_mtime = inode_get_mtime(inode);
	fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!(cache_mask & STATX_SIZE))
		i_size_write(inode, attr->size);
	spin_unlock(&fi->lock);

	if (!cache_mask && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			if (!fc->explicit_inval_data)
				inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec64 new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec64_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}

	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_dontcache(inode, attr->flags);
}
  311. static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
  312. u64 nodeid)
  313. {
  314. sl->nodeid = nodeid;
  315. refcount_set(&sl->count, 1);
  316. }
/*
 * First-time setup of a fresh inode from server-supplied attributes:
 * file type, size, timestamps and per-type operations.
 */
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
			    struct fuse_conn *fc)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode_set_mtime(inode, attr->mtime, attr->mtimensec);
	inode_set_ctime(inode, attr->ctime, attr->ctimensec);
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode, attr->flags);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	} else
		BUG();	/* server sent an unrecognized file type */
	/*
	 * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
	 * so they see the exact same behavior as before.
	 */
	if (!fc->posix_acl)
		inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
}
  345. static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
  346. {
  347. u64 nodeid = *(u64 *) _nodeidp;
  348. if (get_node_id(inode) == nodeid)
  349. return 1;
  350. else
  351. return 0;
  352. }
  353. static int fuse_inode_set(struct inode *inode, void *_nodeidp)
  354. {
  355. u64 nodeid = *(u64 *) _nodeidp;
  356. get_fuse_inode(inode)->nodeid = nodeid;
  357. return 0;
  358. }
/*
 * Find or create the in-core inode for @nodeid, refresh its attributes
 * and bump its lookup count.  Submount roots bypass the inode hash
 * because their nodeid is not unique.  Returns NULL on allocation
 * failure.
 */
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	/*
	 * Auto mount points get their node id from the submount root, which is
	 * not a unique identifier within this filesystem.
	 *
	 * To avoid conflicts, do not place submount points into the inode hash
	 * table.
	 */
	if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
	    S_ISDIR(attr->mode)) {
		struct fuse_inode *fi;

		inode = new_inode(sb);
		if (!inode)
			return NULL;

		fuse_init_inode(inode, attr, fc);
		fi = get_fuse_inode(inode);
		fi->nodeid = nodeid;
		fi->submount_lookup = fuse_alloc_submount_lookup();
		if (!fi->submount_lookup) {
			iput(inode);
			return NULL;
		}
		/* Sets nlookup = 1 on fi->submount_lookup->nlookup */
		fuse_init_submount_lookup(fi->submount_lookup, nodeid);
		inode->i_flags |= S_AUTOMOUNT;
		goto done;
	}

retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr, fc);
		unlock_new_inode(inode);
	} else if (fuse_stale_inode(inode, generation, attr)) {
		/* nodeid was reused, any I/O on the old inode should fail */
		fuse_make_bad(inode);
		if (inode != d_inode(sb->s_root)) {
			/* Unhash the stale inode and create a fresh one. */
			remove_inode_hash(inode);
			iput(inode);
			goto retry;
		}
	}
	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->nlookup++;
	spin_unlock(&fi->lock);
done:
	fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);
	return inode;
}
  420. struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
  421. struct fuse_mount **fm)
  422. {
  423. struct fuse_mount *fm_iter;
  424. struct inode *inode;
  425. WARN_ON(!rwsem_is_locked(&fc->killsb));
  426. list_for_each_entry(fm_iter, &fc->mounts, fc_entry) {
  427. if (!fm_iter->sb)
  428. continue;
  429. inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid);
  430. if (inode) {
  431. if (fm)
  432. *fm = fm_iter;
  433. return inode;
  434. }
  435. }
  436. return NULL;
  437. }
/*
 * Server-initiated invalidation: drop cached attributes/acls of the
 * inode with @nodeid and, when @offset >= 0, invalidate the given page
 * range (@len <= 0 means to end of file).  Returns -ENOENT when the
 * inode is not cached.
 */
int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct fuse_inode *fi;
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = fuse_ilookup(fc, nodeid, NULL);
	if (!inode)
		return -ENOENT;

	fi = get_fuse_inode(inode);
	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	spin_unlock(&fi->lock);

	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_SHIFT;
		if (len <= 0)
			pg_end = -1;	/* invalidate through EOF */
		else
			pg_end = (offset + len - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}
  466. bool fuse_lock_inode(struct inode *inode)
  467. {
  468. bool locked = false;
  469. if (!get_fuse_conn(inode)->parallel_dirops) {
  470. mutex_lock(&get_fuse_inode(inode)->mutex);
  471. locked = true;
  472. }
  473. return locked;
  474. }
  475. void fuse_unlock_inode(struct inode *inode, bool locked)
  476. {
  477. if (locked)
  478. mutex_unlock(&get_fuse_inode(inode)->mutex);
  479. }
/*
 * ->umount_begin (forced unmount): abort the connection so pending and
 * future requests fail instead of blocking the unmount.
 */
static void fuse_umount_begin(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->no_force_umount)
		return;

	fuse_abort_conn(fc);

	// Only retire block-device-based superblocks.
	if (sb->s_bdev != NULL)
		retire_super(sb);
}
/*
 * Send FUSE_DESTROY to the server on unmount.  Forced and credential-
 * less so it is delivered even while the connection is going down.
 * Only sent if the connection completed INIT.
 */
static void fuse_send_destroy(struct fuse_mount *fm)
{
	if (fm->fc->conn_init) {
		FUSE_ARGS(args);

		args.opcode = FUSE_DESTROY;
		args.force = true;
		args.nocreds = true;

		fuse_simple_request(fm, &args);
	}
}
  500. static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
  501. {
  502. stbuf->f_type = FUSE_SUPER_MAGIC;
  503. stbuf->f_bsize = attr->bsize;
  504. stbuf->f_frsize = attr->frsize;
  505. stbuf->f_blocks = attr->blocks;
  506. stbuf->f_bfree = attr->bfree;
  507. stbuf->f_bavail = attr->bavail;
  508. stbuf->f_files = attr->files;
  509. stbuf->f_ffree = attr->ffree;
  510. stbuf->f_namelen = attr->namelen;
  511. /* fsid is left zero */
  512. }
/*
 * ->statfs: forward FUSE_STATFS to the server and convert the reply.
 * Processes not allowed to access this mount get only f_type filled in.
 */
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fm->fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.in_numargs = 0;
	args.opcode = FUSE_STATFS;
	args.nodeid = get_node_id(d_inode(dentry));
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}
/*
 * Allocate a new sync bucket.  __GFP_NOFAIL means the allocator retries
 * until it succeeds, so the NULL check below is defensive only.
 */
static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
{
	struct fuse_sync_bucket *bucket;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
	if (bucket) {
		init_waitqueue_head(&bucket->waitq);
		/* Initial active count */
		atomic_set(&bucket->count, 1);
	}
	return bucket;
}
/*
 * Wait until all writes counted in the current sync bucket complete.
 * A fresh bucket is installed first, so writes started after this point
 * are tracked separately and do not extend the wait.
 */
static void fuse_sync_fs_writes(struct fuse_conn *fc)
{
	struct fuse_sync_bucket *bucket, *new_bucket;
	int count;

	new_bucket = fuse_sync_bucket_alloc();
	spin_lock(&fc->lock);
	bucket = rcu_dereference_protected(fc->curr_bucket, 1);
	count = atomic_read(&bucket->count);
	WARN_ON(count < 1);
	/* No outstanding writes? */
	if (count == 1) {
		spin_unlock(&fc->lock);
		kfree(new_bucket);
		return;
	}

	/*
	 * Completion of new bucket depends on completion of this bucket, so add
	 * one more count.
	 */
	atomic_inc(&new_bucket->count);
	rcu_assign_pointer(fc->curr_bucket, new_bucket);
	spin_unlock(&fc->lock);
	/*
	 * Drop initial active count. At this point if all writes in this and
	 * ancestor buckets complete, the count will go to zero and this task
	 * will be woken up.
	 */
	atomic_dec(&bucket->count);
	wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);

	/* Drop temp count on descendant bucket */
	fuse_sync_bucket_dec(new_bucket);
	kfree_rcu(bucket, rcu);
}
/*
 * ->sync_fs: wait out in-flight writes, then issue FUSE_SYNCFS.  A
 * server replying -ENOSYS permanently disables further SYNCFS requests.
 */
static int fuse_sync_fs(struct super_block *sb, int wait)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct fuse_syncfs_in inarg;
	FUSE_ARGS(args);
	int err;

	/*
	 * Userspace cannot handle the wait == 0 case. Avoid a
	 * gratuitous roundtrip.
	 */
	if (!wait)
		return 0;

	/* The filesystem is being unmounted. Nothing to do. */
	if (!sb->s_root)
		return 0;

	if (!fc->sync_fs)
		return 0;

	fuse_sync_fs_writes(fc);

	memset(&inarg, 0, sizeof(inarg));
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.opcode = FUSE_SYNCFS;
	args.nodeid = get_node_id(sb->s_root->d_inode);
	args.out_numargs = 0;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		/* Server doesn't implement SYNCFS; don't ask again. */
		fc->sync_fs = 0;
		err = 0;
	}

	return err;
}
/* Mount option tokens recognized by fuse_parse_param(). */
enum {
	OPT_SOURCE,
	OPT_SUBTYPE,
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};
/* Mount option table for the fs_context parameter parser. */
static const struct fs_parameter_spec fuse_fs_parameters[] = {
	fsparam_string	("source",		OPT_SOURCE),
	fsparam_u32	("fd",			OPT_FD),
	fsparam_u32oct	("rootmode",		OPT_ROOTMODE),
	fsparam_uid	("user_id",		OPT_USER_ID),
	fsparam_gid	("group_id",		OPT_GROUP_ID),
	fsparam_flag	("default_permissions",	OPT_DEFAULT_PERMISSIONS),
	fsparam_flag	("allow_other",		OPT_ALLOW_OTHER),
	fsparam_u32	("max_read",		OPT_MAX_READ),
	fsparam_u32	("blksize",		OPT_BLKSIZE),
	fsparam_string	("subtype",		OPT_SUBTYPE),
	{}
};
/*
 * Parse one mount option into the fuse_fs_context.  On reconfigure,
 * options from the legacy mount(2) path are ignored; the new API
 * rejects them outright.
 */
static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;
	kuid_t kuid;
	kgid_t kgid;

	if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		/*
		 * Ignore options coming from mount(MS_REMOUNT) for backward
		 * compatibility.
		 */
		if (fsc->oldapi)
			return 0;

		return invalfc(fsc, "No changes allowed in reconfigure");
	}

	opt = fs_parse(fsc, fuse_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_SOURCE:
		if (fsc->source)
			return invalfc(fsc, "Multiple sources specified");
		fsc->source = param->string;
		param->string = NULL;	/* ownership moved to fsc */
		break;

	case OPT_SUBTYPE:
		if (ctx->subtype)
			return invalfc(fsc, "Multiple subtypes specified");
		ctx->subtype = param->string;
		param->string = NULL;	/* ownership moved to ctx */
		return 0;

	case OPT_FD:
		ctx->fd = result.uint_32;
		ctx->fd_present = true;
		break;

	case OPT_ROOTMODE:
		if (!fuse_valid_type(result.uint_32))
			return invalfc(fsc, "Invalid rootmode");
		ctx->rootmode = result.uint_32;
		ctx->rootmode_present = true;
		break;

	case OPT_USER_ID:
		kuid = result.uid;
		/*
		 * The requested uid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kuid_has_mapping(fsc->user_ns, kuid))
			return invalfc(fsc, "Invalid user_id");
		ctx->user_id = kuid;
		ctx->user_id_present = true;
		break;

	case OPT_GROUP_ID:
		kgid = result.gid;
		/*
		 * The requested gid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kgid_has_mapping(fsc->user_ns, kgid))
			return invalfc(fsc, "Invalid group_id");
		ctx->group_id = kgid;
		ctx->group_id_present = true;
		break;

	case OPT_DEFAULT_PERMISSIONS:
		ctx->default_permissions = true;
		break;

	case OPT_ALLOW_OTHER:
		ctx->allow_other = true;
		break;

	case OPT_MAX_READ:
		ctx->max_read = result.uint_32;
		break;

	case OPT_BLKSIZE:
		if (!ctx->is_bdev)
			return invalfc(fsc, "blksize only supported for fuseblk");
		ctx->blksize = result.uint_32;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
  722. static void fuse_free_fsc(struct fs_context *fsc)
  723. {
  724. struct fuse_fs_context *ctx = fsc->fs_private;
  725. if (ctx) {
  726. kfree(ctx->subtype);
  727. kfree(ctx);
  728. }
  729. }
/*
 * Emit mount options for /proc/mounts et al.
 *
 * The "legacy" options (user_id, group_id, ...) are shown only when
 * the connection was set up through the classic mount interface
 * (fc->legacy_opts_show); the DAX mode is always reported when the
 * kernel is built with FUSE DAX support.
 */
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc->legacy_opts_show) {
		/* ids are translated into the reader's user namespace view */
		seq_printf(m, ",user_id=%u",
			   from_kuid_munged(fc->user_ns, fc->user_id));
		seq_printf(m, ",group_id=%u",
			   from_kgid_munged(fc->user_ns, fc->group_id));
		if (fc->default_permissions)
			seq_puts(m, ",default_permissions");
		if (fc->allow_other)
			seq_puts(m, ",allow_other");
		if (fc->max_read != ~0)	/* ~0 is the "unlimited" default */
			seq_printf(m, ",max_read=%u", fc->max_read);
		if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
			seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	}
#ifdef CONFIG_FUSE_DAX
	if (fc->dax_mode == FUSE_DAX_ALWAYS)
		seq_puts(m, ",dax=always");
	else if (fc->dax_mode == FUSE_DAX_NEVER)
		seq_puts(m, ",dax=never");
	else if (fc->dax_mode == FUSE_DAX_INODE_USER)
		seq_puts(m, ",dax=inode");
#endif
	return 0;
}
/*
 * Initialize an input queue: zero the structure, then set up the lock,
 * waitqueue and request lists.  The queue starts out connected.
 */
static void fuse_iqueue_init(struct fuse_iqueue *fiq,
			     const struct fuse_iqueue_ops *ops,
			     void *priv)
{
	memset(fiq, 0, sizeof(struct fuse_iqueue));
	spin_lock_init(&fiq->lock);
	init_waitqueue_head(&fiq->waitq);
	INIT_LIST_HEAD(&fiq->pending);
	INIT_LIST_HEAD(&fiq->interrupts);
	/* empty forget list: the tail pointer refers back to the head */
	fiq->forget_list_tail = &fiq->forget_list_head;
	fiq->connected = 1;
	fiq->ops = ops;
	fiq->priv = priv;
}
  772. static void fuse_pqueue_init(struct fuse_pqueue *fpq)
  773. {
  774. unsigned int i;
  775. spin_lock_init(&fpq->lock);
  776. for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
  777. INIT_LIST_HEAD(&fpq->processing[i]);
  778. INIT_LIST_HEAD(&fpq->io);
  779. fpq->connected = 1;
  780. }
/*
 * Initialize a fuse connection and attach its first mount @fm to it.
 *
 * All state starts out zeroed; locks, lists and counters are then set
 * up explicitly.  References are taken on the current pid namespace
 * and on @user_ns.  The connection starts out connected but blocked
 * and not yet initialized (FUSE_INIT has not completed).
 */
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	spin_lock_init(&fc->bg_lock);
	init_rwsem(&fc->killsb);
	refcount_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	atomic64_set(&fc->khctr, 0);
	fc->polled_files = RB_ROOT;
	fc->blocked = 0;
	fc->initialized = 0;
	fc->connected = 1;
	/* attr_version starts at 1 so 0 can mean "never set" */
	atomic64_set(&fc->attr_version, 1);
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
	fc->user_ns = get_user_ns(user_ns);
	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
	fc->max_pages_limit = FUSE_MAX_MAX_PAGES;

	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		fuse_backing_files_init(fc);

	INIT_LIST_HEAD(&fc->mounts);
	list_add(&fm->fc_entry, &fc->mounts);
	fm->fc = fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
/*
 * Final connection teardown, deferred past an RCU grace period by
 * call_rcu() from fuse_conn_put(): drop the user namespace reference
 * and invoke the connection-specific release callback.
 */
static void delayed_release(struct rcu_head *p)
{
	struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);

	put_user_ns(fc->user_ns);
	fc->release(fc);
}
/*
 * Drop a reference on the connection.  When the last reference goes,
 * free DAX state, release the input queue, drop the pid namespace and
 * any pending sync bucket, free passthrough state, and defer the final
 * release to an RCU grace period (see delayed_release()).
 */
void fuse_conn_put(struct fuse_conn *fc)
{
	if (refcount_dec_and_test(&fc->count)) {
		struct fuse_iqueue *fiq = &fc->iq;
		struct fuse_sync_bucket *bucket;

		if (IS_ENABLED(CONFIG_FUSE_DAX))
			fuse_dax_conn_free(fc);
		if (fiq->ops->release)
			fiq->ops->release(fiq);
		put_pid_ns(fc->pid_ns);
		bucket = rcu_dereference_protected(fc->curr_bucket, 1);
		if (bucket) {
			/* only our own count should remain at this point */
			WARN_ON(atomic_read(&bucket->count) != 1);
			kfree(bucket);
		}
		if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
			fuse_backing_files_free(fc);
		call_rcu(&fc->rcu, delayed_release);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
/* Take an extra reference on the connection; paired with fuse_conn_put(). */
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
	refcount_inc(&fc->count);
	return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
  850. static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
  851. {
  852. struct fuse_attr attr;
  853. memset(&attr, 0, sizeof(attr));
  854. attr.mode = mode;
  855. attr.ino = FUSE_ROOT_ID;
  856. attr.nlink = 1;
  857. return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr, 0, 0);
  858. }
/* Decoded NFS file handle: identifies one instance of a fuse inode. */
struct fuse_inode_handle {
	u64 nodeid;	/* fuse nodeid of the target inode */
	u32 generation;	/* guards against nodeid reuse */
};
/*
 * Turn a decoded file handle into a dentry.
 *
 * First try the inode cache; if the inode is not cached and the server
 * supports exporting, look it up by nodeid via a "." lookup.  The
 * generation number must match, otherwise the handle is stale.
 */
static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		/* server answered with a different inode than asked for */
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	/* non-root entries must be revalidated before reuse */
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
/*
 * Encode an NFS file handle: nodeid split into two 32-bit words plus
 * the generation, optionally followed by the same triple for @parent.
 * Returns the FILEID_* type, or FILEID_INVALID when the caller's
 * buffer is too small (*max_len is then set to the required length).
 */
static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;	/* words needed with/without parent */
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
}
  927. static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
  928. struct fid *fid, int fh_len, int fh_type)
  929. {
  930. struct fuse_inode_handle handle;
  931. if ((fh_type != FILEID_INO64_GEN &&
  932. fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
  933. return NULL;
  934. handle.nodeid = (u64) fid->raw[0] << 32;
  935. handle.nodeid |= (u64) fid->raw[1];
  936. handle.generation = fid->raw[2];
  937. return fuse_get_dentry(sb, &handle);
  938. }
  939. static struct dentry *fuse_fh_to_parent(struct super_block *sb,
  940. struct fid *fid, int fh_len, int fh_type)
  941. {
  942. struct fuse_inode_handle parent;
  943. if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
  944. return NULL;
  945. parent.nodeid = (u64) fid->raw[3] << 32;
  946. parent.nodeid |= (u64) fid->raw[4];
  947. parent.generation = fid->raw[5];
  948. return fuse_get_dentry(sb, &parent);
  949. }
/*
 * export_operations->get_parent: look up ".." on the server.  Only
 * possible when the server advertised export support at INIT time.
 */
static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &dotdot_name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	/* non-root entries must be revalidated before reuse */
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}
/* only for fid encoding; no support for file handle */
static const struct export_operations fuse_export_fid_operations = {
	.encode_fh	= fuse_encode_fh,
};
/* Full NFS export support; installed by fuse_sb_defaults(). */
static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};
/* Superblock operations shared by all fuse based filesystems. */
static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.free_inode	= fuse_free_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	.drop_inode	= generic_delete_inode,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.sync_fs	= fuse_sync_fs,
	.show_options	= fuse_show_options,
};
/*
 * Apply the default and an upper bound to a global background-request
 * limit.  Zero means "use the default", which scales with available
 * RAM; the result is always clamped to below 2^16.
 */
static void sanitize_global_limit(unsigned *limit)
{
	/*
	 * The default maximum number of async requests is calculated to consume
	 * 1/2^13 of the total memory, assuming 392 bytes per request.
	 */
	if (*limit == 0)
		*limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;

	if (*limit >= 1 << 16)
		*limit = (1 << 16) - 1;
}
/*
 * Module parameter setter for the user background-request limits:
 * parse the value as an unsigned int, then sanitize it.
 */
static int set_global_limit(const char *val, const struct kernel_param *kp)
{
	int rv;

	rv = param_set_uint(val, kp);
	if (rv)
		return rv;

	sanitize_global_limit((unsigned *)kp->arg);

	return 0;
}
/*
 * Apply the background request limits from the INIT reply.  Since
 * protocol minor 13 the server may set max_background and
 * congestion_threshold; servers without CAP_SYS_ADMIN are capped by
 * the module-wide user limits.
 */
static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	spin_lock(&fc->bg_lock);
	if (arg->max_background) {
		fc->max_background = arg->max_background;

		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;

		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
	spin_unlock(&fc->bg_lock);
}
/* FUSE_INIT request bundle: args descriptor plus in/out payloads. */
struct fuse_init_args {
	struct fuse_args args;
	struct fuse_init_in in;
	struct fuse_init_out out;
};
/*
 * Completion handler for the FUSE_INIT request.
 *
 * Negotiates the protocol minor version and feature flags with the
 * userspace server, applies the result to the connection, and finally
 * marks it initialized, waking up everybody waiting on
 * fc->blocked_waitq.  On version mismatch, request error, or invalid
 * feature combinations, the connection is put into error state instead.
 */
static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
			       int error)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
	struct fuse_init_out *arg = &ia->out;
	bool ok = true;

	if (error || arg->major != FUSE_KERNEL_VERSION)
		ok = false;
	else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			u64 flags = arg->flags;

			/* extended feature bits live in flags2 (upper 32) */
			if (flags & FUSE_INIT_EXT)
				flags |= (u64) arg->flags2 << 32;

			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				/* before 7.17 flock was tied to POSIX locks */
				if (!(flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			else if (flags & FUSE_EXPLICIT_INVAL_DATA)
				fc->explicit_inval_data = 1;
			if (flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fm->sb->s_time_gran = arg->time_gran;
			if ((flags & FUSE_POSIX_ACL)) {
				/* ACL checking requires in-kernel permissions */
				fc->default_permissions = 1;
				fc->posix_acl = 1;
			}
			if (flags & FUSE_CACHE_SYMLINKS)
				fc->cache_symlinks = 1;
			if (flags & FUSE_ABORT_ERROR)
				fc->abort_err = 1;
			if (flags & FUSE_MAX_PAGES) {
				/* clamp server's value to [1, max_pages_limit] */
				fc->max_pages =
					min_t(unsigned int, fc->max_pages_limit,
					max_t(unsigned int, arg->max_pages, 1));
			}
			if (IS_ENABLED(CONFIG_FUSE_DAX)) {
				if (flags & FUSE_MAP_ALIGNMENT &&
				    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
					ok = false;
				}
				if (flags & FUSE_HAS_INODE_DAX)
					fc->inode_dax = 1;
			}
			if (flags & FUSE_HANDLE_KILLPRIV_V2) {
				fc->handle_killpriv_v2 = 1;
				fm->sb->s_flags |= SB_NOSEC;
			}
			if (flags & FUSE_SETXATTR_EXT)
				fc->setxattr_ext = 1;
			if (flags & FUSE_SECURITY_CTX)
				fc->init_security = 1;
			if (flags & FUSE_CREATE_SUPP_GROUP)
				fc->create_supp_group = 1;
			if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
				fc->direct_io_allow_mmap = 1;
			/*
			 * max_stack_depth is the max stack depth of FUSE fs,
			 * so it has to be at least 1 to support passthrough
			 * to backing files.
			 *
			 * with max_stack_depth > 1, the backing files can be
			 * on a stacked fs (e.g. overlayfs) themselves and with
			 * max_stack_depth == 1, FUSE fs can be stacked as the
			 * underlying fs of a stacked fs (e.g. overlayfs).
			 *
			 * Also don't allow the combination of FUSE_PASSTHROUGH
			 * and FUSE_WRITEBACK_CACHE, current design doesn't handle
			 * them together.
			 */
			if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) &&
			    (flags & FUSE_PASSTHROUGH) &&
			    arg->max_stack_depth > 0 &&
			    arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH &&
			    !(flags & FUSE_WRITEBACK_CACHE)) {
				fc->passthrough = 1;
				fc->max_stack_depth = arg->max_stack_depth;
				fm->sb->s_stack_depth = arg->max_stack_depth;
			}
			if (flags & FUSE_NO_EXPORT_SUPPORT)
				fm->sb->s_export_op = &fuse_export_fid_operations;
			if (flags & FUSE_ALLOW_IDMAP) {
				/* idmapped mounts require default_permissions */
				if (fc->default_permissions)
					fm->sb->s_iflags &= ~SB_I_NOIDMAP;
				else
					ok = false;
			}
		} else {
			/* ancient (< 7.6) servers: conservative defaults */
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fm->sb->s_bdi->ra_pages =
				min(fm->sb->s_bdi->ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	kfree(ia);

	if (!ok) {
		fc->conn_init = 0;
		fc->conn_error = 1;
	}

	/* unblock everybody waiting for initialization to finish */
	fuse_set_initialized(fc);
	wake_up_all(&fc->blocked_waitq);
}
/*
 * Send the FUSE_INIT request announcing every feature this kernel
 * supports.  The request runs in the background; the reply is handled
 * by process_init_reply().  Allocation uses __GFP_NOFAIL because there
 * is no way to report failure to the caller here.
 */
void fuse_send_init(struct fuse_mount *fm)
{
	struct fuse_init_args *ia;
	u64 flags;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);

	ia->in.major = FUSE_KERNEL_VERSION;
	ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
	ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
	flags =
		FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
		FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
		FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
		FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
		FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
#ifdef CONFIG_FUSE_DAX
	if (fm->fc->dax)
		flags |= FUSE_MAP_ALIGNMENT;
	if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
		flags |= FUSE_HAS_INODE_DAX;
#endif
	if (fm->fc->auto_submounts)
		flags |= FUSE_SUBMOUNTS;
	if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		flags |= FUSE_PASSTHROUGH;

	/* the upper 32 feature bits are advertised via flags2 */
	ia->in.flags = flags;
	ia->in.flags2 = flags >> 32;

	ia->args.opcode = FUSE_INIT;
	ia->args.in_numargs = 1;
	ia->args.in_args[0].size = sizeof(ia->in);
	ia->args.in_args[0].value = &ia->in;
	ia->args.out_numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5. Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	ia->args.out_argvar = true;
	ia->args.out_args[0].size = sizeof(ia->out);
	ia->args.out_args[0].value = &ia->out;
	ia->args.force = true;
	ia->args.nocreds = true;
	ia->args.end = process_init_reply;

	if (fuse_simple_background(fm, &ia->args, GFP_KERNEL) != 0)
		process_init_reply(fm, &ia->args, -ENOTCONN);
}
EXPORT_SYMBOL_GPL(fuse_send_init);
/* Default fc->release callback: plain kfree of the connection. */
void fuse_free_conn(struct fuse_conn *fc)
{
	WARN_ON(!list_empty(&fc->devices));
	kfree(fc);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);
/*
 * Set up a private backing_dev_info for the superblock, named after
 * the fuse device number (with a "-fuseblk" suffix for block device
 * based mounts), and configure its writeback behavior.
 */
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
	int err;
	char *suffix = "";

	if (sb->s_bdev) {
		suffix = "-fuseblk";
		/*
		 * sb->s_bdi points to blkdev's bdi however we want to redirect
		 * it to our private bdi...
		 */
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
	err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
				   MINOR(fc->dev), suffix);
	if (err)
		return err;

	/* fuse does its own writeback accounting */
	sb->s_bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
	sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT;

	/*
	 * For a single fuse filesystem use max 1% of dirty +
	 * writeback threshold.
	 *
	 * This gives about 1M of write buffer for memory maps on a
	 * machine with 1G and 10% dirty_ratio, which should be more
	 * than enough.
	 *
	 * Privileged users can raise it by writing to
	 *
	 *    /sys/class/bdi/<bdi>/max_ratio
	 */
	bdi_set_max_ratio(sb->s_bdi, 1);

	return 0;
}
  1274. struct fuse_dev *fuse_dev_alloc(void)
  1275. {
  1276. struct fuse_dev *fud;
  1277. struct list_head *pq;
  1278. fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
  1279. if (!fud)
  1280. return NULL;
  1281. pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
  1282. if (!pq) {
  1283. kfree(fud);
  1284. return NULL;
  1285. }
  1286. fud->pq.processing = pq;
  1287. fuse_pqueue_init(&fud->pq);
  1288. return fud;
  1289. }
  1290. EXPORT_SYMBOL_GPL(fuse_dev_alloc);
/*
 * Attach an allocated device to a connection, taking a connection
 * reference which is dropped again in fuse_dev_free().
 */
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
{
	fud->fc = fuse_conn_get(fc);
	spin_lock(&fc->lock);
	list_add_tail(&fud->entry, &fc->devices);
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_dev_install);
  1299. struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
  1300. {
  1301. struct fuse_dev *fud;
  1302. fud = fuse_dev_alloc();
  1303. if (!fud)
  1304. return NULL;
  1305. fuse_dev_install(fud, fc);
  1306. return fud;
  1307. }
  1308. EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);
/*
 * Undo fuse_dev_alloc()/fuse_dev_install(): unlink the device from the
 * connection's list, drop the connection reference, and free the hash
 * table and the device itself.
 */
void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud->pq.processing);
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
/*
 * Build a fuse_attr from an in-core inode.  Used to seed the root
 * attributes of a submount from the mount point's inode.
 */
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
				      const struct fuse_inode *fi)
{
	struct timespec64 atime = inode_get_atime(&fi->inode);
	struct timespec64 mtime = inode_get_mtime(&fi->inode);
	struct timespec64 ctime = inode_get_ctime(&fi->inode);

	*attr = (struct fuse_attr){
		.ino		= fi->inode.i_ino,
		.size		= fi->inode.i_size,
		.blocks		= fi->inode.i_blocks,
		.atime		= atime.tv_sec,
		.mtime		= mtime.tv_sec,
		.ctime		= ctime.tv_sec,
		.atimensec	= atime.tv_nsec,
		.mtimensec	= mtime.tv_nsec,
		.ctimensec	= ctime.tv_nsec,
		.mode		= fi->inode.i_mode,
		.nlink		= fi->inode.i_nlink,
		.uid		= __kuid_val(fi->inode.i_uid),
		.gid		= __kgid_val(fi->inode.i_gid),
		.rdev		= fi->inode.i_rdev,
		.blksize	= 1u << fi->inode.i_blkbits,
	};
}
/* Common superblock defaults shared by regular mounts and submounts. */
static void fuse_sb_defaults(struct super_block *sb)
{
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
	/* idmapped mounts are only opted into via FUSE_ALLOW_IDMAP at INIT */
	sb->s_iflags |= SB_I_NOIDMAP;
	if (sb->s_user_ns != &init_user_ns)
		sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
	sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
}
/*
 * Fill in a superblock for a submount: inherit most parameters from
 * the parent superblock and make a duplicate of the mount point's
 * inode the root of the new superblock.
 */
static int fuse_fill_super_submount(struct super_block *sb,
				    struct fuse_inode *parent_fi)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct super_block *parent_sb = parent_fi->inode.i_sb;
	struct fuse_attr root_attr;
	struct inode *root;
	struct fuse_submount_lookup *sl;
	struct fuse_inode *fi;

	fuse_sb_defaults(sb);
	fm->sb = sb;

	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi_get(parent_sb->s_bdi);

	sb->s_xattr = parent_sb->s_xattr;
	sb->s_export_op = parent_sb->s_export_op;
	sb->s_time_gran = parent_sb->s_time_gran;
	sb->s_blocksize = parent_sb->s_blocksize;
	sb->s_blocksize_bits = parent_sb->s_blocksize_bits;
	sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL);
	if (parent_sb->s_subtype && !sb->s_subtype)
		return -ENOMEM;

	fuse_fill_attr_from_inode(&root_attr, parent_fi);
	root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0);
	/*
	 * NOTE(review): root is dereferenced below without a NULL check;
	 * presumably fuse_iget() cannot fail in this path — confirm.
	 */
	/*
	 * This inode is just a duplicate, so it is not looked up and
	 * its nlookup should not be incremented.  fuse_iget() does
	 * that, though, so undo it here.
	 */
	fi = get_fuse_inode(root);
	fi->nlookup--;

	sb->s_d_op = &fuse_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;

	/*
	 * Grab the parent's submount_lookup pointer and take a
	 * reference on the shared nlookup from the parent.  This is to
	 * prevent the last forget for this nodeid from getting
	 * triggered until all users have finished with it.
	 */
	sl = parent_fi->submount_lookup;
	WARN_ON(!sl);
	if (sl) {
		refcount_inc(&sl->count);
		fi->submount_lookup = sl;
	}

	return 0;
}
/* Filesystem context private data holds the FUSE inode of the mount point */
static int fuse_get_tree_submount(struct fs_context *fsc)
{
	struct fuse_mount *fm;
	struct fuse_inode *mp_fi = fsc->fs_private;
	struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode);
	struct super_block *sb;
	int err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		return -ENOMEM;

	fm->fc = fuse_conn_get(fc);
	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, NULL, set_anon_super_fc);
	/* on success sget_fc() took ownership of fm (s_fs_info cleared) */
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* Initialize superblock, making @mp_fi its root */
	err = fuse_fill_super_submount(sb, mp_fi);
	if (err) {
		deactivate_locked_super(sb);
		return err;
	}

	down_write(&fc->killsb);
	list_add_tail(&fm->fc_entry, &fc->mounts);
	up_write(&fc->killsb);

	sb->s_flags |= SB_ACTIVE;
	fsc->root = dget(sb->s_root);

	return 0;
}
/* Submounts only need get_tree; options come from the parent mount. */
static const struct fs_context_operations fuse_context_submount_ops = {
	.get_tree	= fuse_get_tree_submount,
};
/*
 * Context setup for auto-submounts: the caller stores the mount
 * point's fuse inode in fsc->fs_private before calling get_tree.
 */
int fuse_init_fs_context_submount(struct fs_context *fsc)
{
	fsc->ops = &fuse_context_submount_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount);
/*
 * Core superblock setup shared by /dev/fuse based mounts and other
 * transports (e.g. virtio-fs).
 *
 * Applies superblock defaults, optionally allocates a fuse device
 * (when ctx->fudptr is set), initializes the bdi, copies the parsed
 * mount options into the connection and installs the root dentry.
 * The device pointer is published through *ctx->fudptr only at the
 * very end, under fuse_mutex, so a concurrent mount reusing the same
 * device fd fails with -EINVAL.
 */
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
	struct fuse_dev *fud = NULL;
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct inode *root;
	struct dentry *root_dentry;
	int err;

	err = -EINVAL;
	if (sb->s_flags & SB_MANDLOCK)
		goto err;

	rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
	fuse_sb_defaults(sb);

	if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
		err = -EINVAL;
		if (!sb_set_blocksize(sb, ctx->blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}

	/* take over the subtype string from the context */
	sb->s_subtype = ctx->subtype;
	ctx->subtype = NULL;
	if (IS_ENABLED(CONFIG_FUSE_DAX)) {
		err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
		if (err)
			goto err;
	}

	if (ctx->fudptr) {
		err = -ENOMEM;
		fud = fuse_dev_alloc_install(fc);
		if (!fud)
			goto err_free_dax;
	}

	fc->dev = sb->s_dev;
	fm->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & SB_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= SB_POSIXACL;

	fc->default_permissions = ctx->default_permissions;
	fc->allow_other = ctx->allow_other;
	fc->user_id = ctx->user_id;
	fc->group_id = ctx->group_id;
	fc->legacy_opts_show = ctx->legacy_opts_show;
	fc->max_read = max_t(unsigned int, 4096, ctx->max_read);
	fc->destroy = ctx->destroy;
	fc->no_control = ctx->no_control;
	fc->no_force_umount = ctx->no_force_umount;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, ctx->rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	/* the device fd is already bound to another mount */
	if (ctx->fudptr && *ctx->fudptr)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	if (ctx->fudptr)
		*ctx->fudptr = fud;
	mutex_unlock(&fuse_mutex);
	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
	dput(root_dentry);
 err_dev_free:
	if (fud)
		fuse_dev_free(fud);
 err_free_dax:
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_conn_free(fc);
 err:
	return err;
}
EXPORT_SYMBOL_GPL(fuse_fill_super_common);
/*
 * fill_super callback for regular /dev/fuse based mounts: validate the
 * mandatory mount options, bind the superblock to the opened /dev/fuse
 * file and kick off the FUSE_INIT handshake.
 */
static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	int err;

	if (!ctx->file || !ctx->rootmode_present ||
	    !ctx->user_id_present || !ctx->group_id_present)
		return -EINVAL;

	/*
	 * Require mount to happen from the same user namespace which
	 * opened /dev/fuse to prevent potential attacks.
	 */
	if ((ctx->file->f_op != &fuse_dev_operations) ||
	    (ctx->file->f_cred->user_ns != sb->s_user_ns))
		return -EINVAL;
	ctx->fudptr = &ctx->file->private_data;

	err = fuse_fill_super_common(sb, ctx);
	if (err)
		return err;
	/* file->private_data shall be visible on all CPUs after this */
	smp_mb();
	fuse_send_init(get_fuse_mount_super(sb));
	return 0;
}
/*
 * This is the path where user supplied an already initialized fuse dev.  In
 * this case never create a new super if the old one is gone.
 */
static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc)
{
	return -ENOTCONN;
}
/* sget_fc() match callback: same fuse connection => same superblock. */
static int fuse_test_super(struct super_block *sb, struct fs_context *fsc)
{
	return fsc->sget_key == get_fuse_conn_super(sb);
}
/*
 * get_tree for fuse/fuseblk: allocate a tentative connection and
 * mount, then dispatch to the appropriate superblock path (block
 * device, sharing an already initialized connection, or a fresh nodev
 * superblock).
 *
 * Ownership of @fm passes to the superblock on success; if
 * fsc->s_fs_info is still set afterwards, no superblock took it and it
 * is destroyed here.
 */
static int fuse_get_tree(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct fuse_mount *fm;
	struct super_block *sb;
	int err;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		kfree(fc);
		return -ENOMEM;
	}

	fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
	fc->release = fuse_free_conn;

	fsc->s_fs_info = fm;

	if (ctx->fd_present)
		ctx->file = fget(ctx->fd);

	if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
		err = get_tree_bdev(fsc, fuse_fill_super);
		goto out;
	}
	/*
	 * While block dev mount can be initialized with a dummy device fd
	 * (found by device name), normal fuse mounts can't
	 */
	err = -EINVAL;
	if (!ctx->file)
		goto out;

	/*
	 * Allow creating a fuse mount with an already initialized fuse
	 * connection
	 */
	fud = READ_ONCE(ctx->file->private_data);
	if (ctx->file->f_op == &fuse_dev_operations && fud) {
		fsc->sget_key = fud->fc;
		sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
		err = PTR_ERR_OR_ZERO(sb);
		if (!IS_ERR(sb))
			fsc->root = dget(sb->s_root);
	} else {
		err = get_tree_nodev(fsc, fuse_fill_super);
	}
out:
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (ctx->file)
		fput(ctx->file);
	return err;
}
/* Mount-API operations shared by the fuse and fuseblk filesystem types. */
static const struct fs_context_operations fuse_context_ops = {
	.free		= fuse_free_fsc,
	.parse_param	= fuse_parse_param,
	.reconfigure	= fuse_reconfigure,
	.get_tree	= fuse_get_tree,
};
  1630. /*
  1631. * Set up the filesystem mount context.
  1632. */
  1633. static int fuse_init_fs_context(struct fs_context *fsc)
  1634. {
  1635. struct fuse_fs_context *ctx;
  1636. ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
  1637. if (!ctx)
  1638. return -ENOMEM;
  1639. ctx->max_read = ~0;
  1640. ctx->blksize = FUSE_DEFAULT_BLKSIZE;
  1641. ctx->legacy_opts_show = true;
  1642. #ifdef CONFIG_BLOCK
  1643. if (fsc->fs_type == &fuseblk_fs_type) {
  1644. ctx->is_bdev = true;
  1645. ctx->destroy = true;
  1646. }
  1647. #endif
  1648. fsc->fs_private = ctx;
  1649. fsc->ops = &fuse_context_ops;
  1650. return 0;
  1651. }
  1652. bool fuse_mount_remove(struct fuse_mount *fm)
  1653. {
  1654. struct fuse_conn *fc = fm->fc;
  1655. bool last = false;
  1656. down_write(&fc->killsb);
  1657. list_del_init(&fm->fc_entry);
  1658. if (list_empty(&fc->mounts))
  1659. last = true;
  1660. up_write(&fc->killsb);
  1661. return last;
  1662. }
  1663. EXPORT_SYMBOL_GPL(fuse_mount_remove);
/*
 * Tear down a fuse connection: issue the destroy request if this
 * connection was set up with one, abort and drain all requests, then
 * unhash the connection from the global list and the control fs.
 */
void fuse_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;

	if (fc->destroy)
		fuse_send_destroy(fm);

	/* Abort in-flight requests and wait until none reference fc */
	fuse_abort_conn(fc);
	fuse_wait_aborted(fc);

	/* Only unhash if the connection was ever added to fuse_conn_list */
	if (!list_empty(&fc->entry)) {
		mutex_lock(&fuse_mutex);
		list_del(&fc->entry);
		fuse_ctl_remove_conn(fc);
		mutex_unlock(&fuse_mutex);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_destroy);
  1679. static void fuse_sb_destroy(struct super_block *sb)
  1680. {
  1681. struct fuse_mount *fm = get_fuse_mount_super(sb);
  1682. bool last;
  1683. if (sb->s_root) {
  1684. last = fuse_mount_remove(fm);
  1685. if (last)
  1686. fuse_conn_destroy(fm);
  1687. }
  1688. }
/*
 * Release a fuse_mount: drop its reference on the connection and free
 * the mount itself after an RCU grace period (readers may still hold
 * RCU-protected pointers to it).
 */
void fuse_mount_destroy(struct fuse_mount *fm)
{
	fuse_conn_put(fm->fc);
	kfree_rcu(fm, rcu);
}
EXPORT_SYMBOL(fuse_mount_destroy);
/*
 * ->kill_sb for plain "fuse" mounts.  Order matters: detach the fuse
 * mount first, let the VFS kill the anonymous super, and only then free
 * the fuse_mount that the super still pointed at.
 */
static void fuse_kill_sb_anon(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}
/* The plain "fuse" filesystem type: no backing device, userns-mountable. */
static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
/*
 * ->kill_sb for "fuseblk" mounts.  Same ordering as fuse_kill_sb_anon(),
 * but the super is backed by a block device.
 */
static void fuse_kill_sb_blk(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_block_super(sb);
	fuse_mount_destroy(get_fuse_mount_super(sb));
}

/* Block-device-backed variant: requires a device, not userns-mountable. */
static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.init_fs_context = fuse_init_fs_context,
	.parameters	= fuse_fs_parameters,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("fuseblk");
/* Register the fuseblk filesystem type (real only with CONFIG_BLOCK). */
static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
/* No block layer: fuseblk registration is a successful no-op. */
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
  1743. static void fuse_inode_init_once(void *foo)
  1744. {
  1745. struct inode *inode = foo;
  1746. inode_init_once(inode);
  1747. }
/*
 * Create the fuse_inode slab cache and register both filesystem types.
 * Unwinds in reverse order on failure via the goto ladder.
 */
static int __init fuse_fs_init(void)
{
	int err;

	fuse_inode_cachep = kmem_cache_create("fuse_inode",
			sizeof(struct fuse_inode), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
			fuse_inode_init_once);
	err = -ENOMEM;
	if (!fuse_inode_cachep)
		goto out;

	err = register_fuseblk();
	if (err)
		goto out2;

	err = register_filesystem(&fuse_fs_type);
	if (err)
		goto out3;

	return 0;

 out3:
	unregister_fuseblk();
 out2:
	kmem_cache_destroy(fuse_inode_cachep);
 out:
	return err;
}
/* Unregister both filesystem types and destroy the inode cache. */
static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}
  1783. static struct kobject *fuse_kobj;
  1784. static int fuse_sysfs_init(void)
  1785. {
  1786. int err;
  1787. fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
  1788. if (!fuse_kobj) {
  1789. err = -ENOMEM;
  1790. goto out_err;
  1791. }
  1792. err = sysfs_create_mount_point(fuse_kobj, "connections");
  1793. if (err)
  1794. goto out_fuse_unregister;
  1795. return 0;
  1796. out_fuse_unregister:
  1797. kobject_put(fuse_kobj);
  1798. out_err:
  1799. return err;
  1800. }
/* Tear down /sys/fs/fuse/connections and drop the fuse kobject. */
static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}
/*
 * Module entry point: bring up the filesystem types, the fuse device,
 * sysfs, and the control filesystem, in that order, then sanitize the
 * global background-request limits.  Unwinds in reverse on any failure.
 */
static int __init fuse_init(void)
{
	int res;

	pr_info("init (API version %i.%i)\n",
		FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}
/* Module exit: tear down in strict reverse order of fuse_init(). */
static void __exit fuse_exit(void)
{
	pr_debug("exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);