/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 * Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);

	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is r/w right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (sb_rdonly(mnt->mnt_sb))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
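
/*
 * Editorial sketch, not part of the original file: a typical caller
 * brackets a filesystem-modifying operation with mnt_want_write() and
 * mnt_drop_write(); the helper name below is hypothetical.
 */
static int __maybe_unused example_modify_fs(struct path *path)
{
	int err = mnt_want_write(path->mnt);

	if (err)
		return err;
	/* ... perform the write-side operation against path->dentry ... */
	mnt_drop_write(path->mnt);
	return 0;
}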

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * When finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to write to
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to write to
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
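
/*
 * Editorial sketch, not part of the original file: the file-based
 * helpers pair the same way as the mount-based ones, e.g. around an
 * ioctl that modifies the inode. The function name is hypothetical.
 */
static int __maybe_unused example_modify_file(struct file *file)
{
	int err = mnt_want_write_file(file);

	if (err)
		return err;
	/* ... update file_inode(file) here ... */
	mnt_drop_write_file(file);
	return 0;
}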

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();	// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically. If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
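
/*
 * Editorial sketch, not part of the original file: lookup_mnt() returns
 * a counted reference (or NULL), so a caller that only wants to know
 * whether something is mounted on top of @path must drop it again. The
 * helper name is hypothetical.
 */
static bool __maybe_unused example_has_child_mount(const struct path *path)
{
	struct vfsmount *m = lookup_mnt(path);

	if (!m)
		return false;
	mntput(m);	/* drop the reference lookup_mnt() took */
	return true;
}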

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline. For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dentry;
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct dentry *old_mountpoint = mnt->mnt_mountpoint;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);

	/*
	 * Safely avoid even the suggestion this code might sleep or
	 * lock the mount hash by taking advantage of the knowledge that
	 * mnt_change_mountpoint will not release the final reference
	 * to a mountpoint.
	 *
	 * During mounting, the mount passed in as the parent mount will
	 * continue to use the old mountpoint and during unmounting, the
	 * old mountpoint will continue to exist until namespace_unlock,
	 * which happens well after mnt_change_mountpoint.
	 */
	spin_lock(&old_mountpoint->d_lock);
	old_mountpoint->d_lockref.count--;
	spin_unlock(&old_mountpoint->d_lock);

	mnt_add_count(old_parent, -1);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
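
/*
 * Editorial sketch, not part of the original file: next_mnt() yields a
 * depth-first walk of a mount tree, the same pattern used by functions
 * below such as umount_tree(). Like those callers, a real user must
 * hold namespace_sem or the mount hash lock; the helper is hypothetical.
 */
static unsigned int __maybe_unused example_count_tree(struct mount *root)
{
	struct mount *p;
	unsigned int n = 0;

	for (p = root; p; p = next_mnt(p, root))
		n++;
	return n;
}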

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
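
/*
 * Editorial sketch, not part of the original file: an in-kernel user
 * would typically create an internal mount of a registered filesystem
 * type this way and release it later with mntput(). The wrapper is
 * hypothetical.
 */
static struct vfsmount * __maybe_unused example_internal_mount(struct file_system_type *type)
{
	/* SB_KERNMOUNT makes vfs_kern_mount() set MNT_INTERNAL above */
	return vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
}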

struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
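
/*
 * Editorial note: cleanup_mnt() can sleep (deactivate_super() takes
 * s_umount), so for non-internal mounts the final cleanup below is
 * deferred: to task work for ordinary process context, or to the
 * delayed workqueue above for kernel threads.
 */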

static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us. However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL. So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

/* path_is_mountpoint() - Check if path is a mount in the current
 *                        namespace.
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};
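
/*
 * Editorial note: do_umount() below combines these flags, passing
 * UMOUNT_PROPAGATE for a lazy detach (MNT_DETACH) and
 * UMOUNT_PROPAGATE|UMOUNT_SYNC for a regular umount.
 */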

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
  1299. if (flags & MNT_EXPIRE) {
  1300. if (&mnt->mnt == current->fs->root.mnt ||
  1301. flags & (MNT_FORCE | MNT_DETACH))
  1302. return -EINVAL;
  1303. /*
  1304. * probably don't strictly need the lock here if we examined
  1305. * all race cases, but it's a slowpath.
  1306. */
  1307. lock_mount_hash();
  1308. if (mnt_get_count(mnt) != 2) {
  1309. unlock_mount_hash();
  1310. return -EBUSY;
  1311. }
  1312. unlock_mount_hash();
  1313. if (!xchg(&mnt->mnt_expiry_mark, 1))
  1314. return -EAGAIN;
  1315. }
  1316. /*
  1317. * If we may have to abort operations to get out of this
  1318. * mount, and they will themselves hold resources we must
  1319. * allow the fs to do things. In the Unix tradition of
  1320. * 'Gee thats tricky lets do it in userspace' the umount_begin
  1321. * might fail to complete on the first run through as other tasks
  1322. * must return, and the like. Thats for the mount program to worry
  1323. * about for the moment.
  1324. */
  1325. if (flags & MNT_FORCE && sb->s_op->umount_begin) {
  1326. sb->s_op->umount_begin(sb);
  1327. }
  1328. /*
  1329. * No sense to grab the lock for this test, but test itself looks
  1330. * somewhat bogus. Suggestions for better replacement?
  1331. * Ho-hum... In principle, we might treat that as umount + switch
  1332. * to rootfs. GC would eventually take care of the old vfsmount.
  1333. * Actually it makes sense, especially if rootfs would contain a
  1334. * /reboot - static binary that would close all descriptors and
  1335. * call reboot(9). Then init(8) could umount root and exec /reboot.
  1336. */
  1337. if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
  1338. /*
  1339. * Special case for "unmounting" root ...
  1340. * we just try to remount it readonly.
  1341. */
  1342. if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
  1343. return -EPERM;
  1344. down_write(&sb->s_umount);
  1345. if (!sb_rdonly(sb))
  1346. retval = do_remount_sb(sb, SB_RDONLY, NULL, 0);
  1347. up_write(&sb->s_umount);
  1348. return retval;
  1349. }
  1350. namespace_lock();
  1351. lock_mount_hash();
  1352. /* Recheck MNT_LOCKED with the locks held */
  1353. retval = -EINVAL;
  1354. if (mnt->mnt.mnt_flags & MNT_LOCKED)
  1355. goto out;
  1356. event++;
  1357. if (flags & MNT_DETACH) {
  1358. if (!list_empty(&mnt->mnt_list))
  1359. umount_tree(mnt, UMOUNT_PROPAGATE);
  1360. retval = 0;
  1361. } else {
  1362. shrink_submounts(mnt);
  1363. retval = -EBUSY;
  1364. if (!propagate_mount_busy(mnt, 2)) {
  1365. if (!list_empty(&mnt->mnt_list))
  1366. umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
  1367. retval = 0;
  1368. }
  1369. }
  1370. out:
  1371. unlock_mount_hash();
  1372. namespace_unlock();
  1373. return retval;
  1374. }
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
			umount_mnt(mnt);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static inline bool may_mandlock(void)
{
#ifndef CONFIG_MANDATORY_FILE_LOCKING
	return false;
#endif
	return capable(CAP_SYS_ADMIN);
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
int ksys_umount(char __user *name, int flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif
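
/*
 * Editor's illustration (not part of the original source): how the flags
 * handled above are typically spelled from userspace. The path /mnt/data
 * is hypothetical; umount2(2) is the glibc entry point that reaches
 * ksys_umount() via sys_umount().
 *
 *	#include <sys/mount.h>
 *
 *	// Synchronous unmount; fails with EBUSY while anything is in use.
 *	umount2("/mnt/data", 0);
 *
 *	// Lazy unmount: detach from the tree now, tear down later.
 *	umount2("/mnt/data", MNT_DETACH);
 *
 *	// Don't follow a trailing symlink when resolving the target.
 *	umount2("/mnt/data", UMOUNT_NOFOLLOW);
 */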
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				if (s->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					q = ERR_PTR(-EPERM);
					goto out;
				} else {
					s = skip_mnt_tree(s);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */
struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}
/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path. The new
 * mount will not be attached anywhere in the namespace and will be private
 * (i.e. changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
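
/*
 * Editor's illustration (not part of the original source): a minimal sketch
 * of how an in-kernel caller might consume clone_private_mount(), in the
 * style of overlayfs cloning a lower layer. The path variable is assumed to
 * have been resolved by the caller, e.g. via kern_path().
 *
 *	struct vfsmount *clone = clone_private_mount(&path);
 *
 *	if (IS_ERR(clone))
 *		return PTR_ERR(clone);
 *	// ... use the private clone; propagation from the source
 *	// mount will never reach it ...
 *	mntput(clone);	// release with mntput(), as documented above
 */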
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0, old, pending, sum;
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	old = ns->mounts;
	pending = ns->pending_mounts;
	sum = old + pending;
	if ((old > sum) ||
	    (pending > sum) ||
	    (max < sum) ||
	    (mounts > (max - sum)))
		return -ENOSPC;

	ns->pending_mounts = pending + mounts;
	return 0;
}
/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |      slave (*) | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Preallocate a mountpoint in case the new mounts need
	 * to be tucked under other mounts.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!parent_path) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
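
/*
 * Editor's illustration (not part of the original source): the "shared dest"
 * row of the BIND MOUNT table above, sketched from userspace with
 * hypothetical paths:
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);	// dest is shared
 *	mount("/src", "/mnt/a", NULL, MS_BIND, NULL);	// source is private
 *
 * Because /mnt is shared, the new mount at /mnt/a also appears at every
 * peer of /mnt, and the clone itself ends up shared -- the "shared (+)"
 * case. Had /src itself been shared, the clone would additionally have
 * joined /src's peer group, the "shared (++)" case.
 */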
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	inode_lock(dentry->d_inode);
	if (unlikely(cant_mount(dentry))) {
		inode_unlock(dentry->d_inode);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = get_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			inode_unlock(dentry->d_inode);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	inode_unlock(path->dentry->d_inode);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	inode_unlock(dentry->d_inode);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
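
/*
 * Editor's illustration (not part of the original source): the
 * is_power_of_2() trick above rejects combined propagation types, e.g.:
 *
 *	flags_to_propagation_type(MS_SHARED | MS_REC);	  // MS_SHARED
 *	flags_to_propagation_type(MS_SHARED | MS_SLAVE);  // 0 -> -EINVAL
 *
 * Exactly one of MS_SHARED, MS_PRIVATE, MS_SLAVE and MS_UNBINDABLE may be
 * set; MS_REC and MS_SILENT are masked off first and are therefore always
 * permitted alongside it.
 */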
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
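
/*
 * Editor's illustration (not part of the original source): the userspace
 * spelling of this operation. "mount --make-rshared /" becomes:
 *
 *	mount("none", "/", NULL, MS_SHARED | MS_REC, NULL);
 *
 * Source, filesystem type and data are ignored for a pure propagation
 * change; only the target path and the MS_* type flags matter.
 */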
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;

	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
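
/*
 * Editor's illustration (not part of the original source): plain and
 * recursive bind mounts from userspace, using hypothetical paths:
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);	     // clone_mnt()
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL); // copy_tree()
 *
 * Without MS_REC only /src itself is cloned, and the request is refused
 * with -EINVAL if that would strip away locked children of /src.
 */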
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int ms_flags, int sb_flags,
		      int mnt_flags, void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (ms_flags & MS_BIND)
		err = change_mount_flags(path->mnt, ms_flags);
	else if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, sb_flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}
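
/*
 * Editor's illustration (not part of the original source): remount requests
 * from userspace, assuming an existing mount at the hypothetical /mnt:
 *
 *	// Full remount: reaches the filesystem via do_remount_sb().
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *
 *	// Bind remount: changes only the per-mountpoint flags via
 *	// change_mount_flags() and never touches the superblock.
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */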
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;

	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (d_is_dir(path->dentry) !=
	      d_is_dir(old_path.dentry))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
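
/*
 * Editor's illustration (not part of the original source): moving a mount
 * from userspace, with hypothetical paths:
 *
 *	mount("/mnt/old", "/mnt/new", NULL, MS_MOVE, NULL);
 *
 * /mnt/old must itself be the root of a mount, its parent must not be
 * shared, and the destination may not sit below the tree being moved
 * (that would fail with -ELOOP).
 */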
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}
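
/*
 * Editor's illustration (not part of the original source): how a subtype is
 * typically spelled. A command such as "mount -t fuse.sshfs host:/ /mnt"
 * passes the string "fuse.sshfs"; everything after the first '.' is copied
 * into sb->s_subtype above, so listings like /proc/mounts can report the
 * full "fuse.sshfs" type.
 */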
/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}
static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags);

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	mnt = vfs_kern_mount(type, sb_flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	if (mount_too_revealing(mnt, &mnt_flags)) {
		mntput(mnt);
		return -EPERM;
	}

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;

	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}
/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
void *copy_mount_options(const void __user * data)
{
	int i;
	unsigned long size;
	char *copy;

	if (!data)
		return NULL;

	copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user(copy, data, size);
	if (!i) {
		kfree(copy);
		return ERR_PTR(-EFAULT);
	}
	if (i != PAGE_SIZE)
		memset(copy + i, 0, PAGE_SIZE - i);
	return copy;
}

char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PAGE_SIZE) : NULL;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (i.e. read-only, no-dev, no-suid etc.).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(const char *dev_name, const char __user *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	unsigned int mnt_flags = 0, sb_flags;
	int retval = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	if (flags & MS_NOUSER)
		return -EINVAL;

	/* ... and get the mountpoint */
	retval = user_path(dir_name, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (!retval && (flags & SB_MANDLOCK) && !may_mandlock())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
	}

	sb_flags = flags & (SB_RDONLY |
			    SB_SYNCHRONOUS |
			    SB_MANDLOCK |
			    SB_DIRSYNC |
			    SB_SILENT |
			    SB_POSIXACL |
			    SB_LAZYTIME |
			    SB_I_VERSION);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags, sb_flags, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, sb_flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
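
/*
 * Editor's illustration (not part of the original source): a complete
 * new-mount request showing how the arguments above are typically filled
 * in. The device and option string are hypothetical:
 *
 *	mount("/dev/sdb1", "/mnt", "ext4", MS_NOSUID | MS_NODEV,
 *	      "errors=remount-ro");
 *
 * The last argument lands in data_page (at most PAGE_SIZE - 1 bytes of it)
 * and is passed through uninterpreted to the filesystem.
 */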
static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
}

static void dec_mnt_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
}

static void free_mnt_ns(struct mnt_namespace *ns)
{
	ns_free_inum(&ns->ns);
	dec_mnt_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	kfree(ns);
}

/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops. A 64bit
 * number incrementing at 10GHz will take 12,427 years to wrap which
 * is effectively never, so we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
	struct mnt_namespace *new_ns;
	struct ucounts *ucounts;
	int ret;

	ucounts = inc_mnt_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns) {
		dec_mnt_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}
	ret = ns_alloc_inum(&new_ns->ns);
	if (ret) {
		kfree(new_ns);
		dec_mnt_namespaces(ucounts);
		return ERR_PTR(ret);
	}
	new_ns->ns.ops = &mntns_operations;
	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	new_ns->user_ns = get_user_ns(user_ns);
	new_ns->ucounts = ucounts;
	new_ns->mounts = 0;
	new_ns->pending_mounts = 0;
	return new_ns;
}
__latent_entropy
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace. We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		new_ns->mounts++;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @m: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		new_ns->mounts++;
		list_add(&mnt->mnt_list, &new_ns->list);
	} else {
		mntput(m);
	}
	return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type,
	       unsigned long flags, void __user *data)
{
	int ret;
	char *kernel_type;
	char *kernel_dev;
	void *options;

	kernel_type = copy_mount_string(type);
	ret = PTR_ERR(kernel_type);
	if (IS_ERR(kernel_type))
		goto out_type;

	kernel_dev = copy_mount_string(dev_name);
	ret = PTR_ERR(kernel_dev);
	if (IS_ERR(kernel_dev))
		goto out_dev;

	options = copy_mount_options(data);
	ret = PTR_ERR(options);
	if (IS_ERR(options))
		goto out_data;

	ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);

	kfree(options);
out_data:
	kfree(kernel_dev);
out_dev:
	kfree(kernel_type);
out_type:
	return ret;
}

SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	return ksys_mount(dev_name, dir_name, type, flags, data);
}
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	lock_mount_hash();
	root_mp->m_count++; /* pin it so it won't go away */
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	put_mountpoint(root_mp);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
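
/*
 * Editor's illustration (not part of the original source): the classic
 * root hand-over sequence sketched from userspace, assuming /new_root is
 * already a mountpoint and /new_root/old is a hypothetical directory:
 *
 *	#include <unistd.h>
 *	#include <sys/mount.h>
 *	#include <sys/syscall.h>
 *
 *	chdir("/new_root");
 *	syscall(SYS_pivot_root, ".", "old");
 *	chroot(".");
 *	chdir("/");
 *	umount2("/old", MNT_DETACH);	// drop the old root lazily
 */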
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}
static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != new->mnt_sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;

			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;

			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}
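
/*
 * Would mounting this filesystem reveal anything to an unprivileged
 * mount namespace that a fully visible mount does not already show?
 * Filesystems that opt in via SB_I_USERNS_VISIBLE must also be noexec
 * and nodev at the superblock level.
 */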
static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = mnt->mnt_sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, mnt, new_mnt_flags);
}

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid.  This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}
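
/*
 * Grab a reference to the task's mount namespace for the proc/nsfs
 * machinery; returns NULL if the task has no nsproxy (i.e. it is
 * already exiting).
 */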
static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}
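
/* Drop the reference taken by mntns_get() above. */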
static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}
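
/*
 * setns(2) for mount namespaces: switch the caller's nsproxy to the
 * target namespace and move its root and cwd to that namespace's root.
 * Requires CAP_SYS_ADMIN over the target namespace plus CAP_SYS_CHROOT
 * and CAP_SYS_ADMIN in the caller's own user namespace, and refuses to
 * operate on a shared fs_struct (fs->users != 1).
 */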
static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}
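
/*
 * Hooks that back the /proc/<pid>/ns/mnt file: opening it goes through
 * ->get/->put, and setns(2) lands in ->install.
 */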
const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};
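
/*
 * Illustrative userspace counterpart (example only, not kernel code):
 * entering another task's mount namespace goes through the file that
 * these operations implement:
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *	if (fd >= 0 && setns(fd, CLONE_NEWNS) == 0)
 *		;	// now inside the target mount namespace
 *
 * where 1234 stands for an arbitrary target pid.
 */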