fs.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Landlock LSM - Filesystem management and hooks
  4. *
  5. * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
  6. * Copyright © 2018-2020 ANSSI
  7. * Copyright © 2021-2022 Microsoft Corporation
  8. * Copyright © 2022 Günther Noack <gnoack3000@gmail.com>
  9. * Copyright © 2023-2024 Google LLC
  10. */
  11. #include <asm/ioctls.h>
  12. #include <kunit/test.h>
  13. #include <linux/atomic.h>
  14. #include <linux/bitops.h>
  15. #include <linux/bits.h>
  16. #include <linux/compiler_types.h>
  17. #include <linux/dcache.h>
  18. #include <linux/err.h>
  19. #include <linux/falloc.h>
  20. #include <linux/fs.h>
  21. #include <linux/init.h>
  22. #include <linux/kernel.h>
  23. #include <linux/limits.h>
  24. #include <linux/list.h>
  25. #include <linux/lsm_hooks.h>
  26. #include <linux/mount.h>
  27. #include <linux/namei.h>
  28. #include <linux/path.h>
  29. #include <linux/pid.h>
  30. #include <linux/rcupdate.h>
  31. #include <linux/sched/signal.h>
  32. #include <linux/spinlock.h>
  33. #include <linux/stat.h>
  34. #include <linux/types.h>
  35. #include <linux/wait_bit.h>
  36. #include <linux/workqueue.h>
  37. #include <uapi/linux/fiemap.h>
  38. #include <uapi/linux/landlock.h>
  39. #include "common.h"
  40. #include "cred.h"
  41. #include "fs.h"
  42. #include "limits.h"
  43. #include "object.h"
  44. #include "ruleset.h"
  45. #include "setup.h"
  46. /* Underlying object management */
/*
 * release_inode - Detach and release the inode tied to a Landlock object
 *
 * .release handler of landlock_fs_underops.  Called with @object->lock held
 * (cf. __releases) and responsible for dropping it on every path.  Severs
 * the object->inode link, then puts the inode reference taken by
 * get_inode_object(), coordinating with the superblock shutdown path
 * (hook_sb_delete()) through landlock_superblock()->inode_refs.
 */
static void release_inode(struct landlock_object *const object)
	__releases(object->lock)
{
	struct inode *const inode = object->underobj;
	struct super_block *sb;

	/* The inode may already have been detached by hook_sb_delete(). */
	if (!inode) {
		spin_unlock(&object->lock);
		return;
	}

	/*
	 * Protects against concurrent use by hook_sb_delete() of the reference
	 * to the underlying inode.
	 */
	object->underobj = NULL;
	/*
	 * Makes sure that if the filesystem is concurrently unmounted,
	 * hook_sb_delete() will wait for us to finish iput().
	 */
	sb = inode->i_sb;
	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
	spin_unlock(&object->lock);
	/*
	 * Because object->underobj was not NULL, hook_sb_delete() and
	 * get_inode_object() guarantee that it is safe to reset
	 * landlock_inode(inode)->object while it is not NULL.  It is therefore
	 * not necessary to lock inode->i_lock.
	 */
	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
	/*
	 * Now, new rules can safely be tied to @inode with get_inode_object().
	 */
	iput(inode);
	/* Wakes up a possibly waiting hook_sb_delete() (cf. inode_refs above). */
	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
		wake_up_var(&landlock_superblock(sb)->inode_refs);
}
/* Operations passed to landlock_create_object() for filesystem objects. */
static const struct landlock_object_underops landlock_fs_underops = {
	.release = release_inode
};
  85. /* IOCTL helpers */
  86. /**
  87. * is_masked_device_ioctl - Determine whether an IOCTL command is always
  88. * permitted with Landlock for device files. These commands can not be
  89. * restricted on device files by enforcing a Landlock policy.
  90. *
  91. * @cmd: The IOCTL command that is supposed to be run.
  92. *
  93. * By default, any IOCTL on a device file requires the
  94. * LANDLOCK_ACCESS_FS_IOCTL_DEV right. However, we blanket-permit some
  95. * commands, if:
  96. *
  97. * 1. The command is implemented in fs/ioctl.c's do_vfs_ioctl(),
  98. * not in f_ops->unlocked_ioctl() or f_ops->compat_ioctl().
  99. *
  100. * 2. The command is harmless when invoked on devices.
  101. *
  102. * We also permit commands that do not make sense for devices, but where the
  103. * do_vfs_ioctl() implementation returns a more conventional error code.
  104. *
  105. * Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl()
  106. * should be considered for inclusion here.
  107. *
  108. * Returns: true if the IOCTL @cmd can not be restricted with Landlock for
  109. * device files.
  110. */
  111. static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd)
  112. {
  113. switch (cmd) {
  114. /*
  115. * FIOCLEX, FIONCLEX, FIONBIO and FIOASYNC manipulate the FD's
  116. * close-on-exec and the file's buffered-IO and async flags. These
  117. * operations are also available through fcntl(2), and are
  118. * unconditionally permitted in Landlock.
  119. */
  120. case FIOCLEX:
  121. case FIONCLEX:
  122. case FIONBIO:
  123. case FIOASYNC:
  124. /*
  125. * FIOQSIZE queries the size of a regular file, directory, or link.
  126. *
  127. * We still permit it, because it always returns -ENOTTY for
  128. * other file types.
  129. */
  130. case FIOQSIZE:
  131. /*
  132. * FIFREEZE and FITHAW freeze and thaw the file system which the
  133. * given file belongs to. Requires CAP_SYS_ADMIN.
  134. *
  135. * These commands operate on the file system's superblock rather
  136. * than on the file itself. The same operations can also be
  137. * done through any other file or directory on the same file
  138. * system, so it is safe to permit these.
  139. */
  140. case FIFREEZE:
  141. case FITHAW:
  142. /*
  143. * FS_IOC_FIEMAP queries information about the allocation of
  144. * blocks within a file.
  145. *
  146. * This IOCTL command only makes sense for regular files and is
  147. * not implemented by devices. It is harmless to permit.
  148. */
  149. case FS_IOC_FIEMAP:
  150. /*
  151. * FIGETBSZ queries the file system's block size for a file or
  152. * directory.
  153. *
  154. * This command operates on the file system's superblock rather
  155. * than on the file itself. The same operation can also be done
  156. * through any other file or directory on the same file system,
  157. * so it is safe to permit it.
  158. */
  159. case FIGETBSZ:
  160. /*
  161. * FICLONE, FICLONERANGE and FIDEDUPERANGE make files share
  162. * their underlying storage ("reflink") between source and
  163. * destination FDs, on file systems which support that.
  164. *
  165. * These IOCTL commands only apply to regular files
  166. * and are harmless to permit for device files.
  167. */
  168. case FICLONE:
  169. case FICLONERANGE:
  170. case FIDEDUPERANGE:
  171. /*
  172. * FS_IOC_GETFSUUID and FS_IOC_GETFSSYSFSPATH both operate on
  173. * the file system superblock, not on the specific file, so
  174. * these operations are available through any other file on the
  175. * same file system as well.
  176. */
  177. case FS_IOC_GETFSUUID:
  178. case FS_IOC_GETFSSYSFSPATH:
  179. return true;
  180. /*
  181. * FIONREAD, FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IOC_FSGETXATTR and
  182. * FS_IOC_FSSETXATTR are forwarded to device implementations.
  183. */
  184. /*
  185. * file_ioctl() commands (FIBMAP, FS_IOC_RESVSP, FS_IOC_RESVSP64,
  186. * FS_IOC_UNRESVSP, FS_IOC_UNRESVSP64 and FS_IOC_ZERO_RANGE) are
  187. * forwarded to device implementations, so not permitted.
  188. */
  189. /* Other commands are guarded by the access right. */
  190. default:
  191. return false;
  192. }
  193. }
  194. /*
  195. * is_masked_device_ioctl_compat - same as the helper above, but checking the
  196. * "compat" IOCTL commands.
  197. *
  198. * The IOCTL commands with special handling in compat-mode should behave the
  199. * same as their non-compat counterparts.
  200. */
  201. static __attribute_const__ bool
  202. is_masked_device_ioctl_compat(const unsigned int cmd)
  203. {
  204. switch (cmd) {
  205. /* FICLONE is permitted, same as in the non-compat variant. */
  206. case FICLONE:
  207. return true;
  208. #if defined(CONFIG_X86_64)
  209. /*
  210. * FS_IOC_RESVSP_32, FS_IOC_RESVSP64_32, FS_IOC_UNRESVSP_32,
  211. * FS_IOC_UNRESVSP64_32, FS_IOC_ZERO_RANGE_32: not blanket-permitted,
  212. * for consistency with their non-compat variants.
  213. */
  214. case FS_IOC_RESVSP_32:
  215. case FS_IOC_RESVSP64_32:
  216. case FS_IOC_UNRESVSP_32:
  217. case FS_IOC_UNRESVSP64_32:
  218. case FS_IOC_ZERO_RANGE_32:
  219. #endif
  220. /*
  221. * FS_IOC32_GETFLAGS, FS_IOC32_SETFLAGS are forwarded to their device
  222. * implementations.
  223. */
  224. case FS_IOC32_GETFLAGS:
  225. case FS_IOC32_SETFLAGS:
  226. return false;
  227. default:
  228. return is_masked_device_ioctl(cmd);
  229. }
  230. }
  231. /* Ruleset management */
/*
 * get_inode_object - Get or create the Landlock object tied to an inode
 *
 * Returns the object already tied to @inode with an extra usage reference,
 * or allocates and ties a new one; may return an ERR_PTR propagated from
 * landlock_create_object().  The caller owns the returned reference (cf.
 * landlock_put_object() in landlock_append_fs_rule()).
 */
static struct landlock_object *get_inode_object(struct inode *const inode)
{
	struct landlock_object *object, *new_object;
	struct landlock_inode_security *inode_sec = landlock_inode(inode);

	rcu_read_lock();
retry:
	object = rcu_dereference(inode_sec->object);
	if (object) {
		/* Reuses the existing object if it is still referenced. */
		if (likely(refcount_inc_not_zero(&object->usage))) {
			rcu_read_unlock();
			return object;
		}
		/*
		 * We are racing with release_inode(), the object is going
		 * away.  Wait for release_inode(), then retry.
		 */
		spin_lock(&object->lock);
		spin_unlock(&object->lock);
		goto retry;
	}
	rcu_read_unlock();
	/*
	 * If there is no object tied to @inode, then create a new one (without
	 * holding any locks).
	 */
	new_object = landlock_create_object(&landlock_fs_underops, inode);
	if (IS_ERR(new_object))
		return new_object;
	/*
	 * Protects against concurrent calls to get_inode_object() or
	 * hook_sb_delete().
	 */
	spin_lock(&inode->i_lock);
	if (unlikely(rcu_access_pointer(inode_sec->object))) {
		/* Someone else just created the object, bail out and retry. */
		spin_unlock(&inode->i_lock);
		kfree(new_object);
		rcu_read_lock();
		goto retry;
	}
	/*
	 * @inode will be released by hook_sb_delete() on its superblock
	 * shutdown, or by release_inode() when no more ruleset references the
	 * related object.
	 */
	ihold(inode);
	rcu_assign_pointer(inode_sec->object, new_object);
	spin_unlock(&inode->i_lock);
	return new_object;
}
/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE | \
	LANDLOCK_ACCESS_FS_TRUNCATE | \
	LANDLOCK_ACCESS_FS_IOCTL_DEV)
/* clang-format on */
/*
 * landlock_append_fs_rule - Add a filesystem rule to a single-layer ruleset
 *
 * @ruleset: Ruleset under construction; must have exactly one layer.
 * @path: Should have been checked by get_path_from_fd().
 * @access_rights: Requested access rights for this path.
 *
 * Returns 0 on success, -EINVAL for access rights that make no sense on a
 * non-directory (or for an unexpected multi-layer ruleset), or an error
 * propagated from get_inode_object() or landlock_insert_rule().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
			    const struct path *const path,
			    access_mask_t access_rights)
{
	int err;
	struct landlock_id id = {
		.type = LANDLOCK_KEY_INODE,
	};

	/* Files only get access rights that make sense. */
	if (!d_is_dir(path->dentry) &&
	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
		return -EINVAL;
	if (WARN_ON_ONCE(ruleset->num_layers != 1))
		return -EINVAL;

	/* Transforms relative access rights to absolute ones. */
	access_rights |= LANDLOCK_MASK_ACCESS_FS &
			 ~landlock_get_fs_access_mask(ruleset, 0);
	id.key.object = get_inode_object(d_backing_inode(path->dentry));
	if (IS_ERR(id.key.object))
		return PTR_ERR(id.key.object);
	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, id, access_rights);
	mutex_unlock(&ruleset->lock);
	/*
	 * No need to check for an error because landlock_insert_rule()
	 * increments the refcount for the new object if needed.
	 */
	landlock_put_object(id.key.object);
	return err;
}
  324. /* Access-control management */
/*
 * find_rule - Look up the rule tied to a dentry's inode in @domain
 *
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule is found or if @dentry is negative.
 */
static const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
	  const struct dentry *const dentry)
{
	const struct landlock_rule *rule;
	const struct inode *inode;
	struct landlock_id id = {
		.type = LANDLOCK_KEY_INODE,
	};

	/* Ignores nonexistent leafs. */
	if (d_is_negative(dentry))
		return NULL;

	inode = d_backing_inode(dentry);
	rcu_read_lock();
	/* Keys the lookup by the Landlock object tied to the inode, if any. */
	id.key.object = rcu_dereference(landlock_inode(inode)->object);
	rule = landlock_find_rule(domain, id);
	rcu_read_unlock();
	return rule;
}
  349. /*
  350. * Allows access to pseudo filesystems that will never be mountable (e.g.
  351. * sockfs, pipefs), but can still be reachable through
  352. * /proc/<pid>/fd/<file-descriptor>
  353. */
  354. static bool is_nouser_or_private(const struct dentry *dentry)
  355. {
  356. return (dentry->d_sb->s_flags & SB_NOUSER) ||
  357. (d_is_positive(dentry) &&
  358. unlikely(IS_PRIVATE(d_backing_inode(dentry))));
  359. }
/* Returns the filesystem access rights handled by @domain. */
static access_mask_t
get_handled_fs_accesses(const struct landlock_ruleset *const domain)
{
	/* Handles all initially denied by default access rights. */
	return landlock_union_access_masks(domain).fs |
	       LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
}
/* Mask matching domains that handle any filesystem access right. */
static const struct access_masks any_fs = {
	.fs = ~0,
};
/*
 * Returns the current task's Landlock domain restricted to those applicable
 * to filesystem accesses (cf. any_fs).
 */
static const struct landlock_ruleset *get_current_fs_domain(void)
{
	return landlock_get_applicable_domain(landlock_get_current_domain(),
					      any_fs);
}
  375. /*
  376. * Check that a destination file hierarchy has more restrictions than a source
  377. * file hierarchy. This is only used for link and rename actions.
  378. *
  379. * @layer_masks_child2: Optional child masks.
  380. */
  381. static bool no_more_access(
  382. const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
  383. const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
  384. const bool child1_is_directory,
  385. const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
  386. const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
  387. const bool child2_is_directory)
  388. {
  389. unsigned long access_bit;
  390. for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
  391. access_bit++) {
  392. /* Ignores accesses that only make sense for directories. */
  393. const bool is_file_access =
  394. !!(BIT_ULL(access_bit) & ACCESS_FILE);
  395. if (child1_is_directory || is_file_access) {
  396. /*
  397. * Checks if the destination restrictions are a
  398. * superset of the source ones (i.e. inherited access
  399. * rights without child exceptions):
  400. * restrictions(parent2) >= restrictions(child1)
  401. */
  402. if ((((*layer_masks_parent1)[access_bit] &
  403. (*layer_masks_child1)[access_bit]) |
  404. (*layer_masks_parent2)[access_bit]) !=
  405. (*layer_masks_parent2)[access_bit])
  406. return false;
  407. }
  408. if (!layer_masks_child2)
  409. continue;
  410. if (child2_is_directory || is_file_access) {
  411. /*
  412. * Checks inverted restrictions for RENAME_EXCHANGE:
  413. * restrictions(parent1) >= restrictions(child2)
  414. */
  415. if ((((*layer_masks_parent2)[access_bit] &
  416. (*layer_masks_child2)[access_bit]) |
  417. (*layer_masks_parent1)[access_bit]) !=
  418. (*layer_masks_parent1)[access_bit])
  419. return false;
  420. }
  421. }
  422. return true;
  423. }
  424. #define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__))
  425. #define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__))
  426. #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
/* KUnit: exercises the no_more_access() truth table. */
static void test_no_more_access(struct kunit *const test)
{
	/* Layer 0 denies execute and read. */
	const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0),
	};
	/* Layer 0 denies execute and make-regular-file. */
	const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0),
	};
	/* Layer 0 denies execute. */
	const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
	};
	/* Layer 1 denies execute. */
	const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1),
	};
	/* Layers 0 and 1 deny execute. */
	const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
							  BIT_ULL(1),
	};
	/* No layer denies anything. */
	const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {};

	/* Checks without restriction. */
	NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false);
	NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false);
	NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false);

	/*
	 * Checks that we can only refer a file if no more access could be
	 * inherited.
	 */
	NMA_TRUE(&x0, &x0, false, &rx0, NULL, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false);

	/* Checks allowed referring with different nested domains. */
	NMA_TRUE(&x0, &x1, false, &x0, NULL, false);
	NMA_TRUE(&x1, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &rx0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &rx0, NULL, false);
	NMA_FALSE(&x01, &x01, false, &x0, NULL, false);

	/* Checks that file access rights are also enforced for a directory. */
	NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false);

	/* Checks that directory access rights don't impact file referring... */
	NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false);
	/* ...but only directory referring. */
	NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false);

	/* Checks directory exchange. */
	NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true);
	NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true);

	/* Checks file exchange with directory access rights... */
	NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false);
	/* ...and with file access rights. */
	NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false);

	/*
	 * Allowing the following requests should not be a security risk
	 * because domain 0 denies execute access, and domain 1 is always
	 * nested with domain 0.  However, adding an exception for this case
	 * would mean to check all nested domains to make sure none can get
	 * more privileges (e.g. processes only sandboxed by domain 0).
	 * Moreover, this behavior (i.e. composition of N domains) could then
	 * be inconsistent compared to domain 1's ruleset alone (e.g. it might
	 * be denied to link/rename with domain 1's ruleset, whereas it would
	 * be allowed if nested on top of domain 0).  Another drawback would be
	 * to create a covert channel that could enable sandboxed processes to
	 * infer most of the filesystem restrictions from their domain.  To
	 * make it simple, efficient, safe, and more consistent, this case is
	 * always denied.
	 */
	NMA_FALSE(&x1, &x1, false, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, false, &rx0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &rx0, NULL, false);

	/* Checks the same case of exclusive domains with a file... */
	NMA_TRUE(&x1, &x1, false, &x01, NULL, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x0, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x01, false);
	NMA_FALSE(&x1, &x1, false, &x0, &x0, false);
	/* ...and with a directory. */
	NMA_FALSE(&x1, &x1, false, &x0, &x0, true);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, false);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, true);
}
  520. #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
  521. #undef NMA_TRUE
  522. #undef NMA_FALSE
  523. /*
  524. * Removes @layer_masks accesses that are not requested.
  525. *
  526. * Returns true if the request is allowed, false otherwise.
  527. */
  528. static bool
  529. scope_to_request(const access_mask_t access_request,
  530. layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
  531. {
  532. const unsigned long access_req = access_request;
  533. unsigned long access_bit;
  534. if (WARN_ON_ONCE(!layer_masks))
  535. return true;
  536. for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
  537. (*layer_masks)[access_bit] = 0;
  538. return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
  539. }
  540. #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
/* KUnit: scoping an allow-all mask with an execute request stays allowed. */
static void test_scope_to_request_with_exec_none(struct kunit *const test)
{
	/* Allows everything. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	/* Checks and scopes with execute. */
	KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
						 &layer_masks));
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}
/*
 * KUnit: requesting execute against a mask denying execute is refused, and
 * the non-requested write denial is dropped from the mask.
 */
static void test_scope_to_request_with_exec_some(struct kunit *const test)
{
	/* Denies execute and write. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
	};

	/* Checks and scopes with execute. */
	KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
						  &layer_masks));
	KUNIT_EXPECT_EQ(test, BIT_ULL(0),
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}
/* KUnit: an empty access request is always allowed and clears all masks. */
static void test_scope_to_request_without_access(struct kunit *const test)
{
	/* Denies execute and write. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
	};

	/* Checks and scopes without access request. */
	KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks));
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}
  582. #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
  583. /*
  584. * Returns true if there is at least one access right different than
  585. * LANDLOCK_ACCESS_FS_REFER.
  586. */
  587. static bool
  588. is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
  589. const access_mask_t access_request)
  590. {
  591. unsigned long access_bit;
  592. /* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
  593. const unsigned long access_check = access_request &
  594. ~LANDLOCK_ACCESS_FS_REFER;
  595. if (!layer_masks)
  596. return false;
  597. for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
  598. if ((*layer_masks)[access_bit])
  599. return true;
  600. }
  601. return false;
  602. }
  603. #define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__))
  604. #define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__))
  605. #ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
/* KUnit: an empty denial mask never maps to -EACCES. */
static void test_is_eacces_with_none(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}
/* KUnit: a denial on LANDLOCK_ACCESS_FS_REFER alone never maps to -EACCES. */
static void test_is_eacces_with_refer(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0),
	};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}
/* KUnit: a write denial maps to -EACCES only when write is requested. */
static void test_is_eacces_with_write(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0),
	};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
	IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}
  634. #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
  635. #undef IE_TRUE
  636. #undef IE_FALSE
/**
 * is_access_to_paths_allowed - Check accesses for requests with a common path
 *
 * @domain: Domain to check against.
 * @path: File hierarchy to walk through.
 * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
 *     equal to @layer_masks_parent2 (if any).  This is tied to the unique
 *     requested path for most actions, or the source in case of a refer action
 *     (i.e. rename or link), or the source and destination in case of
 *     RENAME_EXCHANGE.
 * @layer_masks_parent1: Pointer to a matrix of layer masks per access
 *     masks, identifying the layers that forbid a specific access.  Bits from
 *     this matrix can be unset according to the @path walk.  An empty matrix
 *     means that @domain allows all possible Landlock accesses (i.e. not only
 *     those identified by @access_request_parent1).  This matrix can
 *     initially refer to domain layer masks and, when the accesses for the
 *     destination and source are the same, to requested layer masks.
 * @dentry_child1: Dentry to the initial child of the parent1 path.  This
 *     pointer must be NULL for non-refer actions (i.e. not link nor rename).
 * @access_request_parent2: Similar to @access_request_parent1 but for a
 *     request involving a source and a destination.  This refers to the
 *     destination, except in case of RENAME_EXCHANGE where it also refers to
 *     the source.  Must be set to 0 when using a simple path request.
 * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
 *     action.  This must be NULL otherwise.
 * @dentry_child2: Dentry to the initial child of the parent2 path.  This
 *     pointer is only set for RENAME_EXCHANGE actions and must be NULL
 *     otherwise.
 *
 * This helper first checks that the destination has a superset of restrictions
 * compared to the source (if any) for a common path.  Because of
 * RENAME_EXCHANGE actions, source and destinations may be swapped.  It then
 * checks that the collected accesses and the remaining ones are enough to
 * allow the request.
 *
 * Returns:
 * - true if the access request is granted;
 * - false otherwise.
 */
static bool is_access_to_paths_allowed(
	const struct landlock_ruleset *const domain,
	const struct path *const path,
	const access_mask_t access_request_parent1,
	layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child1,
	const access_mask_t access_request_parent2,
	layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child2)
{
	bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
		child1_is_directory = true, child2_is_directory = true;
	struct path walker_path;
	access_mask_t access_masked_parent1, access_masked_parent2;
	/* On-stack backing storage for the optional child matrices. */
	layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
		_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
	/* NULL means "no child matrix to compare" (non-refer request). */
	layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
		(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;

	/* An empty access request is always granted. */
	if (!access_request_parent1 && !access_request_parent2)
		return true;

	if (WARN_ON_ONCE(!domain || !path))
		return true;
	/* Private or nouser dentries are not restricted by Landlock. */
	if (is_nouser_or_private(path->dentry))
		return true;
	if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
		return false;

	if (unlikely(layer_masks_parent2)) {
		if (WARN_ON_ONCE(!dentry_child1))
			return false;
		/*
		 * For a double request, first check for potential privilege
		 * escalation by looking at domain handled accesses (which are
		 * a superset of the meaningful requested accesses).
		 */
		access_masked_parent1 = access_masked_parent2 =
			get_handled_fs_accesses(domain);
		is_dom_check = true;
	} else {
		if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
			return false;
		/* For a simple request, only check for requested accesses. */
		access_masked_parent1 = access_request_parent1;
		access_masked_parent2 = access_request_parent2;
		is_dom_check = false;
	}

	/* Collects the layer masks of each child before walking up. */
	if (unlikely(dentry_child1)) {
		landlock_unmask_layers(
			find_rule(domain, dentry_child1),
			landlock_init_layer_masks(
				domain, LANDLOCK_MASK_ACCESS_FS,
				&_layer_masks_child1, LANDLOCK_KEY_INODE),
			&_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1));
		layer_masks_child1 = &_layer_masks_child1;
		child1_is_directory = d_is_dir(dentry_child1);
	}
	if (unlikely(dentry_child2)) {
		landlock_unmask_layers(
			find_rule(domain, dentry_child2),
			landlock_init_layer_masks(
				domain, LANDLOCK_MASK_ACCESS_FS,
				&_layer_masks_child2, LANDLOCK_KEY_INODE),
			&_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2));
		layer_masks_child2 = &_layer_masks_child2;
		child2_is_directory = d_is_dir(dentry_child2);
	}

	/* Takes a reference for the whole walk; released at the end. */
	walker_path = *path;
	path_get(&walker_path);
	/*
	 * We need to walk through all the hierarchy to not miss any relevant
	 * restriction.
	 */
	while (true) {
		struct dentry *parent_dentry;
		const struct landlock_rule *rule;

		/*
		 * If at least all accesses allowed on the destination are
		 * already allowed on the source, respectively if there is at
		 * least as much as restrictions on the destination than on the
		 * source, then we can safely refer files from the source to
		 * the destination without risking a privilege escalation.
		 * This also applies in the case of RENAME_EXCHANGE, which
		 * implies checks on both direction.  This is crucial for
		 * standalone multilayered security policies.  Furthermore,
		 * this helps avoid policy writers to shoot themselves in the
		 * foot.
		 */
		if (unlikely(is_dom_check &&
			     no_more_access(
				     layer_masks_parent1, layer_masks_child1,
				     child1_is_directory, layer_masks_parent2,
				     layer_masks_child2,
				     child2_is_directory))) {
			allowed_parent1 = scope_to_request(
				access_request_parent1, layer_masks_parent1);
			allowed_parent2 = scope_to_request(
				access_request_parent2, layer_masks_parent2);

			/* Stops when all accesses are granted. */
			if (allowed_parent1 && allowed_parent2)
				break;

			/*
			 * Now, downgrades the remaining checks from domain
			 * handled accesses to requested accesses.
			 */
			is_dom_check = false;
			access_masked_parent1 = access_request_parent1;
			access_masked_parent2 = access_request_parent2;
		}

		/* Clears layer bits granted by a rule tied to this dentry. */
		rule = find_rule(domain, walker_path.dentry);
		allowed_parent1 = landlock_unmask_layers(
			rule, access_masked_parent1, layer_masks_parent1,
			ARRAY_SIZE(*layer_masks_parent1));
		allowed_parent2 = landlock_unmask_layers(
			rule, access_masked_parent2, layer_masks_parent2,
			ARRAY_SIZE(*layer_masks_parent2));

		/* Stops when a rule from each layer grants access. */
		if (allowed_parent1 && allowed_parent2)
			break;

jump_up:
		if (walker_path.dentry == walker_path.mnt->mnt_root) {
			if (follow_up(&walker_path)) {
				/* Ignores hidden mount points. */
				goto jump_up;
			} else {
				/*
				 * Stops at the real root.  Denies access
				 * because not all layers have granted access.
				 */
				break;
			}
		}
		if (unlikely(IS_ROOT(walker_path.dentry))) {
			/*
			 * Stops at disconnected root directories.  Only allows
			 * access to internal filesystems (e.g. nsfs, which is
			 * reachable through /proc/<pid>/ns/<namespace>).
			 */
			allowed_parent1 = allowed_parent2 =
				!!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
			break;
		}
		/* Climbs to the parent, swapping the dentry reference. */
		parent_dentry = dget_parent(walker_path.dentry);
		dput(walker_path.dentry);
		walker_path.dentry = parent_dentry;
	}
	path_put(&walker_path);
	return allowed_parent1 && allowed_parent2;
}
  823. static int check_access_path(const struct landlock_ruleset *const domain,
  824. const struct path *const path,
  825. access_mask_t access_request)
  826. {
  827. layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
  828. access_request = landlock_init_layer_masks(
  829. domain, access_request, &layer_masks, LANDLOCK_KEY_INODE);
  830. if (is_access_to_paths_allowed(domain, path, access_request,
  831. &layer_masks, NULL, 0, NULL, NULL))
  832. return 0;
  833. return -EACCES;
  834. }
  835. static int current_check_access_path(const struct path *const path,
  836. const access_mask_t access_request)
  837. {
  838. const struct landlock_ruleset *const dom = get_current_fs_domain();
  839. if (!dom)
  840. return 0;
  841. return check_access_path(dom, path, access_request);
  842. }
  843. static access_mask_t get_mode_access(const umode_t mode)
  844. {
  845. switch (mode & S_IFMT) {
  846. case S_IFLNK:
  847. return LANDLOCK_ACCESS_FS_MAKE_SYM;
  848. case S_IFDIR:
  849. return LANDLOCK_ACCESS_FS_MAKE_DIR;
  850. case S_IFCHR:
  851. return LANDLOCK_ACCESS_FS_MAKE_CHAR;
  852. case S_IFBLK:
  853. return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
  854. case S_IFIFO:
  855. return LANDLOCK_ACCESS_FS_MAKE_FIFO;
  856. case S_IFSOCK:
  857. return LANDLOCK_ACCESS_FS_MAKE_SOCK;
  858. case S_IFREG:
  859. case 0:
  860. /* A zero mode translates to S_IFREG. */
  861. default:
  862. /* Treats weird files as regular files. */
  863. return LANDLOCK_ACCESS_FS_MAKE_REG;
  864. }
  865. }
  866. static access_mask_t maybe_remove(const struct dentry *const dentry)
  867. {
  868. if (d_is_negative(dentry))
  869. return 0;
  870. return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
  871. LANDLOCK_ACCESS_FS_REMOVE_FILE;
  872. }
/**
 * collect_domain_accesses - Walk through a file path and collect accesses
 *
 * @domain: Domain to check against.
 * @mnt_root: Last directory to check.
 * @dir: Directory to start the walk from.
 * @layer_masks_dom: Where to store the collected accesses.
 *
 * This helper is useful to begin a path walk from the @dir directory to a
 * @mnt_root directory used as a mount point.  This mount point is the common
 * ancestor between the source and the destination of a renamed and linked
 * file.  While walking from @dir to @mnt_root, we record all the domain's
 * allowed accesses in @layer_masks_dom.
 *
 * This is similar to is_access_to_paths_allowed() but much simpler because it
 * only handles walking on the same mount point and only checks one set of
 * accesses.
 *
 * Returns:
 * - true if all the domain access rights are allowed for @dir;
 * - false if the walk reached @mnt_root.
 */
static bool collect_domain_accesses(
	const struct landlock_ruleset *const domain,
	const struct dentry *const mnt_root, struct dentry *dir,
	layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
{
	unsigned long access_dom;
	bool ret = false;

	if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
		return true;
	/* Private or nouser dentries are not restricted by Landlock. */
	if (is_nouser_or_private(dir))
		return true;

	/* Seeds @layer_masks_dom with all the domain-handled accesses. */
	access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
					       layer_masks_dom,
					       LANDLOCK_KEY_INODE);

	/* Pins @dir for the walk; released after the loop. */
	dget(dir);
	while (true) {
		struct dentry *parent_dentry;

		/* Gets all layers allowing all domain accesses. */
		if (landlock_unmask_layers(find_rule(domain, dir), access_dom,
					   layer_masks_dom,
					   ARRAY_SIZE(*layer_masks_dom))) {
			/*
			 * Stops when all handled accesses are allowed by at
			 * least one rule in each layer.
			 */
			ret = true;
			break;
		}

		/* We should not reach a root other than @mnt_root. */
		if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
			break;

		/* Climbs to the parent, swapping the dentry reference. */
		parent_dentry = dget_parent(dir);
		dput(dir);
		dir = parent_dentry;
	}
	dput(dir);
	return ret;
}
/**
 * current_check_refer_path - Check if a rename or link action is allowed
 *
 * @old_dentry: File or directory requested to be moved or linked.
 * @new_dir: Destination parent directory.
 * @new_dentry: Destination file or directory.
 * @removable: Sets to true if it is a rename operation.
 * @exchange: Sets to true if it is a rename operation with RENAME_EXCHANGE.
 *
 * Because of its unprivileged constraints, Landlock relies on file hierarchies
 * (and not only inodes) to tie access rights to files.  Being able to link or
 * rename a file hierarchy brings some challenges.  Indeed, moving or linking a
 * file (i.e. creating a new reference to an inode) can have an impact on the
 * actions allowed for a set of files if it would change its parent directory
 * (i.e. reparenting).
 *
 * To avoid trivial access right bypasses, Landlock first checks if the file or
 * directory requested to be moved would gain new access rights inherited from
 * its new hierarchy.  Before returning any error, Landlock then checks that
 * the parent source hierarchy and the destination hierarchy would allow the
 * link or rename action.  If it is not the case, an error with EACCES is
 * returned to inform user space that there is no way to remove or create the
 * requested source file type.  If it should be allowed but the new inherited
 * access rights would be greater than the source access rights, then the
 * kernel returns an error with EXDEV.  Prioritizing EACCES over EXDEV enables
 * user space to abort the whole operation if there is no way to do it, or to
 * manually copy the source to the destination if this remains allowed, e.g.
 * because file creation is allowed on the destination directory but not direct
 * linking.
 *
 * To achieve this goal, the kernel needs to compare two file hierarchies: the
 * one identifying the source file or directory (including itself), and the
 * destination one.  This can be seen as a multilayer partial ordering problem.
 * The kernel walks through these paths and collects in a matrix the access
 * rights that are denied per layer.  These matrices are then compared to see
 * if the destination one has more (or the same) restrictions as the source
 * one.  If this is the case, the requested action will not return EXDEV, which
 * doesn't mean the action is allowed.  The parent hierarchy of the source
 * (i.e. parent directory), and the destination hierarchy must also be checked
 * to verify that they explicitly allow such action (i.e. referencing,
 * creation and potentially removal rights).  The kernel implementation is then
 * required to rely on potentially four matrices of access rights: one for the
 * source file or directory (i.e. the child), a potentially other one for the
 * other source/destination (in case of RENAME_EXCHANGE), one for the source
 * parent hierarchy and a last one for the destination hierarchy.  These
 * ephemeral matrices take some space on the stack, which limits the number of
 * layers to a deemed reasonable number: 16.
 *
 * Returns:
 * - 0 if access is allowed;
 * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
 * - -EACCES if file removal or creation is denied.
 */
static int current_check_refer_path(struct dentry *const old_dentry,
				    const struct path *const new_dir,
				    struct dentry *const new_dentry,
				    const bool removable, const bool exchange)
{
	const struct landlock_ruleset *const dom = get_current_fs_domain();
	bool allow_parent1, allow_parent2;
	access_mask_t access_request_parent1, access_request_parent2;
	struct path mnt_dir;
	struct dentry *old_parent;
	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
		layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};

	if (!dom)
		return 0;
	if (WARN_ON_ONCE(dom->num_layers < 1))
		return -EACCES;
	/* The source must exist. */
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	if (exchange) {
		/* For RENAME_EXCHANGE, @new_dentry is also a source. */
		if (unlikely(d_is_negative(new_dentry)))
			return -ENOENT;
		access_request_parent1 =
			get_mode_access(d_backing_inode(new_dentry)->i_mode);
	} else {
		access_request_parent1 = 0;
	}
	access_request_parent2 =
		get_mode_access(d_backing_inode(old_dentry)->i_mode);
	if (removable) {
		/* Renames also require removal rights, cf. maybe_remove(). */
		access_request_parent1 |= maybe_remove(old_dentry);
		access_request_parent2 |= maybe_remove(new_dentry);
	}

	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dentry->d_parent == new_dir->dentry) {
		/*
		 * The LANDLOCK_ACCESS_FS_REFER access right is not required
		 * for same-directory referer (i.e. no reparenting).
		 */
		access_request_parent1 = landlock_init_layer_masks(
			dom, access_request_parent1 | access_request_parent2,
			&layer_masks_parent1, LANDLOCK_KEY_INODE);
		if (is_access_to_paths_allowed(
			    dom, new_dir, access_request_parent1,
			    &layer_masks_parent1, NULL, 0, NULL, NULL))
			return 0;
		return -EACCES;
	}

	access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
	access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;

	/* Saves the common mount point. */
	mnt_dir.mnt = new_dir->mnt;
	mnt_dir.dentry = new_dir->mnt->mnt_root;

	/*
	 * old_dentry may be the root of the common mount point and
	 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
	 * OPEN_TREE_CLONE).  We do not need to call dget(old_parent) because
	 * we keep a reference to old_dentry.
	 */
	old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
						      old_dentry->d_parent;

	/* new_dir->dentry is equal to new_dentry->d_parent */
	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
						&layer_masks_parent1);
	allow_parent2 = collect_domain_accesses(
		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);

	if (allow_parent1 && allow_parent2)
		return 0;

	/*
	 * To be able to compare source and destination domain access rights,
	 * take into account the @old_dentry access rights aggregated with its
	 * parent access rights.  This will be useful to compare with the
	 * destination parent access rights.
	 */
	if (is_access_to_paths_allowed(
		    dom, &mnt_dir, access_request_parent1, &layer_masks_parent1,
		    old_dentry, access_request_parent2, &layer_masks_parent2,
		    exchange ? new_dentry : NULL))
		return 0;

	/*
	 * This prioritizes EACCES over EXDEV for all actions, including
	 * renames with RENAME_EXCHANGE.
	 */
	if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) ||
		   is_eacces(&layer_masks_parent2, access_request_parent2)))
		return -EACCES;

	/*
	 * Gracefully forbids reparenting if the destination directory
	 * hierarchy is not a superset of restrictions of the source directory
	 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
	 * source or the destination.
	 */
	return -EXDEV;
}
  1079. /* Inode hooks */
  1080. static void hook_inode_free_security_rcu(void *inode_security)
  1081. {
  1082. struct landlock_inode_security *inode_sec;
  1083. /*
  1084. * All inodes must already have been untied from their object by
  1085. * release_inode() or hook_sb_delete().
  1086. */
  1087. inode_sec = inode_security + landlock_blob_sizes.lbs_inode;
  1088. WARN_ON_ONCE(inode_sec->object);
  1089. }
/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
	struct inode *inode, *prev_inode = NULL;

	if (!landlock_initialized)
		return;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct landlock_object *object;

		/* Only handles referenced inodes. */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * Protects against concurrent modification of inode (e.g.
		 * from get_inode_object()).
		 */
		spin_lock(&inode->i_lock);
		/*
		 * Checks I_FREEING and I_WILL_FREE to protect against a race
		 * condition when release_inode() just called iput(), which
		 * could lead to a NULL dereference of inode->security or a
		 * second call to iput() for the same Landlock object.  Also
		 * checks I_NEW because such inode cannot be tied to an object.
		 */
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		rcu_read_lock();
		object = rcu_dereference(landlock_inode(inode)->object);
		if (!object) {
			/* Skips inodes not tied to a Landlock object. */
			rcu_read_unlock();
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Keeps a reference to this inode until the next loop walk. */
		__iget(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * If there is no concurrent release_inode() ongoing, then we
		 * are in charge of calling iput() on this inode, otherwise we
		 * will just wait for it to finish.
		 */
		spin_lock(&object->lock);
		if (object->underobj == inode) {
			object->underobj = NULL;
			spin_unlock(&object->lock);
			rcu_read_unlock();

			/*
			 * Because object->underobj was not NULL,
			 * release_inode() and get_inode_object() guarantee
			 * that it is safe to reset
			 * landlock_inode(inode)->object while it is not NULL.
			 * It is therefore not necessary to lock inode->i_lock.
			 */
			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
			/*
			 * At this point, we own the ihold() reference that was
			 * originally set up by get_inode_object() and the
			 * __iget() reference that we just set in this loop
			 * walk.  Therefore the following call to iput() will
			 * not sleep nor drop the inode because there is now at
			 * least two references to it.
			 */
			iput(inode);
		} else {
			spin_unlock(&object->lock);
			rcu_read_unlock();
		}

		if (prev_inode) {
			/*
			 * At this point, we still own the __iget() reference
			 * that we just set in this loop walk.  Therefore we
			 * can drop the list lock and know that the inode won't
			 * disappear from under us until the next loop walk.
			 */
			spin_unlock(&sb->s_inode_list_lock);
			/*
			 * We can now actually put the inode reference from the
			 * previous loop walk, which is not needed anymore.
			 */
			iput(prev_inode);
			cond_resched();
			spin_lock(&sb->s_inode_list_lock);
		}
		prev_inode = inode;
	}
	spin_unlock(&sb->s_inode_list_lock);

	/* Puts the inode reference from the last loop walk, if any. */
	if (prev_inode)
		iput(prev_inode);
	/* Waits for pending iput() in release_inode(). */
	wait_var_event(&landlock_superblock(sb)->inode_refs,
		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}
  1190. /*
  1191. * Because a Landlock security policy is defined according to the filesystem
  1192. * topology (i.e. the mount namespace), changing it may grant access to files
  1193. * not previously allowed.
  1194. *
  1195. * To make it simple, deny any filesystem topology modification by landlocked
  1196. * processes. Non-landlocked processes may still change the namespace of a
  1197. * landlocked process, but this kind of threat must be handled by a system-wide
  1198. * access-control security policy.
  1199. *
  1200. * This could be lifted in the future if Landlock can safely handle mount
  1201. * namespace updates requested by a landlocked process. Indeed, we could
  1202. * update the current domain (which is currently read-only) by taking into
  1203. * account the accesses of the source and the destination of a new mount point.
  1204. * However, it would also require to make all the child domains dynamically
  1205. * inherit these new constraints. Anyway, for backward compatibility reasons,
  1206. * a dedicated user space option would be required (e.g. as a ruleset flag).
  1207. */
  1208. static int hook_sb_mount(const char *const dev_name,
  1209. const struct path *const path, const char *const type,
  1210. const unsigned long flags, void *const data)
  1211. {
  1212. if (!get_current_fs_domain())
  1213. return 0;
  1214. return -EPERM;
  1215. }
  1216. static int hook_move_mount(const struct path *const from_path,
  1217. const struct path *const to_path)
  1218. {
  1219. if (!get_current_fs_domain())
  1220. return 0;
  1221. return -EPERM;
  1222. }
  1223. /*
  1224. * Removing a mount point may reveal a previously hidden file hierarchy, which
  1225. * may then grant access to files, which may have previously been forbidden.
  1226. */
  1227. static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
  1228. {
  1229. if (!get_current_fs_domain())
  1230. return 0;
  1231. return -EPERM;
  1232. }
  1233. static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
  1234. {
  1235. if (!get_current_fs_domain())
  1236. return 0;
  1237. return -EPERM;
  1238. }
  1239. /*
  1240. * pivot_root(2), like mount(2), changes the current mount namespace. It must
  1241. * then be forbidden for a landlocked process.
  1242. *
  1243. * However, chroot(2) may be allowed because it only changes the relative root
  1244. * directory of the current process. Moreover, it can be used to restrict the
  1245. * view of the filesystem.
  1246. */
  1247. static int hook_sb_pivotroot(const struct path *const old_path,
  1248. const struct path *const new_path)
  1249. {
  1250. if (!get_current_fs_domain())
  1251. return 0;
  1252. return -EPERM;
  1253. }
/* Path hooks */

/* Links are not removable (false) and never an exchange (false). */
static int hook_path_link(struct dentry *const old_dentry,
			  const struct path *const new_dir,
			  struct dentry *const new_dentry)
{
	return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
					false);
}
/* Renames are removable (true); RENAME_EXCHANGE also swaps both files. */
static int hook_path_rename(const struct path *const old_dir,
			    struct dentry *const old_dentry,
			    const struct path *const new_dir,
			    struct dentry *const new_dentry,
			    const unsigned int flags)
{
	/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
	return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
					!!(flags & RENAME_EXCHANGE));
}
/* Directory creation is checked with LANDLOCK_ACCESS_FS_MAKE_DIR on @dir. */
static int hook_path_mkdir(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}
  1277. static int hook_path_mknod(const struct path *const dir,
  1278. struct dentry *const dentry, const umode_t mode,
  1279. const unsigned int dev)
  1280. {
  1281. const struct landlock_ruleset *const dom = get_current_fs_domain();
  1282. if (!dom)
  1283. return 0;
  1284. return check_access_path(dom, dir, get_mode_access(mode));
  1285. }
/* Symlink creation is checked with LANDLOCK_ACCESS_FS_MAKE_SYM on @dir. */
static int hook_path_symlink(const struct path *const dir,
			     struct dentry *const dentry,
			     const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}
/* File removal is checked with LANDLOCK_ACCESS_FS_REMOVE_FILE on @dir. */
static int hook_path_unlink(const struct path *const dir,
			    struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}
/* Directory removal is checked with LANDLOCK_ACCESS_FS_REMOVE_DIR on @dir. */
static int hook_path_rmdir(const struct path *const dir,
			   struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}
/* Path-based truncation is checked with LANDLOCK_ACCESS_FS_TRUNCATE. */
static int hook_path_truncate(const struct path *const path)
{
	return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE);
}
  1306. /* File hooks */
  1307. /**
  1308. * get_required_file_open_access - Get access needed to open a file
  1309. *
  1310. * @file: File being opened.
  1311. *
  1312. * Returns the access rights that are required for opening the given file,
  1313. * depending on the file type and open mode.
  1314. */
  1315. static access_mask_t
  1316. get_required_file_open_access(const struct file *const file)
  1317. {
  1318. access_mask_t access = 0;
  1319. if (file->f_mode & FMODE_READ) {
  1320. /* A directory can only be opened in read mode. */
  1321. if (S_ISDIR(file_inode(file)->i_mode))
  1322. return LANDLOCK_ACCESS_FS_READ_DIR;
  1323. access = LANDLOCK_ACCESS_FS_READ_FILE;
  1324. }
  1325. if (file->f_mode & FMODE_WRITE)
  1326. access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
  1327. /* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
  1328. if (file->f_flags & __FMODE_EXEC)
  1329. access |= LANDLOCK_ACCESS_FS_EXECUTE;
  1330. return access;
  1331. }
/* Initializes the Landlock security blob of a newly allocated file. */
static int hook_file_alloc_security(struct file *const file)
{
	/*
	 * Grants all access rights, even if most of them are not checked later
	 * on.  It is more consistent.
	 *
	 * Notably, file descriptors for regular files can also be acquired
	 * without going through the file_open hook, for example when using
	 * memfd_create(2).
	 */
	landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS;
	return 0;
}
  1345. static bool is_device(const struct file *const file)
  1346. {
  1347. const struct inode *inode = file_inode(file);
  1348. return S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode);
  1349. }
/*
 * Checks requested accesses at open time and records the subset of optional
 * access rights (e.g. truncate, device ioctl) granted to the opened file for
 * later same-file checks.
 */
static int hook_file_open(struct file *const file)
{
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
	access_mask_t open_access_request, full_access_request, allowed_access,
		optional_access;
	const struct landlock_ruleset *const dom =
		landlock_get_applicable_domain(
			landlock_cred(file->f_cred)->domain, any_fs);

	/* A task without an applicable domain is not restricted. */
	if (!dom)
		return 0;

	/*
	 * Because a file may be opened with O_PATH, get_required_file_open_access()
	 * may return 0.  This case will be handled with a future Landlock
	 * evolution.
	 */
	open_access_request = get_required_file_open_access(file);

	/*
	 * We look up more access than what we immediately need for open(), so
	 * that we can later authorize operations on opened files.
	 */
	optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
	if (is_device(file))
		optional_access |= LANDLOCK_ACCESS_FS_IOCTL_DEV;

	full_access_request = open_access_request | optional_access;

	if (is_access_to_paths_allowed(
		    dom, &file->f_path,
		    landlock_init_layer_masks(dom, full_access_request,
					      &layer_masks, LANDLOCK_KEY_INODE),
		    &layer_masks, NULL, 0, NULL, NULL)) {
		allowed_access = full_access_request;
	} else {
		unsigned long access_bit;
		const unsigned long access_req = full_access_request;

		/*
		 * Calculate the actual allowed access rights from layer_masks.
		 * Add each access right to allowed_access which has not been
		 * vetoed by any layer.
		 */
		allowed_access = 0;
		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(layer_masks)) {
			if (!layer_masks[access_bit])
				allowed_access |= BIT_ULL(access_bit);
		}
	}

	/*
	 * For operations on already opened files (i.e. ftruncate()), it is the
	 * access rights at the time of open() which decide whether the
	 * operation is permitted.  Therefore, we record the relevant subset of
	 * file access rights in the opened struct file.
	 */
	landlock_file(file)->allowed_access = allowed_access;

	/* Only denies open() if a requested access is missing. */
	if ((open_access_request & allowed_access) == open_access_request)
		return 0;
	return -EACCES;
}
  1406. static int hook_file_truncate(struct file *const file)
  1407. {
  1408. /*
  1409. * Allows truncation if the truncate right was available at the time of
  1410. * opening the file, to get a consistent access check as for read, write
  1411. * and execute operations.
  1412. *
  1413. * Note: For checks done based on the file's Landlock allowed access, we
  1414. * enforce them independently of whether the current thread is in a
  1415. * Landlock domain, so that open files passed between independent
  1416. * processes retain their behaviour.
  1417. */
  1418. if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
  1419. return 0;
  1420. return -EACCES;
  1421. }
  1422. static int hook_file_ioctl(struct file *file, unsigned int cmd,
  1423. unsigned long arg)
  1424. {
  1425. access_mask_t allowed_access = landlock_file(file)->allowed_access;
  1426. /*
  1427. * It is the access rights at the time of opening the file which
  1428. * determine whether IOCTL can be used on the opened file later.
  1429. *
  1430. * The access right is attached to the opened file in hook_file_open().
  1431. */
  1432. if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
  1433. return 0;
  1434. if (!is_device(file))
  1435. return 0;
  1436. if (is_masked_device_ioctl(cmd))
  1437. return 0;
  1438. return -EACCES;
  1439. }
  1440. static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
  1441. unsigned long arg)
  1442. {
  1443. access_mask_t allowed_access = landlock_file(file)->allowed_access;
  1444. /*
  1445. * It is the access rights at the time of opening the file which
  1446. * determine whether IOCTL can be used on the opened file later.
  1447. *
  1448. * The access right is attached to the opened file in hook_file_open().
  1449. */
  1450. if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
  1451. return 0;
  1452. if (!is_device(file))
  1453. return 0;
  1454. if (is_masked_device_ioctl_compat(cmd))
  1455. return 0;
  1456. return -EACCES;
  1457. }
  1458. /*
  1459. * Always allow sending signals between threads of the same process. This
  1460. * ensures consistency with hook_task_kill().
  1461. */
  1462. static bool control_current_fowner(struct fown_struct *const fown)
  1463. {
  1464. struct task_struct *p;
  1465. /*
  1466. * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
  1467. * file_set_fowner LSM hook inconsistencies").
  1468. */
  1469. lockdep_assert_held(&fown->lock);
  1470. /*
  1471. * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side
  1472. * critical section.
  1473. */
  1474. guard(rcu)();
  1475. p = pid_task(fown->pid, fown->pid_type);
  1476. if (!p)
  1477. return true;
  1478. return !same_thread_group(p, current);
  1479. }
  1480. static void hook_file_set_fowner(struct file *file)
  1481. {
  1482. struct landlock_ruleset *prev_dom;
  1483. struct landlock_ruleset *new_dom = NULL;
  1484. if (control_current_fowner(file_f_owner(file))) {
  1485. new_dom = landlock_get_current_domain();
  1486. landlock_get_ruleset(new_dom);
  1487. }
  1488. prev_dom = landlock_file(file)->fown_domain;
  1489. landlock_file(file)->fown_domain = new_dom;
  1490. /* May be called in an RCU read-side critical section. */
  1491. landlock_put_ruleset_deferred(prev_dom);
  1492. }
  1493. static void hook_file_free_security(struct file *file)
  1494. {
  1495. landlock_put_ruleset_deferred(landlock_file(file)->fown_domain);
  1496. }
/*
 * LSM hook registrations for Landlock's filesystem access control. The
 * table is read-only after init; each entry binds an LSM hook point to its
 * hook_* implementation above.
 */
static struct security_hook_list landlock_hooks[] __ro_after_init = {
	/* Inode and superblock lifetime / mount-tree operations. */
	LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu),
	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),
	/* Path-based access checks. */
	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
	LSM_HOOK_INIT(path_truncate, hook_path_truncate),
	/* Checks on struct file (open-time rights and their later use). */
	LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security),
	LSM_HOOK_INIT(file_open, hook_file_open),
	LSM_HOOK_INIT(file_truncate, hook_file_truncate),
	LSM_HOOK_INIT(file_ioctl, hook_file_ioctl),
	LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat),
	LSM_HOOK_INIT(file_set_fowner, hook_file_set_fowner),
	LSM_HOOK_INIT(file_free_security, hook_file_free_security),
};
/*
 * Registers all of Landlock's filesystem-related LSM hooks under the
 * Landlock LSM id. __init: runs once at boot and is then discarded.
 */
__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			   &landlock_lsmid);
}
#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

/*
 * KUnit cases for the static helpers defined earlier in this file; only
 * built when CONFIG_SECURITY_LANDLOCK_KUNIT_TEST is enabled.
 */
/* clang-format off */
static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_no_more_access),
	KUNIT_CASE(test_scope_to_request_with_exec_none),
	KUNIT_CASE(test_scope_to_request_with_exec_some),
	KUNIT_CASE(test_scope_to_request_without_access),
	KUNIT_CASE(test_is_eacces_with_none),
	KUNIT_CASE(test_is_eacces_with_refer),
	KUNIT_CASE(test_is_eacces_with_write),
	{}	/* Sentinel terminating the case list. */
};
/* clang-format on */

static struct kunit_suite test_suite = {
	.name = "landlock_fs",
	.test_cases = test_cases,
};

kunit_test_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */