fanotify.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>

#include "fanotify.h"

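/*
 * Two queued events can be merged when they describe the same object: same
 * inode, same mount/dentry pair, and the same thread group that triggered
 * them.  fanotify_merge() below then folds the new mask into the old event.
 */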
static bool should_merge(struct fsnotify_event *old_fsn,
                         struct fsnotify_event *new_fsn)
{
        struct fanotify_event_info *old, *new;

        pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
        old = FANOTIFY_E(old_fsn);
        new = FANOTIFY_E(new_fsn);

        if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
            old->path.mnt == new->path.mnt &&
            old->path.dentry == new->path.dentry)
                return true;
        return false;
}

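/*
 * Try to fold @event into one already sitting on the group's notification
 * @list.  Returns 1 when the event was merged (the caller frees its copy)
 * and 0 when it still has to be queued as-is.
 */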
/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
        struct fsnotify_event *test_event;

        pr_debug("%s: list=%p event=%p\n", __func__, list, event);

        /*
         * Don't merge a permission event with any other event so that we know
         * the event structure we have created in fanotify_handle_event() is the
         * one we should check for permission response.
         */
        if (fanotify_is_perm_event(event->mask))
                return 0;

        list_for_each_entry_reverse(test_event, list, list) {
                if (should_merge(test_event, event)) {
                        test_event->mask |= event->mask;
                        return 1;
                }
        }

        return 0;
}

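/*
 * Sleep until userspace writes a response for this permission @event to the
 * fanotify file descriptor.  FAN_AUDIT only controls whether the decision is
 * logged and is masked off before mapping the verdict: FAN_ALLOW becomes 0,
 * FAN_DENY (and anything unrecognized) becomes -EPERM.
 */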
static int fanotify_get_response(struct fsnotify_group *group,
                                 struct fanotify_perm_event_info *event,
                                 struct fsnotify_iter_info *iter_info)
{
        int ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        wait_event(group->fanotify_data.access_waitq, event->response);

        /* userspace responded, convert to something usable */
        switch (event->response & ~FAN_AUDIT) {
        case FAN_ALLOW:
                ret = 0;
                break;
        case FAN_DENY:
        default:
                ret = -EPERM;
        }

        /* Check if the response should be audited */
        if (event->response & FAN_AUDIT)
                audit_fanotify(event->response & ~FAN_AUDIT);

        event->response = 0;

        pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
                 group, event, ret);

        return ret;
}

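/*
 * Filter events before an allocation is even attempted: fanotify only
 * reports path-type events on regular files and directories, and the event
 * mask must survive the union of the mark masks minus the union of the
 * ignore masks collected from all marks in @iter_info.
 */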
static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
                                       u32 event_mask, const void *data,
                                       int data_type)
{
        __u32 marks_mask = 0, marks_ignored_mask = 0;
        const struct path *path = data;
        struct fsnotify_mark *mark;
        int type;

        pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
                 __func__, iter_info->report_mask, event_mask, data, data_type);

        /* if we don't have enough info to send an event to userspace say no */
        if (data_type != FSNOTIFY_EVENT_PATH)
                return false;

        /* sorry, fanotify only gives a damn about files and dirs */
        if (!d_is_reg(path->dentry) &&
            !d_can_lookup(path->dentry))
                return false;

        fsnotify_foreach_obj_type(type) {
                if (!fsnotify_iter_should_report_type(iter_info, type))
                        continue;
                mark = iter_info->marks[type];
                /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
                marks_ignored_mask |= mark->ignored_mask;

                /*
                 * If the event is for a child and this mark doesn't care about
                 * events on a child, don't send it!
                 */
                if (event_mask & FS_EVENT_ON_CHILD &&
                    (type != FSNOTIFY_OBJ_TYPE_INODE ||
                     !(mark->mask & FS_EVENT_ON_CHILD)))
                        continue;

                marks_mask |= mark->mask;
        }

        if (d_is_dir(path->dentry) &&
            !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
                return false;

        if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
            ~marks_ignored_mask)
                return true;

        return false;
}

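/*
 * Allocate and fill in an event record.  Permission events use the larger
 * fanotify_perm_event_info so there is room to store the userspace response;
 * all other events come from the plain event cache.  The allocation is
 * charged to the listener's memory cgroup rather than to the task that
 * triggered the event.
 */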
struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
                                                 struct inode *inode, u32 mask,
                                                 const struct path *path)
{
        struct fanotify_event_info *event = NULL;
        gfp_t gfp = GFP_KERNEL_ACCOUNT;

        /*
         * For queues with unlimited length lost events are not expected and
         * can possibly have security implications. Avoid losing events when
         * memory is short. For the limited size queues, avoid OOM killer in the
         * target monitoring memcg as it may have security repercussion.
         */
        if (group->max_events == UINT_MAX)
                gfp |= __GFP_NOFAIL;
        else
                gfp |= __GFP_RETRY_MAYFAIL;

        /* Whoever is interested in the event, pays for the allocation. */
        memalloc_use_memcg(group->memcg);

        if (fanotify_is_perm_event(mask)) {
                struct fanotify_perm_event_info *pevent;

                pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
                if (!pevent)
                        goto out;
                event = &pevent->fae;
                pevent->response = 0;
                goto init;
        }
        event = kmem_cache_alloc(fanotify_event_cachep, gfp);
        if (!event)
                goto out;
init: __maybe_unused
        fsnotify_init_event(&event->fse, inode, mask);
        event->tgid = get_pid(task_tgid(current));
        if (path) {
                event->path = *path;
                path_get(&event->path);
        } else {
                event->path.mnt = NULL;
                event->path.dentry = NULL;
        }
out:
        memalloc_unuse_memcg();
        return event;
}

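/*
 * The group's entry point, called by the fsnotify core for every event that
 * may interest it.  The BUILD_BUG_ON()s pin the FAN_* UAPI bits to the
 * internal FS_* values so the two mask namespaces can be used without
 * translation.  For permission events this function does not return until
 * fanotify_get_response() has obtained a verdict from userspace.
 */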
static int fanotify_handle_event(struct fsnotify_group *group,
                                 struct inode *inode,
                                 u32 mask, const void *data, int data_type,
                                 const unsigned char *file_name, u32 cookie,
                                 struct fsnotify_iter_info *iter_info)
{
        int ret = 0;
        struct fanotify_event_info *event;
        struct fsnotify_event *fsn_event;

        BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
        BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
        BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
        BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
        BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
        BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
        BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
        BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
        BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
        BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

        if (!fanotify_should_send_event(iter_info, mask, data, data_type))
                return 0;

        pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
                 mask);

        if (fanotify_is_perm_event(mask)) {
                /*
                 * fsnotify_prepare_user_wait() fails if we race with mark
                 * deletion. Just let the operation pass in that case.
                 */
                if (!fsnotify_prepare_user_wait(iter_info))
                        return 0;
        }

        event = fanotify_alloc_event(group, inode, mask, data);
        ret = -ENOMEM;
        if (unlikely(!event)) {
                /*
                 * We don't queue overflow events for permission events as
                 * there the access is denied and so no event is in fact lost.
                 */
                if (!fanotify_is_perm_event(mask))
                        fsnotify_queue_overflow(group);
                goto finish;
        }

        fsn_event = &event->fse;
        ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
        if (ret) {
                /* Permission events shouldn't be merged */
                BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
                /* Our event wasn't used in the end. Free it. */
                fsnotify_destroy_event(group, fsn_event);

                ret = 0;
        } else if (fanotify_is_perm_event(mask)) {
                ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
                                            iter_info);
                fsnotify_destroy_event(group, fsn_event);
        }
finish:
        if (fanotify_is_perm_event(mask))
                fsnotify_finish_user_wait(iter_info);

        return ret;
}

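/*
 * Undo the per-user listener accounting that was done when the group was
 * created by fanotify_init().
 */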
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
        struct user_struct *user;

        user = group->fanotify_data.user;
        atomic_dec(&user->fanotify_listeners);
        free_uid(user);
}

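/*
 * Drop the path and pid references taken in fanotify_alloc_event() and
 * return the record to whichever cache it came from.
 */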
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
        struct fanotify_event_info *event;

        event = FANOTIFY_E(fsn_event);
        path_put(&event->path);
        put_pid(event->tgid);
        if (fanotify_is_perm_event(fsn_event->mask)) {
                kmem_cache_free(fanotify_perm_event_cachep,
                                FANOTIFY_PE(fsn_event));
                return;
        }
        kmem_cache_free(fanotify_event_cachep, event);
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
        kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

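/* The callbacks through which the fsnotify core drives a fanotify group. */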
const struct fsnotify_ops fanotify_fsnotify_ops = {
        .handle_event = fanotify_handle_event,
        .free_group_priv = fanotify_free_group_priv,
        .free_event = fanotify_free_event,
        .free_mark = fanotify_free_mark,
};
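
/*
 * For reference, the userspace side of the permission protocol served by
 * fanotify_get_response() above looks roughly like this.  This is an
 * illustrative sketch, not part of this file: error handling is omitted and
 * real code must read events in a loop, validating each with FAN_EVENT_OK():
 *
 *	int fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN_PERM, AT_FDCWD, "/mnt");
 *
 *	struct fanotify_event_metadata ev;
 *	read(fd, &ev, sizeof(ev));	// blocks until an open(2) on /mnt
 *
 *	struct fanotify_response resp = {
 *		.fd = ev.fd,
 *		.response = FAN_ALLOW,	// or FAN_DENY; FAN_AUDIT may be OR-ed
 *	};				// in if the group enabled auditing
 *	write(fd, &resp, sizeof(resp));	// wakes fanotify_get_response()
 *	close(ev.fd);
 */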