cn_proc.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * cn_proc.c - process events connector
  4. *
  5. * Copyright (C) Matt Helsley, IBM Corp. 2005
  6. * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
  7. * Original copyright notice follows:
  8. * Copyright (C) 2005 BULL SA.
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/ktime.h>
  12. #include <linux/init.h>
  13. #include <linux/connector.h>
  14. #include <linux/gfp.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/atomic.h>
  17. #include <linux/pid_namespace.h>
  18. #include <linux/cn_proc.h>
  19. #include <linux/local_lock.h>
  20. /*
  21. * Size of a cn_msg followed by a proc_event structure. Since the
  22. * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
  23. * add one 4-byte word to the size here, and then start the actual
  24. * cn_msg structure 4 bytes into the stack buffer. The result is that
  25. * the immediately following proc_event structure is aligned to 8 bytes.
  26. */
  27. #define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
  28. /* See comment above; we test our assumption about sizeof struct cn_msg here. */
  29. static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
  30. {
  31. BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
  32. return (struct cn_msg *)(buffer + 4);
  33. }
/* Number of sockets currently listening; events are only built when >= 1. */
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
/* Connector (idx, val) identity this module registers and sends under. */
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* local_event.count is used as the sequence number of the netlink message */
struct local_event {
	local_lock_t lock;	/* serializes the count increment and the send */
	__u32 count;		/* per-CPU netlink message sequence number */
};
static DEFINE_PER_CPU(struct local_event, local_event) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
/*
 * Per-socket delivery filter for proc events.
 *
 * @data points at two __u32 words packed by send_msg(): the event type
 * ("what") followed by the exit code (only meaningful for exit events,
 * zero otherwise). The subscriber's requested event mask and mcast op
 * live in the socket's sk_user_data (a struct proc_input).
 *
 * Return 0 to deliver the packet to @dsk, nonzero to drop it.
 */
static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
	__u32 what, exit_code, *ptr;
	enum proc_cn_mcast_op mc_op;
	uintptr_t val;

	/* No per-socket state or no packet metadata: deliver unfiltered. */
	if (!dsk || !dsk->sk_user_data || !data)
		return 0;

	ptr = (__u32 *)data;
	what = *ptr++;
	exit_code = *ptr;
	val = ((struct proc_input *)(dsk->sk_user_data))->event_type;
	mc_op = ((struct proc_input *)(dsk->sk_user_data))->mcast_op;

	/* Socket asked to stop listening: drop everything. */
	if (mc_op == PROC_CN_MCAST_IGNORE)
		return 1;

	/* Subscriber wants every event type: deliver unconditionally. */
	if ((__u32)val == PROC_EVENT_ALL)
		return 0;

	/*
	 * Drop packet if we have to report only non-zero exit status
	 * (PROC_EVENT_NONZERO_EXIT) and exit status is 0
	 */
	if (((__u32)val & PROC_EVENT_NONZERO_EXIT) &&
	    (what == PROC_EVENT_EXIT)) {
		if (exit_code)
			return 0;
	}

	/* Deliver when the event type is within the subscribed mask. */
	if ((__u32)val & what)
		return 0;

	return 1;
}
/*
 * Stamp @msg with a per-CPU sequence number and the originating CPU,
 * then multicast it to CN_IDX_PROC listeners through cn_filter().
 * Note that this overwrites msg->seq and the event's cpu field
 * unconditionally, whatever the caller set them to.
 */
static inline void send_msg(struct cn_msg *msg)
{
	__u32 filter_data[2];

	local_lock(&local_event.lock);

	msg->seq = __this_cpu_inc_return(local_event.count) - 1;
	((struct proc_event *)msg->data)->cpu = smp_processor_id();

	/*
	 * local_lock() disables preemption during send to ensure the messages
	 * are ordered according to their sequence numbers.
	 *
	 * If cn_netlink_send() fails, the data is not sent.
	 */
	/* Pack (what, exit_code) for cn_filter(); exit_code is 0 unless exiting. */
	filter_data[0] = ((struct proc_event *)msg->data)->what;
	if (filter_data[0] == PROC_EVENT_EXIT) {
		filter_data[1] =
		((struct proc_event *)msg->data)->event_data.exit.exit_code;
	} else {
		filter_data[1] = 0;
	}

	cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
			     cn_filter, (void *)filter_data);

	local_unlock(&local_event.lock);
}
  96. void proc_fork_connector(struct task_struct *task)
  97. {
  98. struct cn_msg *msg;
  99. struct proc_event *ev;
  100. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  101. struct task_struct *parent;
  102. if (atomic_read(&proc_event_num_listeners) < 1)
  103. return;
  104. msg = buffer_to_cn_msg(buffer);
  105. ev = (struct proc_event *)msg->data;
  106. memset(&ev->event_data, 0, sizeof(ev->event_data));
  107. ev->timestamp_ns = ktime_get_ns();
  108. ev->what = PROC_EVENT_FORK;
  109. rcu_read_lock();
  110. parent = rcu_dereference(task->real_parent);
  111. ev->event_data.fork.parent_pid = parent->pid;
  112. ev->event_data.fork.parent_tgid = parent->tgid;
  113. rcu_read_unlock();
  114. ev->event_data.fork.child_pid = task->pid;
  115. ev->event_data.fork.child_tgid = task->tgid;
  116. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  117. msg->ack = 0; /* not used */
  118. msg->len = sizeof(*ev);
  119. msg->flags = 0; /* not used */
  120. send_msg(msg);
  121. }
  122. void proc_exec_connector(struct task_struct *task)
  123. {
  124. struct cn_msg *msg;
  125. struct proc_event *ev;
  126. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  127. if (atomic_read(&proc_event_num_listeners) < 1)
  128. return;
  129. msg = buffer_to_cn_msg(buffer);
  130. ev = (struct proc_event *)msg->data;
  131. memset(&ev->event_data, 0, sizeof(ev->event_data));
  132. ev->timestamp_ns = ktime_get_ns();
  133. ev->what = PROC_EVENT_EXEC;
  134. ev->event_data.exec.process_pid = task->pid;
  135. ev->event_data.exec.process_tgid = task->tgid;
  136. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  137. msg->ack = 0; /* not used */
  138. msg->len = sizeof(*ev);
  139. msg->flags = 0; /* not used */
  140. send_msg(msg);
  141. }
  142. void proc_id_connector(struct task_struct *task, int which_id)
  143. {
  144. struct cn_msg *msg;
  145. struct proc_event *ev;
  146. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  147. const struct cred *cred;
  148. if (atomic_read(&proc_event_num_listeners) < 1)
  149. return;
  150. msg = buffer_to_cn_msg(buffer);
  151. ev = (struct proc_event *)msg->data;
  152. memset(&ev->event_data, 0, sizeof(ev->event_data));
  153. ev->what = which_id;
  154. ev->event_data.id.process_pid = task->pid;
  155. ev->event_data.id.process_tgid = task->tgid;
  156. rcu_read_lock();
  157. cred = __task_cred(task);
  158. if (which_id == PROC_EVENT_UID) {
  159. ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
  160. ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
  161. } else if (which_id == PROC_EVENT_GID) {
  162. ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
  163. ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
  164. } else {
  165. rcu_read_unlock();
  166. return;
  167. }
  168. rcu_read_unlock();
  169. ev->timestamp_ns = ktime_get_ns();
  170. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  171. msg->ack = 0; /* not used */
  172. msg->len = sizeof(*ev);
  173. msg->flags = 0; /* not used */
  174. send_msg(msg);
  175. }
  176. void proc_sid_connector(struct task_struct *task)
  177. {
  178. struct cn_msg *msg;
  179. struct proc_event *ev;
  180. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  181. if (atomic_read(&proc_event_num_listeners) < 1)
  182. return;
  183. msg = buffer_to_cn_msg(buffer);
  184. ev = (struct proc_event *)msg->data;
  185. memset(&ev->event_data, 0, sizeof(ev->event_data));
  186. ev->timestamp_ns = ktime_get_ns();
  187. ev->what = PROC_EVENT_SID;
  188. ev->event_data.sid.process_pid = task->pid;
  189. ev->event_data.sid.process_tgid = task->tgid;
  190. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  191. msg->ack = 0; /* not used */
  192. msg->len = sizeof(*ev);
  193. msg->flags = 0; /* not used */
  194. send_msg(msg);
  195. }
  196. void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
  197. {
  198. struct cn_msg *msg;
  199. struct proc_event *ev;
  200. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  201. if (atomic_read(&proc_event_num_listeners) < 1)
  202. return;
  203. msg = buffer_to_cn_msg(buffer);
  204. ev = (struct proc_event *)msg->data;
  205. memset(&ev->event_data, 0, sizeof(ev->event_data));
  206. ev->timestamp_ns = ktime_get_ns();
  207. ev->what = PROC_EVENT_PTRACE;
  208. ev->event_data.ptrace.process_pid = task->pid;
  209. ev->event_data.ptrace.process_tgid = task->tgid;
  210. if (ptrace_id == PTRACE_ATTACH) {
  211. ev->event_data.ptrace.tracer_pid = current->pid;
  212. ev->event_data.ptrace.tracer_tgid = current->tgid;
  213. } else if (ptrace_id == PTRACE_DETACH) {
  214. ev->event_data.ptrace.tracer_pid = 0;
  215. ev->event_data.ptrace.tracer_tgid = 0;
  216. } else
  217. return;
  218. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  219. msg->ack = 0; /* not used */
  220. msg->len = sizeof(*ev);
  221. msg->flags = 0; /* not used */
  222. send_msg(msg);
  223. }
  224. void proc_comm_connector(struct task_struct *task)
  225. {
  226. struct cn_msg *msg;
  227. struct proc_event *ev;
  228. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  229. if (atomic_read(&proc_event_num_listeners) < 1)
  230. return;
  231. msg = buffer_to_cn_msg(buffer);
  232. ev = (struct proc_event *)msg->data;
  233. memset(&ev->event_data, 0, sizeof(ev->event_data));
  234. ev->timestamp_ns = ktime_get_ns();
  235. ev->what = PROC_EVENT_COMM;
  236. ev->event_data.comm.process_pid = task->pid;
  237. ev->event_data.comm.process_tgid = task->tgid;
  238. get_task_comm(ev->event_data.comm.comm, task);
  239. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  240. msg->ack = 0; /* not used */
  241. msg->len = sizeof(*ev);
  242. msg->flags = 0; /* not used */
  243. send_msg(msg);
  244. }
  245. void proc_coredump_connector(struct task_struct *task)
  246. {
  247. struct cn_msg *msg;
  248. struct proc_event *ev;
  249. struct task_struct *parent;
  250. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  251. if (atomic_read(&proc_event_num_listeners) < 1)
  252. return;
  253. msg = buffer_to_cn_msg(buffer);
  254. ev = (struct proc_event *)msg->data;
  255. memset(&ev->event_data, 0, sizeof(ev->event_data));
  256. ev->timestamp_ns = ktime_get_ns();
  257. ev->what = PROC_EVENT_COREDUMP;
  258. ev->event_data.coredump.process_pid = task->pid;
  259. ev->event_data.coredump.process_tgid = task->tgid;
  260. rcu_read_lock();
  261. if (pid_alive(task)) {
  262. parent = rcu_dereference(task->real_parent);
  263. ev->event_data.coredump.parent_pid = parent->pid;
  264. ev->event_data.coredump.parent_tgid = parent->tgid;
  265. }
  266. rcu_read_unlock();
  267. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  268. msg->ack = 0; /* not used */
  269. msg->len = sizeof(*ev);
  270. msg->flags = 0; /* not used */
  271. send_msg(msg);
  272. }
  273. void proc_exit_connector(struct task_struct *task)
  274. {
  275. struct cn_msg *msg;
  276. struct proc_event *ev;
  277. struct task_struct *parent;
  278. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  279. if (atomic_read(&proc_event_num_listeners) < 1)
  280. return;
  281. msg = buffer_to_cn_msg(buffer);
  282. ev = (struct proc_event *)msg->data;
  283. memset(&ev->event_data, 0, sizeof(ev->event_data));
  284. ev->timestamp_ns = ktime_get_ns();
  285. ev->what = PROC_EVENT_EXIT;
  286. ev->event_data.exit.process_pid = task->pid;
  287. ev->event_data.exit.process_tgid = task->tgid;
  288. ev->event_data.exit.exit_code = task->exit_code;
  289. ev->event_data.exit.exit_signal = task->exit_signal;
  290. rcu_read_lock();
  291. if (pid_alive(task)) {
  292. parent = rcu_dereference(task->real_parent);
  293. ev->event_data.exit.parent_pid = parent->pid;
  294. ev->event_data.exit.parent_tgid = parent->tgid;
  295. }
  296. rcu_read_unlock();
  297. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  298. msg->ack = 0; /* not used */
  299. msg->len = sizeof(*ev);
  300. msg->flags = 0; /* not used */
  301. send_msg(msg);
  302. }
  303. /*
  304. * Send an acknowledgement message to userspace
  305. *
  306. * Use 0 for success, EFOO otherwise.
  307. * Note: this is the negative of conventional kernel error
  308. * values because it's not being returned via syscall return
  309. * mechanisms.
  310. */
  311. static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
  312. {
  313. struct cn_msg *msg;
  314. struct proc_event *ev;
  315. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  316. if (atomic_read(&proc_event_num_listeners) < 1)
  317. return;
  318. msg = buffer_to_cn_msg(buffer);
  319. ev = (struct proc_event *)msg->data;
  320. memset(&ev->event_data, 0, sizeof(ev->event_data));
  321. msg->seq = rcvd_seq;
  322. ev->timestamp_ns = ktime_get_ns();
  323. ev->cpu = -1;
  324. ev->what = PROC_EVENT_NONE;
  325. ev->event_data.ack.err = err;
  326. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  327. msg->ack = rcvd_ack + 1;
  328. msg->len = sizeof(*ev);
  329. msg->flags = 0; /* not used */
  330. send_msg(msg);
  331. }
/**
 * cn_proc_mcast_ctl
 * @msg: message sent from userspace via the connector
 * @nsp: NETLINK_CB of the client's socket buffer
 *
 * Handles PROC_CN_MCAST_LISTEN/IGNORE requests: records the requested
 * event mask and op on the client socket (sk_user_data, a struct
 * proc_input) and keeps the global listener count in sync. Every
 * well-formed request is acked with err == 0 on success or a positive
 * E* value on failure.
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
			      struct netlink_skb_parms *nsp)
{
	enum proc_cn_mcast_op mc_op = 0, prev_mc_op = 0;
	struct proc_input *pinput = NULL;
	enum proc_cn_event ev_type = 0;
	int err = 0, initial = 0;
	struct sock *sk = NULL;

	/*
	 * Events are reported with respect to the initial pid
	 * and user namespaces so ignore requestors from
	 * other namespaces.
	 */
	if ((current_user_ns() != &init_user_ns) ||
	    !task_is_in_init_pid_ns(current))
		return;

	/* Two request formats: a full proc_input, or just the mcast op. */
	if (msg->len == sizeof(*pinput)) {
		pinput = (struct proc_input *)msg->data;
		mc_op = pinput->mcast_op;
		ev_type = pinput->event_type;
	} else if (msg->len == sizeof(mc_op)) {
		mc_op = *((enum proc_cn_mcast_op *)msg->data);
		ev_type = PROC_EVENT_ALL;
	} else {
		/* Malformed length: silently ignored, no ack. */
		return;
	}

	/* Normalize the requested mask; an empty result means "everything". */
	ev_type = valid_event((enum proc_cn_event)ev_type);

	if (ev_type == PROC_EVENT_NONE)
		ev_type = PROC_EVENT_ALL;

	if (nsp->sk) {
		sk = nsp->sk;
		/* First request on this socket: allocate its filter state. */
		if (sk->sk_user_data == NULL) {
			sk->sk_user_data = kzalloc(sizeof(struct proc_input),
						   GFP_KERNEL);
			if (sk->sk_user_data == NULL) {
				err = ENOMEM;
				goto out;
			}
			initial = 1;
		} else {
			prev_mc_op =
			((struct proc_input *)(sk->sk_user_data))->mcast_op;
		}
		((struct proc_input *)(sk->sk_user_data))->event_type =
			ev_type;
		((struct proc_input *)(sk->sk_user_data))->mcast_op = mc_op;
	}

	switch (mc_op) {
	case PROC_CN_MCAST_LISTEN:
		/* Count each socket once, even across repeated LISTENs. */
		if (initial || (prev_mc_op != PROC_CN_MCAST_LISTEN))
			atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		if (!initial && (prev_mc_op != PROC_CN_MCAST_IGNORE))
			atomic_dec(&proc_event_num_listeners);
		/*
		 * NOTE(review): this deref assumes sk (and sk_user_data) is
		 * non-NULL, i.e. that nsp->sk is always set when a request
		 * arrives — confirm against the connector core.
		 */
		((struct proc_input *)(sk->sk_user_data))->event_type =
			PROC_EVENT_NONE;
		break;
	default:
		err = EINVAL;
		break;
	}

out:
	cn_proc_ack(err, msg->seq, msg->ack);
}
  402. /*
  403. * cn_proc_init - initialization entry point
  404. *
  405. * Adds the connector callback to the connector driver.
  406. */
  407. static int __init cn_proc_init(void)
  408. {
  409. int err = cn_add_callback(&cn_proc_event_id,
  410. "cn_proc",
  411. &cn_proc_mcast_ctl);
  412. if (err) {
  413. pr_warn("cn_proc failed to register\n");
  414. return err;
  415. }
  416. return 0;
  417. }
  418. device_initcall(cn_proc_init);