smc_rx.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage RMBE
 * copy new RMBE data into user space
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
#include <linux/splice.h>

#include <net/sock.h>
#include <trace/events/sock.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

/* callback implementation to wakeup consumers blocked with smc_rx_wait().
 * indirectly called by smc_cdc_msg_recv_action().
 */
static void smc_rx_wake_up(struct sock *sk)
{
        struct socket_wq *wq;

        trace_sk_data_ready(sk);

        /* derived from sock_def_readable() */
        /* called already in smc_listen_work() */
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
                                                EPOLLRDNORM | EPOLLRDBAND);
        sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
        if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
            (sk->sk_state == SMC_CLOSED))
                sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_HUP);
        rcu_read_unlock();
}

/* Update consumer cursor
 *   @conn   connection to update
 *   @cons   consumer cursor
 *   @len    number of Bytes consumed
 *   Returns:
 *   1 if we should end our receive, 0 otherwise
 */
static int smc_rx_update_consumer(struct smc_sock *smc,
                                  union smc_host_cursor cons, size_t len)
{
        struct smc_connection *conn = &smc->conn;
        struct sock *sk = &smc->sk;
        bool force = false;
        int diff, rc = 0;

        smc_curs_add(conn->rmb_desc->len, &cons, len);

        /* did we process urgent data? */
        if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) {
                diff = smc_curs_comp(conn->rmb_desc->len, &cons,
                                     &conn->urg_curs);
                if (sock_flag(sk, SOCK_URGINLINE)) {
                        if (diff == 0) {
                                force = true;
                                rc = 1;
                                conn->urg_state = SMC_URG_READ;
                        }
                } else {
                        if (diff == 1) {
                                /* skip urgent byte */
                                force = true;
                                smc_curs_add(conn->rmb_desc->len, &cons, 1);
                                conn->urg_rx_skip_pend = false;
                        } else if (diff < -1)
                                /* we read past urgent byte */
                                conn->urg_state = SMC_URG_READ;
                }
        }

        smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);

        /* send consumer cursor update if required */
        /* similar to advertising new TCP rcv_wnd if required */
        smc_tx_consumer_update(conn, force);

        return rc;
}

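/* advance the connection's consumer cursor by @len bytes; the peer is
 * notified of the freed receive buffer space via smc_rx_update_consumer()
 * if required
 */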
static void smc_rx_update_cons(struct smc_sock *smc, size_t len)
{
        struct smc_connection *conn = &smc->conn;
        union smc_host_cursor cons;

        smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
        smc_rx_update_consumer(smc, cons, len);
}

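/* context attached to each pipe buffer created by smc_rx_splice(); keeps the
 * socket and the number of spliced bytes until the pipe consumer releases
 * the buffer
 */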
struct smc_spd_priv {
        struct smc_sock *smc;
        size_t           len;
};

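/* pipe buffer release callback: runs once the pipe consumer is done with a
 * spliced buffer; only then is the consumer cursor advanced and are the page
 * and socket references taken in smc_rx_splice() dropped
 */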
static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
{
        struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private;
        struct smc_sock *smc = priv->smc;
        struct smc_connection *conn;
        struct sock *sk = &smc->sk;

        if (sk->sk_state == SMC_CLOSED ||
            sk->sk_state == SMC_PEERFINCLOSEWAIT ||
            sk->sk_state == SMC_APPFINCLOSEWAIT)
                goto out;
        conn = &smc->conn;
        lock_sock(sk);
        smc_rx_update_cons(smc, priv->len);
        release_sock(sk);
        if (atomic_sub_and_test(priv->len, &conn->splice_pending))
                smc_rx_wake_up(sk);
out:
        kfree(priv);
        put_page(buf->page);
        sock_put(sk);
}

static const struct pipe_buf_operations smc_pipe_ops = {
        .release = smc_rx_pipe_buf_release,
        .get = generic_pipe_buf_get
};

static void smc_rx_spd_release(struct splice_pipe_desc *spd,
                               unsigned int i)
{
        put_page(spd->pages[i]);
}

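/* hand @len bytes starting at @src in the RMB over to @pipe without copying.
 * A physically contiguous RMB (SMC-D, or SMC-R without is_vm) maps to a
 * single pipe buffer; a virtually contiguous SMC-R RMB is split into one
 * pipe buffer per page. Cursor updates are deferred until the pipe consumer
 * releases the buffers in smc_rx_pipe_buf_release().
 */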
static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
                         struct smc_sock *smc)
{
        struct smc_link_group *lgr = smc->conn.lgr;
        int offset = offset_in_page(src);
        struct partial_page *partial;
        struct splice_pipe_desc spd;
        struct smc_spd_priv **priv;
        struct page **pages;
        int bytes, nr_pages;
        int i;

        nr_pages = !lgr->is_smcd && smc->conn.rmb_desc->is_vm ?
                   PAGE_ALIGN(len + offset) / PAGE_SIZE : 1;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                goto out;
        partial = kcalloc(nr_pages, sizeof(*partial), GFP_KERNEL);
        if (!partial)
                goto out_page;
        priv = kcalloc(nr_pages, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto out_part;
        for (i = 0; i < nr_pages; i++) {
                priv[i] = kzalloc(sizeof(**priv), GFP_KERNEL);
                if (!priv[i])
                        goto out_priv;
        }

        if (lgr->is_smcd ||
            (!lgr->is_smcd && !smc->conn.rmb_desc->is_vm)) {
                /* smcd or smcr that uses physically contiguous RMBs */
                priv[0]->len = len;
                priv[0]->smc = smc;
                partial[0].offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
                partial[0].len = len;
                partial[0].private = (unsigned long)priv[0];
                pages[0] = smc->conn.rmb_desc->pages;
        } else {
                int size, left = len;
                void *buf = src;
                /* smcr that uses virtually contiguous RMBs */
                for (i = 0; i < nr_pages; i++) {
                        size = min_t(int, PAGE_SIZE - offset, left);
                        priv[i]->len = size;
                        priv[i]->smc = smc;
                        pages[i] = vmalloc_to_page(buf);
                        partial[i].offset = offset;
                        partial[i].len = size;
                        partial[i].private = (unsigned long)priv[i];
                        buf += size / sizeof(*buf);
                        left -= size;
                        offset = 0;
                }
        }
        spd.nr_pages_max = nr_pages;
        spd.nr_pages = nr_pages;
        spd.pages = pages;
        spd.partial = partial;
        spd.ops = &smc_pipe_ops;
        spd.spd_release = smc_rx_spd_release;

        bytes = splice_to_pipe(pipe, &spd);
        if (bytes > 0) {
                sock_hold(&smc->sk);
                if (!lgr->is_smcd && smc->conn.rmb_desc->is_vm) {
                        for (i = 0; i < PAGE_ALIGN(bytes + offset) / PAGE_SIZE; i++)
                                get_page(pages[i]);
                } else {
                        get_page(smc->conn.rmb_desc->pages);
                }
                atomic_add(bytes, &smc->conn.splice_pending);
        }
        kfree(priv);
        kfree(partial);
        kfree(pages);

        return bytes;

out_priv:
        for (i = (i - 1); i >= 0; i--)
                kfree(priv[i]);
        kfree(priv);
out_part:
        kfree(partial);
out_page:
        kfree(pages);
out:
        return -ENOMEM;
}

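/* wait criterion for smc_rx_wait(): data beyond the @peeked bytes is
 * available and no previously spliced bytes are still pending release
 */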
static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn, size_t peeked)
{
        return smc_rx_data_available(conn, peeked) &&
               !atomic_read(&conn->splice_pending);
}

/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
 *   @smc    smc socket
 *   @timeo  pointer to max seconds to wait, pointer to value 0 for no timeout
 *   @peeked number of bytes already peeked
 *   @fcrit  add'l criterion to evaluate as function pointer
 * Returns:
 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
 */
int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked,
                int (*fcrit)(struct smc_connection *conn, size_t baseline))
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct smc_connection *conn = &smc->conn;
        struct smc_cdc_conn_state_flags *cflags =
                                        &conn->local_tx_ctrl.conn_state_flags;
        struct sock *sk = &smc->sk;
        int rc;

        if (fcrit(conn, peeked))
                return 1;
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        add_wait_queue(sk_sleep(sk), &wait);
        rc = sk_wait_event(sk, timeo,
                           READ_ONCE(sk->sk_err) ||
                           cflags->peer_conn_abort ||
                           READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
                           conn->killed ||
                           fcrit(conn, peeked),
                           &wait);
        remove_wait_queue(sk_sleep(sk), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        return rc;
}

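/* receive the single urgent byte out of band (MSG_OOB without
 * SOCK_URGINLINE)
 */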
static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
                           int flags)
{
        struct smc_connection *conn = &smc->conn;
        union smc_host_cursor cons;
        struct sock *sk = &smc->sk;
        int rc = 0;

        if (sock_flag(sk, SOCK_URGINLINE) ||
            !(conn->urg_state == SMC_URG_VALID) ||
            conn->urg_state == SMC_URG_READ)
                return -EINVAL;

        SMC_STAT_INC(smc, urg_data_cnt);
        if (conn->urg_state == SMC_URG_VALID) {
                if (!(flags & MSG_PEEK))
                        smc->conn.urg_state = SMC_URG_READ;
                msg->msg_flags |= MSG_OOB;
                if (len > 0) {
                        if (!(flags & MSG_TRUNC))
                                rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
                        len = 1;
                        smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
                        if (smc_curs_diff(conn->rmb_desc->len, &cons,
                                          &conn->urg_curs) > 1)
                                conn->urg_rx_skip_pend = true;
                        /* Urgent Byte was already accounted for, but trigger
                         * skipping the urgent byte in non-inline case
                         */
                        if (!(flags & MSG_PEEK))
                                smc_rx_update_consumer(smc, cons, 0);
                } else {
                        msg->msg_flags |= MSG_TRUNC;
                }

                return rc ? -EFAULT : len;
        }

        if (sk->sk_state == SMC_CLOSED || sk->sk_shutdown & RCV_SHUTDOWN)
                return 0;
        return -EAGAIN;
}

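/* check whether new data beyond the @peeked bytes has arrived; a lone
 * pending urgent byte is skipped here so it cannot stall the receiver
 */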
static bool smc_rx_recvmsg_data_available(struct smc_sock *smc, size_t peeked)
{
        struct smc_connection *conn = &smc->conn;

        if (smc_rx_data_available(conn, peeked))
                return true;
        else if (conn->urg_state == SMC_URG_VALID)
                /* we received a single urgent Byte - skip */
                smc_rx_update_cons(smc, 0);
        return false;
}

/* smc_rx_recvmsg - receive data from RMBE
 * @msg:	copy data to receive buffer
 * @pipe:	copy data to pipe if set - indicates splice() call
 *
 * rcvbuf consumer: main API called by socket layer.
 * Called under sk lock.
 */
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
                   struct pipe_inode_info *pipe, size_t len, int flags)
{
        size_t copylen, read_done = 0, read_remaining = len, peeked_bytes = 0;
        size_t chunk_len, chunk_off, chunk_len_sum;
        struct smc_connection *conn = &smc->conn;
        int (*func)(struct smc_connection *conn, size_t baseline);
        union smc_host_cursor cons;
        int readable, chunk;
        char *rcvbuf_base;
        struct sock *sk;
        int splbytes;
        long timeo;
        int target;		/* Read at least these many bytes */
        int rc;

        if (unlikely(flags & MSG_ERRQUEUE))
                return -EINVAL; /* future work for sk.sk_family == AF_SMC */

        sk = &smc->sk;
        if (sk->sk_state == SMC_LISTEN)
                return -ENOTCONN;
        if (flags & MSG_OOB)
                return smc_rx_recv_urg(smc, msg, len, flags);
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        readable = atomic_read(&conn->bytes_to_rcv);
        if (readable >= conn->rmb_desc->len)
                SMC_STAT_RMB_RX_FULL(smc, !conn->lnk);

        if (len < readable)
                SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk);
        /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
        rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;

        do { /* while (read_remaining) */
                if (read_done >= target || (pipe && read_done))
                        break;

                if (conn->killed)
                        break;

                if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
                        goto copy;

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        /* smc_cdc_msg_recv_action() could have run after
                         * above smc_rx_recvmsg_data_available()
                         */
                        if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
                                goto copy;
                        break;
                }

                if (read_done) {
                        if (sk->sk_err ||
                            sk->sk_state == SMC_CLOSED ||
                            !timeo ||
                            signal_pending(current))
                                break;
                } else {
                        if (sk->sk_err) {
                                read_done = sock_error(sk);
                                break;
                        }
                        if (sk->sk_state == SMC_CLOSED) {
                                if (!sock_flag(sk, SOCK_DONE)) {
                                        /* This occurs when user tries to read
                                         * from never connected socket.
                                         */
                                        read_done = -ENOTCONN;
                                        break;
                                }
                                break;
                        }
                        if (!timeo)
                                return -EAGAIN;
                        if (signal_pending(current)) {
                                read_done = sock_intr_errno(timeo);
                                break;
                        }
                }

                if (!smc_rx_data_available(conn, peeked_bytes)) {
                        smc_rx_wait(smc, &timeo, peeked_bytes, smc_rx_data_available);
                        continue;
                }

copy:
                /* initialize variables for 1st iteration of subsequent loop */
                /* could be just 1 byte, even after waiting on data above */
                readable = smc_rx_data_available(conn, peeked_bytes);
                splbytes = atomic_read(&conn->splice_pending);
                if (!readable || (msg && splbytes)) {
                        if (splbytes)
                                func = smc_rx_data_available_and_no_splice_pend;
                        else
                                func = smc_rx_data_available;
                        smc_rx_wait(smc, &timeo, peeked_bytes, func);
                        continue;
                }

                smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
                if ((flags & MSG_PEEK) && peeked_bytes)
                        smc_curs_add(conn->rmb_desc->len, &cons, peeked_bytes);
                /* subsequent splice() calls pick up where previous left */
                if (splbytes)
                        smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
                if (conn->urg_state == SMC_URG_VALID &&
                    sock_flag(&smc->sk, SOCK_URGINLINE) &&
                    readable > 1)
                        readable--;	/* always stop at urgent Byte */
                /* not more than what user space asked for */
                copylen = min_t(size_t, read_remaining, readable);
                /* determine chunks where to read from rcvbuf */
                /* either unwrapped case, or 1st chunk of wrapped case */
                chunk_len = min_t(size_t, copylen, conn->rmb_desc->len -
                                  cons.count);
                chunk_len_sum = chunk_len;
                chunk_off = cons.count;
                smc_rmb_sync_sg_for_cpu(conn);
                for (chunk = 0; chunk < 2; chunk++) {
                        if (!(flags & MSG_TRUNC)) {
                                if (msg) {
                                        rc = memcpy_to_msg(msg, rcvbuf_base +
                                                           chunk_off,
                                                           chunk_len);
                                } else {
                                        rc = smc_rx_splice(pipe, rcvbuf_base +
                                                           chunk_off, chunk_len,
                                                           smc);
                                }
                                if (rc < 0) {
                                        if (!read_done)
                                                read_done = -EFAULT;
                                        goto out;
                                }
                        }
                        read_remaining -= chunk_len;
                        read_done += chunk_len;
                        if (flags & MSG_PEEK)
                                peeked_bytes += chunk_len;

                        if (chunk_len_sum == copylen)
                                break; /* either on 1st or 2nd iteration */
                        /* prepare next (== 2nd) iteration */
                        chunk_len = copylen - chunk_len; /* remainder */
                        chunk_len_sum += chunk_len;
                        chunk_off = 0; /* modulo offset in recv ring buffer */
                }
                /* update cursors */
                if (!(flags & MSG_PEEK)) {
                        /* increased in recv tasklet smc_cdc_msg_rcv() */
                        smp_mb__before_atomic();
                        atomic_sub(copylen, &conn->bytes_to_rcv);
                        /* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
                        smp_mb__after_atomic();
                        if (msg && smc_rx_update_consumer(smc, cons, copylen))
                                goto out;
                }

                trace_smc_rx_recvmsg(smc, copylen);
        } while (read_remaining);
out:
        return read_done;
}

/* Initialize receive properties on connection establishment. NB: not __init! */
void smc_rx_init(struct smc_sock *smc)
{
        smc->sk.sk_data_ready = smc_rx_wake_up;
        atomic_set(&smc->conn.splice_pending, 0);
        smc->conn.urg_state = SMC_URG_READ;
}