// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <net/sock.h>

#include "vmci_transport_notify.h"

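/* Shorthand for the QState notification bookkeeping kept per socket in
 * vmci_trans(vsk)->notify.pkt_q_state.
 */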
#define PKT_FIELD(vsk, field_name) \
	(vmci_trans(vsk)->notify.pkt_q_state.field_name)

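/* Returns true when the peer is blocked waiting for queue space and enough of
 * our consume queue has drained that a read notification should be sent.
 */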
static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
{
	bool retval;
	u64 notify_limit;

	if (!PKT_FIELD(vsk, peer_waiting_write))
		return false;

	/* When the sender blocks, we take that as a sign that the sender is
	 * faster than the receiver. To reduce the transmit rate of the sender,
	 * we delay the sending of the read notification by decreasing the
	 * write_notify_window. The notification is delayed until the number of
	 * bytes used in the queue drops below the write_notify_window.
	 */

	if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
		PKT_FIELD(vsk, peer_waiting_write_detected) = true;
		if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
			PKT_FIELD(vsk, write_notify_window) =
				PKT_FIELD(vsk, write_notify_min_window);
		} else {
			PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
			if (PKT_FIELD(vsk, write_notify_window) <
			    PKT_FIELD(vsk, write_notify_min_window))
				PKT_FIELD(vsk, write_notify_window) =
					PKT_FIELD(vsk, write_notify_min_window);
		}
	}

	notify_limit = vmci_trans(vsk)->consume_size -
		       PKT_FIELD(vsk, write_notify_window);

	/* The notify_limit is used to delay notifications in the case where
	 * flow control is enabled. Below, the test is expressed in terms of
	 * free space in the queue: if free_space > ConsumeSize -
	 * write_notify_window, then notify. An alternate way of expressing
	 * this is to rewrite the expression in terms of the data ready in the
	 * receive queue: if write_notify_window > bufferReady, then notify,
	 * as free_space == ConsumeSize - bufferReady.
	 */
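	/* Illustrative numbers only: with consume_size == 65536 and
	 * write_notify_window == 8192, notify_limit == 57344, so we notify
	 * the peer only once more than 57344 bytes of the consume queue are
	 * free, i.e. once fewer than 8192 ready bytes remain unread.
	 */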
	retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
		 notify_limit;
	if (retval) {
		/* Once we notify the peer, we reset the detected flag so the
		 * next wait will again cause a decrease in the window size.
		 */
		PKT_FIELD(vsk, peer_waiting_write_detected) = false;
	}
	return retval;
}

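/* The peer sent a READ notification: it consumed data from its queue, so
 * space may now be available for our writers.
 */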
static void
vmci_transport_handle_read(struct sock *sk,
			   struct vmci_transport_packet *pkt,
			   bool bottom_half,
			   struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
	sk->sk_write_space(sk);
}

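/* The peer sent a WROTE notification: new data is available in our consume
 * queue, so wake up any readers.
 */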
static void
vmci_transport_handle_wrote(struct sock *sk,
			    struct vmci_transport_packet *pkt,
			    bool bottom_half,
			    struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
	vsock_data_ready(sk);
}

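/* Each time the receiver is about to block, widen the write notify window by
 * one page, capped at the full consume queue size; the counterpart to the
 * window shrinking in vmci_transport_notify_waiting_write().
 */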
static void vsock_block_update_write_window(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (PKT_FIELD(vsk, write_notify_window) < vmci_trans(vsk)->consume_size)
		PKT_FIELD(vsk, write_notify_window) =
		    min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
			vmci_trans(vsk)->consume_size);
}

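/* If the peer is waiting for space, tell it we have read data; the send is
 * retried a bounded number of times on failure.
 */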
static int vmci_transport_send_read_notification(struct sock *sk)
{
	struct vsock_sock *vsk;
	bool sent_read;
	unsigned int retries;
	int err;

	vsk = vsock_sk(sk);
	sent_read = false;
	retries = 0;
	err = 0;

	if (vmci_transport_notify_waiting_write(vsk)) {
		/* Notify the peer that we have read, retrying the send on
		 * failure up to our maximum value. XXX For now we just log
		 * the failure, but later we should schedule a work item to
		 * handle the resend until it succeeds. That would require
		 * keeping track of work items in the vsk and cleaning them up
		 * upon socket close.
		 */
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_read &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_read(sk);
			if (err >= 0)
				sent_read = true;

			retries++;
		}

		if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read)
			pr_err("%p unable to send read notification to peer\n",
			       sk);
		else
			PKT_FIELD(vsk, peer_waiting_write) = false;
	}
	return err;
}

static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
	PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
	PKT_FIELD(vsk, peer_waiting_write) = false;
	PKT_FIELD(vsk, peer_waiting_write_detected) = false;
}

static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
{
	PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
	PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
	PKT_FIELD(vsk, peer_waiting_write) = false;
	PKT_FIELD(vsk, peer_waiting_write_detected) = false;
}

static int
vmci_transport_notify_pkt_poll_in(struct sock *sk,
				  size_t target, bool *data_ready_now)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsock_stream_has_data(vsk) >= target) {
		*data_ready_now = true;
	} else {
		/* We can't read right now because there is not enough data
		 * in the queue. Ask for notifications when there is something
		 * to read.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			vsock_block_update_write_window(sk);
		*data_ready_now = false;
	}

	return 0;
}

static int
vmci_transport_notify_pkt_poll_out(struct sock *sk,
				   size_t target, bool *space_avail_now)
{
	s64 produce_q_free_space;
	struct vsock_sock *vsk = vsock_sk(sk);

	produce_q_free_space = vsock_stream_has_space(vsk);
	if (produce_q_free_space > 0) {
		*space_avail_now = true;
		return 0;
	} else if (produce_q_free_space == 0) {
		/* This is a connected socket but we can't currently send data.
		 * Nothing else to do.
		 */
		*space_avail_now = false;
	}

	return 0;
}

static int
vmci_transport_notify_pkt_recv_init(
			struct sock *sk,
			size_t target,
			struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	data->consume_head = 0;
	data->produce_tail = 0;
	data->notify_on_block = false;

	if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
		PKT_FIELD(vsk, write_notify_min_window) = target + 1;
		if (PKT_FIELD(vsk, write_notify_window) <
		    PKT_FIELD(vsk, write_notify_min_window)) {
			/* If the current window is smaller than the new
			 * minimal window size, we need to reevaluate whether
			 * we need to notify the sender. If the number of ready
			 * bytes is smaller than the new window, we need to
			 * send a notification to the sender before we block.
			 */
			PKT_FIELD(vsk, write_notify_window) =
			    PKT_FIELD(vsk, write_notify_min_window);
			data->notify_on_block = true;
		}
	}

	return 0;
}

static int
vmci_transport_notify_pkt_recv_pre_block(
			struct sock *sk,
			size_t target,
			struct vmci_transport_recv_notify_data *data)
{
	int err = 0;

	vsock_block_update_write_window(sk);

	if (data->notify_on_block) {
		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;
		data->notify_on_block = false;
	}

	return err;
}

static int
vmci_transport_notify_pkt_recv_post_dequeue(
			struct sock *sk,
			size_t target,
			ssize_t copied,
			bool data_read,
			struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk;
	int err;
	bool was_full = false;
	u64 free_space;

	vsk = vsock_sk(sk);
	err = 0;

	if (data_read) {
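		/* Ensure the dequeue above is visible before we sample free
		 * space below (a hedged reading; the barrier is unannotated
		 * in the original).
		 */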
		smp_mb();

		free_space =
			vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair);
		was_full = free_space == copied;

		if (was_full)
			PKT_FIELD(vsk, peer_waiting_write) = true;

		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;

		/* See the comment in
		 * vmci_transport_notify_pkt_send_post_enqueue().
		 */
		vsock_data_ready(sk);
	}

	return err;
}

static int
vmci_transport_notify_pkt_send_init(
			struct sock *sk,
			struct vmci_transport_send_notify_data *data)
{
	data->consume_head = 0;
	data->produce_tail = 0;

	return 0;
}

static int
vmci_transport_notify_pkt_send_post_enqueue(
			struct sock *sk,
			ssize_t written,
			struct vmci_transport_send_notify_data *data)
{
	int err = 0;
	struct vsock_sock *vsk;
	bool sent_wrote = false;
	bool was_empty;
	int retries = 0;

	vsk = vsock_sk(sk);

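	/* If the bytes now ready equal what we just wrote, the queue was
	 * empty before this enqueue and the peer may be waiting for a WROTE
	 * notification. The barrier is unannotated in the original; it
	 * presumably makes the enqueue visible before buf_ready is sampled.
	 */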
	smp_mb();

	was_empty =
		vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written;
	if (was_empty) {
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_wrote &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_wrote(sk);
			if (err >= 0)
				sent_wrote = true;

			retries++;
		}
	}

	if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
		pr_err("%p unable to send wrote notification to peer\n",
		       sk);
		return err;
	}

	return err;
}

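/* Dispatch an incoming notify control packet; only READ and WROTE packets
 * are meaningful for the QState protocol.
 */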
static void
vmci_transport_notify_pkt_handle_pkt(
			struct sock *sk,
			struct vmci_transport_packet *pkt,
			bool bottom_half,
			struct sockaddr_vm *dst,
			struct sockaddr_vm *src, bool *pkt_processed)
{
	bool processed = false;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
		vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
		processed = true;
		break;
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
		vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
		processed = true;
		break;
	}

	if (pkt_processed)
		*pkt_processed = processed;
}

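/* Once the queue pair size is known (at request/negotiate time), start with
 * the write notify window covering the whole consume queue and clamp the
 * minimum window to the queue size.
 */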
static void vmci_transport_notify_pkt_process_request(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
		PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}

static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
		PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}

static int
vmci_transport_notify_pkt_recv_pre_dequeue(
			struct sock *sk,
			size_t target,
			struct vmci_transport_recv_notify_data *data)
{
	return 0; /* NOP for QState. */
}

static int
vmci_transport_notify_pkt_send_pre_block(
			struct sock *sk,
			struct vmci_transport_send_notify_data *data)
{
	return 0; /* NOP for QState. */
}

static int
vmci_transport_notify_pkt_send_pre_enqueue(
			struct sock *sk,
			struct vmci_transport_send_notify_data *data)
{
	return 0; /* NOP for QState. */
}

/* Socket operations for the always-on, control-packet-based QState protocol. */
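/* Presumably chosen over vmci_transport_notify_pkt_ops when the peer
 * negotiates the newer notification protocol (see vmci_transport.c).
 */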
const struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
	.socket_init = vmci_transport_notify_pkt_socket_init,
	.socket_destruct = vmci_transport_notify_pkt_socket_destruct,
	.poll_in = vmci_transport_notify_pkt_poll_in,
	.poll_out = vmci_transport_notify_pkt_poll_out,
	.handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
	.recv_init = vmci_transport_notify_pkt_recv_init,
	.recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
	.recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
	.recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
	.send_init = vmci_transport_notify_pkt_send_init,
	.send_pre_block = vmci_transport_notify_pkt_send_pre_block,
	.send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
	.send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
	.process_request = vmci_transport_notify_pkt_process_request,
	.process_negotiate = vmci_transport_notify_pkt_process_negotiate,
};