/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Macros for SMC statistics
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s): Guvenc Gulce
 */
  11. #ifndef NET_SMC_SMC_STATS_H_
  12. #define NET_SMC_SMC_STATS_H_
  13. #include <linux/init.h>
  14. #include <linux/mutex.h>
  15. #include <linux/percpu.h>
  16. #include <linux/ctype.h>
  17. #include <linux/smc.h>
  18. #include "smc_clc.h"
  19. #define SMC_MAX_FBACK_RSN_CNT 36
  20. enum {
  21. SMC_BUF_8K,
  22. SMC_BUF_16K,
  23. SMC_BUF_32K,
  24. SMC_BUF_64K,
  25. SMC_BUF_128K,
  26. SMC_BUF_256K,
  27. SMC_BUF_512K,
  28. SMC_BUF_1024K,
  29. SMC_BUF_G_1024K,
  30. SMC_BUF_MAX,
  31. };
  32. struct smc_stats_fback {
  33. int fback_code;
  34. u16 count;
  35. };
  36. struct smc_stats_rsn {
  37. struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
  38. struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
  39. u64 srv_fback_cnt;
  40. u64 clnt_fback_cnt;
  41. };
  42. struct smc_stats_rmbcnt {
  43. u64 buf_size_small_peer_cnt;
  44. u64 buf_size_small_cnt;
  45. u64 buf_full_peer_cnt;
  46. u64 buf_full_cnt;
  47. u64 reuse_cnt;
  48. u64 alloc_cnt;
  49. u64 dgrade_cnt;
  50. };
  51. struct smc_stats_memsize {
  52. u64 buf[SMC_BUF_MAX];
  53. };
  54. struct smc_stats_tech {
  55. struct smc_stats_memsize tx_rmbsize;
  56. struct smc_stats_memsize rx_rmbsize;
  57. struct smc_stats_memsize tx_pd;
  58. struct smc_stats_memsize rx_pd;
  59. struct smc_stats_rmbcnt rmb_tx;
  60. struct smc_stats_rmbcnt rmb_rx;
  61. u64 clnt_v1_succ_cnt;
  62. u64 clnt_v2_succ_cnt;
  63. u64 srv_v1_succ_cnt;
  64. u64 srv_v2_succ_cnt;
  65. u64 urg_data_cnt;
  66. u64 splice_cnt;
  67. u64 cork_cnt;
  68. u64 ndly_cnt;
  69. u64 rx_bytes;
  70. u64 tx_bytes;
  71. u64 rx_cnt;
  72. u64 tx_cnt;
  73. u64 rx_rmbuse;
  74. u64 tx_rmbuse;
  75. };
  76. struct smc_stats {
  77. struct smc_stats_tech smc[2];
  78. u64 clnt_hshake_err_cnt;
  79. u64 srv_hshake_err_cnt;
  80. };
  81. #define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
  82. do { \
  83. typeof(_smc_stats) stats = (_smc_stats); \
  84. typeof(_tech) t = (_tech); \
  85. typeof(_len) l = (_len); \
  86. int _pos; \
  87. typeof(_rc) r = (_rc); \
  88. int m = SMC_BUF_MAX - 1; \
  89. this_cpu_inc((*stats).smc[t].key ## _cnt); \
  90. if (r <= 0 || l <= 0) \
  91. break; \
  92. _pos = fls64((l - 1) >> 13); \
  93. _pos = (_pos <= m) ? _pos : m; \
  94. this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
  95. this_cpu_add((*stats).smc[t].key ## _bytes, r); \
  96. } \
  97. while (0)
  98. #define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
  99. do { \
  100. typeof(_smc) __smc = _smc; \
  101. struct net *_net = sock_net(&__smc->sk); \
  102. struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
  103. typeof(length) _len = (length); \
  104. typeof(rcode) _rc = (rcode); \
  105. bool is_smcd = !__smc->conn.lnk; \
  106. if (is_smcd) \
  107. SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
  108. else \
  109. SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
  110. } \
  111. while (0)
  112. #define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
  113. do { \
  114. typeof(_smc) __smc = _smc; \
  115. struct net *_net = sock_net(&__smc->sk); \
  116. struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
  117. typeof(length) _len = (length); \
  118. typeof(rcode) _rc = (rcode); \
  119. bool is_smcd = !__smc->conn.lnk; \
  120. if (is_smcd) \
  121. SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
  122. else \
  123. SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
  124. } \
  125. while (0)
  126. #define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _is_add, _len) \
  127. do { \
  128. typeof(_smc_stats) stats = (_smc_stats); \
  129. typeof(_is_add) is_a = (_is_add); \
  130. typeof(_len) _l = (_len); \
  131. typeof(_tech) t = (_tech); \
  132. int _pos; \
  133. int m = SMC_BUF_MAX - 1; \
  134. if (_l <= 0) \
  135. break; \
  136. if (is_a) { \
  137. _pos = fls((_l - 1) >> 13); \
  138. _pos = (_pos <= m) ? _pos : m; \
  139. this_cpu_inc((*stats).smc[t].k ## _rmbsize.buf[_pos]); \
  140. this_cpu_add((*stats).smc[t].k ## _rmbuse, _l); \
  141. } else { \
  142. this_cpu_sub((*stats).smc[t].k ## _rmbuse, _l); \
  143. } \
  144. } \
  145. while (0)
  146. #define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
  147. this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)
  148. #define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _is_add, _len) \
  149. do { \
  150. struct net *_net = sock_net(&(_smc)->sk); \
  151. struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
  152. typeof(_is_add) is_add = (_is_add); \
  153. typeof(_is_smcd) is_d = (_is_smcd); \
  154. typeof(_is_rx) is_r = (_is_rx); \
  155. typeof(_len) l = (_len); \
  156. if ((is_d) && (is_r)) \
  157. SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, is_add, l); \
  158. if ((is_d) && !(is_r)) \
  159. SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, is_add, l); \
  160. if (!(is_d) && (is_r)) \
  161. SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, is_add, l); \
  162. if (!(is_d) && !(is_r)) \
  163. SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, is_add, l); \
  164. } \
  165. while (0)
  166. #define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
  167. do { \
  168. struct net *net = sock_net(&(_smc)->sk); \
  169. struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
  170. typeof(_is_smcd) is_d = (_is_smcd); \
  171. typeof(_is_rx) is_r = (_is_rx); \
  172. if ((is_d) && (is_r)) \
  173. SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, rx); \
  174. if ((is_d) && !(is_r)) \
  175. SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, tx); \
  176. if (!(is_d) && (is_r)) \
  177. SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, rx); \
  178. if (!(is_d) && !(is_r)) \
  179. SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, tx); \
  180. } \
  181. while (0)
  182. #define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
  183. SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)
  184. #define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
  185. SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)
  186. #define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
  187. SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)
  188. #define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
  189. SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)
  190. #define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
  191. SMC_STAT_RMB(smc, buf_full, is_smcd, false)
  192. #define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
  193. SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)
  194. #define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
  195. SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)
  196. #define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
  197. SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)
  198. #define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
  199. SMC_STAT_RMB(smc, buf_full, is_smcd, true)
  200. #define SMC_STAT_INC(_smc, type) \
  201. do { \
  202. typeof(_smc) __smc = _smc; \
  203. bool is_smcd = !(__smc)->conn.lnk; \
  204. struct net *net = sock_net(&(__smc)->sk); \
  205. struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
  206. if ((is_smcd)) \
  207. this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
  208. else \
  209. this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
  210. } \
  211. while (0)
  212. #define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
  213. do { \
  214. typeof(_aclc) acl = (_aclc); \
  215. bool is_v2 = (acl->hdr.version == SMC_V2); \
  216. bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
  217. struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
  218. if (is_v2 && is_smcd) \
  219. this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
  220. else if (is_v2 && !is_smcd) \
  221. this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
  222. else if (!is_v2 && is_smcd) \
  223. this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
  224. else if (!is_v2 && !is_smcd) \
  225. this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
  226. } \
  227. while (0)
  228. #define SMC_STAT_SERV_SUCC_INC(net, _ini) \
  229. do { \
  230. typeof(_ini) i = (_ini); \
  231. bool is_smcd = (i->is_smcd); \
  232. u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
  233. bool is_v2 = (version & SMC_V2); \
  234. typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
  235. if (is_v2 && is_smcd) \
  236. this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
  237. else if (is_v2 && !is_smcd) \
  238. this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
  239. else if (!is_v2 && is_smcd) \
  240. this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
  241. else if (!is_v2 && !is_smcd) \
  242. this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
  243. } \
  244. while (0)
  245. int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
  246. int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
  247. int smc_stats_init(struct net *net);
  248. void smc_stats_exit(struct net *net);
  249. #endif /* NET_SMC_SMC_STATS_H_ */