tcp_metrics.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct net			*tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(const struct tcp_metrics_block *tm)
{
	/* Paired with the WRITE_ONCE() in tcpm_new() */
	return READ_ONCE(tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}

static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcp_metric_set() */
	return READ_ONCE(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	/* Paired with READ_ONCE() in tcp_metric_get() */
	WRITE_ONCE(tm->tcpm_vals[idx], val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
static DEFINE_SEQLOCK(fastopen_seqlock);

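/* Refresh a cache entry from its route: snapshot the dst's RTAX_* lock
 * bits into tcpm_lock and the raw metric values into tcpm_vals (RTT and
 * RTTVAR are converted from msec to usec), updating tcpm_stamp. When
 * fastopen_clear is set, the Fast Open state is also reset under
 * fastopen_seqlock.
 */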
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	WRITE_ONCE(tm->tcpm_stamp, jiffies);

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	/* Paired with READ_ONCE() in tcp_metric_locked() */
	WRITE_ONCE(tm->tcpm_lock, val);

	msval = dst_metric_raw(dst, RTAX_RTT);
	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
		       dst_metric_raw(dst, RTAX_SSTHRESH));
	tcp_metric_set(tm, TCP_METRIC_CWND,
		       dst_metric_raw(dst, RTAX_CWND));
	tcp_metric_set(tm, TCP_METRIC_REORDERING,
		       dst_metric_raw(dst, RTAX_REORDERING));
	if (fastopen_clear) {
		write_seqlock(&fastopen_seqlock);
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
		write_sequnlock(&fastopen_seqlock);
	}
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm,
			     const struct dst_entry *dst)
{
	unsigned long limit;

	if (!tm)
		return;
	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
	if (unlikely(time_after(jiffies, limit)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

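/* Create a cache entry for (saddr, daddr). Re-checks the bucket chain
 * under tcp_metrics_lock in case another CPU inserted the entry first;
 * when the lookup reported a too-long chain (TCP_METRICS_RECLAIM_PTR),
 * the oldest entry in the bucket is recycled in place instead of
 * allocating a new block.
 */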
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	bool reclaim = false;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net_rcu(dst_dev(dst));

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(READ_ONCE(tm->tcpm_stamp),
					READ_ONCE(oldest->tcpm_stamp)))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	/* Paired with the READ_ONCE() in tm_net() */
	WRITE_ONCE(tm->tcpm_net, net);

	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, reclaim);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

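/* Encode the result of a chain walk: a found entry is returned as-is,
 * while a miss on a chain longer than TCP_METRICS_RECLAIM_DEPTH is
 * flagged with TCP_METRICS_RECLAIM_PTR so that tcpm_new() recycles the
 * oldest entry instead of growing the bucket.
 */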
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

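/* Lookup helper for the request_sock paths: build the saddr/daddr pair
 * and bucket hash from the request and walk the chain under RCU,
 * refreshing a stale entry from the dst before returning it.
 */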
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net_rcu(dst_dev(dst));
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

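/* Main lookup entry point: derive the address pair from a full socket
 * (treating v4-mapped IPv6 addresses as IPv4) and, when @create is set,
 * allocate the entry on a miss via tcpm_new().
 */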
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net_rcu(dst_dev(dst));
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time. Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If newly calculated rtt is larger than the stored one, store the
	 * new one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tcp_snd_cwnd(tp) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tcp_snd_cwnd(tp) > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tcp_snd_cwnd(tp));
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering !=
			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */
void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	/* ssthresh may have been reduced unnecessarily during
	 * 3WHS. Restore it back to its initial default.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, false);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val)
		tp->reordering = val;

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops doing it and starts to delay
	 * ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
}

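/* Report whether a previous session with this peer left a cached RTT
 * sample behind; the lookup key is taken from the request_sock.
 */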
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

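/* Read the cached TCP Fast Open state (MSS, cookie, experimental-option
 * hint) for this destination under fastopen_seqlock, retrying the read
 * if a writer raced with us.
 */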
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

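/* Update the cached Fast Open state after a handshake: record the
 * negotiated MSS and cookie, remember experimental-option attempts,
 * and count consecutive SYN losses (reset to zero on success).
 */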
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCP_METRICS_ATTR_SADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_SADDR_IPV6]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - READ_ONCE(tm->tcpm_stamp),
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tcp_metric_get(tm, i);

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

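/* Netlink dump callback: walk every hash bucket under RCU, skipping
 * entries from other namespaces and resuming from the row/column saved
 * in cb->args[] by the previous partial dump.
 */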
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;
	int res = 0;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			res = tcp_metrics_dump_info(skb, cb, tm);
			if (res < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return res;
}

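/* Parse a v4 or v6 address attribute into an inetpeer_addr and compute
 * its hash contribution; returns 1 when neither attribute is present
 * and @optional is set, -EAFNOSUPPORT otherwise.
 */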
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

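/* Remove entries bucket by bucket under tcp_metrics_lock: all entries
 * of @net, or, when @net is NULL (namespace teardown), entries whose
 * namespace refcount has dropped to zero.
 */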
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp = &hb->chain;
		bool match;

		if (!rcu_access_pointer(*pp))
			continue;

		spin_lock_bh(&tcp_metrics_lock);
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!refcount_read(&tm_net(tm)->ns.count);
			if (match) {
				rcu_assign_pointer(*pp, tm->tcpm_next);
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
		cond_resched();
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			rcu_assign_pointer(*pp, tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_small_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_del,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.policy		= tcp_metrics_nl_policy,
	.netnsok	= true,
	.parallel_ops	= true,
	.module		= THIS_MODULE,
	.small_ops	= tcp_metrics_nl_ops,
	.n_small_ops	= ARRAY_SIZE(tcp_metrics_nl_ops),
	.resv_start_op	= TCP_METRICS_CMD_DEL + 1,
};

static unsigned int tcpmhash_entries __initdata;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

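/* Size the hash table at boot: honour the "tcpmhash_entries=" command
 * line parameter when given, otherwise pick 16K or 8K slots depending
 * on how much RAM the system has.
 */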
static void __init tcp_metrics_hash_alloc(void)
{
	unsigned int slots = tcpmhash_entries;
	size_t size;

	if (!slots) {
		if (totalram_pages() >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		panic("Could not allocate the tcp_metrics hash table\n");
}

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.exit_batch	= tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	tcp_metrics_hash_alloc();

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not register tcp_net_metrics_ops\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}