net-procfs.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#include <net/hotdata.h>

#include "dev.h"

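/* Resolve the seq_file position for the /proc/net/dev style iterators below.
 * The device ifindex itself is used as the cursor: the dump restarts at the
 * first device whose ifindex is >= *pos and writes that ifindex back into
 * *pos, so the walk stays consistent even if devices are added or removed
 * between reads of the file.
 */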
static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
	unsigned long ifindex = *pos;
	struct net_device *dev;

	for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
		*pos = dev->ifindex;
		return dev;
	}
	return NULL;
}

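/* seq_file start/next/stop callbacks shared by /proc/net/dev and
 * /proc/net/dev_mcast. The whole walk runs under rcu_read_lock(), and
 * position 0 yields SEQ_START_TOKEN so that ->show() can emit the header
 * line before any device is printed.
 */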
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;
	return dev_seq_from_index(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_seq_from_index(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

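/* Print one /proc/net/dev line. The counters are snapshotted via
 * dev_get_stats() into a local rtnl_link_stats64; several legacy columns
 * are sums of related counters ("drop" folds in rx_missed_errors, "frame"
 * aggregates length/over/crc/frame errors, and the transmit "carrier"
 * column aggregates carrier/aborted/window/heartbeat errors).
 */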
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->process_queue);
}

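/* Return the softnet_data of the first online CPU with id >= *pos, advancing
 * *pos past offline CPUs, or NULL once *pos runs past nr_cpu_ids. This is the
 * cursor logic behind /proc/net/softnet_stat, which prints one line per
 * online CPU.
 */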
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

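/* Emit one /proc/net/softnet_stat line for this CPU. The columns are, in
 * order: packets processed, dropped, time_squeeze, six always-zero
 * placeholders (the former fastroute and cpu_collision fields, presumably
 * kept so the column layout stays stable for existing parsers),
 * received_rps, the flow-limit drop count, the total backlog length, the
 * owning CPU id, and the input/process queue lengths; everything is printed
 * as %08x.
 */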
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	u32 input_qlen = softnet_input_pkt_queue_len(sd);
	u32 process_qlen = softnet_process_queue_len(sd);
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif
	/* The index printed below is the CPU id owning this sd. Since offline
	 * CPUs are not displayed, it would otherwise not be trivial for
	 * user-space to map each line to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x\n",
		   sd->processed, atomic_read(&sd->dropped),
		   sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0,	/* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   input_qlen + process_qlen, (int)seq->index,
		   input_qlen, process_qlen);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

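/* Locate the pos'th entry for /proc/net/ptype. The handlers are spread over
 * three collections, walked in order: the per-device ptype_all lists of the
 * devices in this namespace, the global net_hotdata.ptype_all list, and
 * finally the ptype_base hash buckets.
 */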
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

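/* Advance to the next packet_type, continuing across the same three
 * collections that ptype_get_idx() walks: the rest of the current device's
 * ptype_all list, later devices with a non-empty ptype_all, the global
 * ptype_all list, and then the ptype_base buckets in hash order.
 */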
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		if (nxt != &pt->dev->ptype_all)
			goto found;

		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		nxt = net_hotdata.ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
		if (nxt != &net_hotdata.ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

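/* Create the per-namespace proc entries: /proc/net/dev,
 * /proc/net/softnet_stat and /proc/net/ptype, plus the wireless-extensions
 * entry. softnet_stat uses proc_create_seq() without a seq_net_private since
 * softnet_data is per-CPU rather than per-namespace. On failure, the entries
 * created so far are removed in reverse order.
 */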
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			     &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

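/* Show one /proc/net/dev_mcast line per multicast address of the device:
 * ifindex, name, reference count, global use flag and the hardware address
 * in hex. The address list is walked under netif_addr_lock_bh(); the table
 * below reuses the RCU device iterators above for start/next/stop.
 */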
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

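/* Boot-time registration of both pernet_operations. Note that if the second
 * register_pernet_subsys() call fails, the first registration is left in
 * place and the error from the second call is returned to the caller.
 */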
int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);

	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);

	return ret;
}