dev.h 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204
  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. #ifndef _NET_CORE_DEV_H
  3. #define _NET_CORE_DEV_H
  4. #include <linux/types.h>
  5. #include <linux/rwsem.h>
  6. #include <linux/netdevice.h>
  7. struct net;
  8. struct netlink_ext_ack;
  9. struct cpumask;
/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */

/*
 * Per-CPU flow-limit accounting state.
 * NOTE(review): field semantics inferred from names and the flexible
 * array layout; confirm against the users in net/core/dev.c.
 */
struct sd_flow_limit {
	u64 count;			/* total packets accounted so far */
	unsigned int num_buckets;	/* number of entries in buckets[] */
	unsigned int history_head;	/* next slot to overwrite in history[] */
	u16 history[FLOW_LIMIT_HISTORY]; /* ring buffer of recent bucket ids */
	u8 buckets[];			/* flexible array of per-bucket counters,
					 * num_buckets entries, allocated with
					 * the struct
					 */
};
/* sysctl: upper bound for sd_flow_limit bucket tables */
extern int netdev_flow_limit_table_len;

/* Look up a NAPI instance by its napi_id within @net; NULL if not found */
struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id);

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
/* No procfs: pretend init succeeded */
#define dev_proc_init() 0
#endif

/* Link-state watcher (linkwatch) hooks */
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);

/* Device hardware-address list management */
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);

/* sysctls not referred to from outside net/core/ */
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;

/* Serializes writers of device hardware addresses */
extern struct rw_semaphore dev_addr_sem;

/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);
/* netdev management, shared between various uAPI entry points */

/*
 * One name (primary or alternative) attached to a net_device.
 * The altname iteration macros below walk dev->name_node->list.
 */
struct netdev_name_node {
	struct hlist_node hlist;	/* name-hash linkage — presumably the
					 * per-net name lookup table; confirm
					 * in net/core/dev.c
					 */
	struct list_head list;		/* sibling names of the same device */
	struct net_device *dev;		/* owning device */
	const char *name;		/* the name string itself */
	struct rcu_head rcu;		/* for RCU-deferred freeing */
};
/* Copy the name of the device with @ifindex in @net into @name */
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_change_name(struct net_device *dev, const char *newname);

/* Iterate over all alternative names hanging off @dev's primary name node */
#define netdev_for_each_altname(dev, namenode) \
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
/* As above, but safe against removal of the current entry */
#define netdev_for_each_altname_safe(dev, namenode, next) \
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

/* MTU validation/change with extended ack error reporting */
int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
		    struct netlink_ext_ack *extack);

/* Physical port identification */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);

int dev_change_proto_down(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value);

/* XDP program install/replace; @expected_fd supports atomic replacement */
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);

int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void dev_set_group(struct net_device *dev, int new_group);
int dev_change_carrier(struct net_device *dev, bool new_carrier);

void __dev_set_rx_mode(struct net_device *dev);

/* Notify flag changes via rtnetlink, attributed to @portid/@nlh if given */
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh);

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh);
/*
 * Set the device's maximum GSO size. Published with WRITE_ONCE() because
 * readers are lockless; the IPv4 limit is kept in sync while @size stays
 * within the legacy range.
 */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
/* Set the device's maximum GSO segment count (lockless readers exist). */
static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}
/*
 * Set the device's maximum GRO aggregate size; mirrors the value into the
 * IPv4-specific limit while @size is within the legacy range.
 */
static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
/* Set the IPv4-specific maximum GSO size only (lockless readers exist). */
static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
/* Set the IPv4-specific maximum GRO aggregate size only. */
static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
int rps_cpumask_housekeeping(struct cpumask *mask);

#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
/* Debug-only check; compiles to nothing without DEBUG_NET + BPF_SYSCALL */
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
/* Best effort check that NAPI is not idle (can't be scheduled to run) */
static inline void napi_assert_will_not_race(const struct napi_struct *napi)
{
	/* uninitialized instance, can't race */
	if (!napi->poll_list.next)
		return;
	/* SCHED bit is set on disabled instances */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
	/* list_owner == -1 means no CPU currently owns this instance */
	WARN_ON(READ_ONCE(napi->list_owner) != -1);
}
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);

/* Nesting depth allowed for re-entrant transmit; see dev_xmit_recursion() */
#define XMIT_RECURSION_LIMIT	8
#ifndef CONFIG_PREEMPT_RT
/*
 * !PREEMPT_RT: the transmit recursion counter lives in per-CPU
 * softnet_data. NOTE(review): presumably safe because xmit runs with
 * BH disabled and cannot migrate CPUs — confirm at the call sites.
 */
static inline bool dev_xmit_recursion(void)
{
	/* True once the nesting depth has exceeded XMIT_RECURSION_LIMIT */
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
/*
 * PREEMPT_RT: tasks may be preempted and migrated during xmit, so the
 * recursion counter is tracked per-task in current->net_xmit instead.
 */
static inline bool dev_xmit_recursion(void)
{
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	current->net_xmit.recursion++;
}

static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif
/* Apply a hardware timestamping config, reporting errors via @extack */
int dev_set_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);
  163. #endif