/*
 * DHD Linux header file - contains private structure definition of the Linux specific layer
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux_priv.h 725613 2020-05-12 09:31:19Z $
 */
#ifndef __DHD_LINUX_PRIV_H__
#define __DHD_LINUX_PRIV_H__

#include <osl.h>

#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_linux.h>
#include <dhd_bus.h>

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */

/*
 * Do not include this header except for the dhd_linux.c dhd_linux_sysfs.c
 * Local private structure (extension of pub)
 */
  54. typedef struct dhd_info {
  55. #if defined(WL_WIRELESS_EXT)
  56. wl_iw_t iw; /* wireless extensions state (must be first) */
  57. #endif /* defined(WL_WIRELESS_EXT) */
  58. dhd_pub_t pub;
  59. /* for supporting multiple interfaces.
  60. * static_ifs hold the net ifaces without valid FW IF
  61. */
  62. dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
  63. void *adapter; /* adapter information, interrupt, fw path etc. */
  64. char fw_path[PATH_MAX]; /* path to firmware image */
  65. char nv_path[PATH_MAX]; /* path to nvram vars file */
  66. #ifdef DHD_UCODE_DOWNLOAD
  67. char uc_path[PATH_MAX]; /* path to ucode image */
  68. #endif /* DHD_UCODE_DOWNLOAD */
  69. /* serialize dhd iovars */
  70. struct mutex dhd_iovar_mutex;
  71. struct semaphore proto_sem;
  72. #ifdef PROP_TXSTATUS
  73. spinlock_t wlfc_spinlock;
  74. #endif /* PROP_TXSTATUS */
  75. wait_queue_head_t ioctl_resp_wait;
  76. wait_queue_head_t d3ack_wait;
  77. wait_queue_head_t dhd_bus_busy_state_wait;
  78. wait_queue_head_t dmaxfer_wait;
  79. uint32 default_wd_interval;
  80. timer_list_compat_t timer;
  81. bool wd_timer_valid;
  82. #ifdef DHD_PCIE_RUNTIMEPM
  83. timer_list_compat_t rpm_timer;
  84. bool rpm_timer_valid;
  85. tsk_ctl_t thr_rpm_ctl;
  86. #endif /* DHD_PCIE_RUNTIMEPM */
  87. struct tasklet_struct tasklet;
  88. spinlock_t sdlock;
  89. spinlock_t txqlock;
  90. spinlock_t dhd_lock;
  91. struct semaphore sdsem;
  92. tsk_ctl_t thr_dpc_ctl;
  93. tsk_ctl_t thr_wdt_ctl;
  94. tsk_ctl_t thr_rxf_ctl;
  95. spinlock_t rxf_lock;
  96. bool rxthread_enabled;
  97. /* Wakelocks */
  98. #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
  99. struct wake_lock wl_wifi; /* Wifi wakelock */
  100. struct wake_lock wl_rxwake; /* Wifi rx wakelock */
  101. struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
  102. struct wake_lock wl_wdwake; /* Wifi wd wakelock */
  103. struct wake_lock wl_evtwake; /* Wifi event wakelock */
  104. struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
  105. struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
  106. #ifdef BCMPCIE_OOB_HOST_WAKE
  107. struct wake_lock wl_intrwake; /* Host wakeup wakelock */
  108. #endif /* BCMPCIE_OOB_HOST_WAKE */
  109. #ifdef DHD_USE_SCAN_WAKELOCK
  110. struct wake_lock wl_scanwake; /* Wifi scan wakelock */
  111. #endif /* DHD_USE_SCAN_WAKELOCK */
  112. #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
  113. #if defined(OEM_ANDROID)
  114. /* net_device interface lock, prevent race conditions among net_dev interface
  115. * calls and wifi_on or wifi_off
  116. */
  117. struct mutex dhd_net_if_mutex;
  118. struct mutex dhd_suspend_mutex;
  119. #if defined(PKT_FILTER_SUPPORT) && defined(APF)
  120. struct mutex dhd_apf_mutex;
  121. #endif /* PKT_FILTER_SUPPORT && APF */
  122. #endif /* OEM_ANDROID */
  123. spinlock_t wakelock_spinlock;
  124. spinlock_t wakelock_evt_spinlock;
  125. uint32 wakelock_counter;
  126. int wakelock_wd_counter;
  127. int wakelock_rx_timeout_enable;
  128. int wakelock_ctrl_timeout_enable;
  129. bool waive_wakelock;
  130. uint32 wakelock_before_waive;
  131. /* Thread to issue ioctl for multicast */
  132. wait_queue_head_t ctrl_wait;
  133. atomic_t pend_8021x_cnt;
  134. dhd_attach_states_t dhd_state;
  135. #ifdef SHOW_LOGTRACE
  136. dhd_event_log_t event_data;
  137. #endif /* SHOW_LOGTRACE */
  138. #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
  139. struct early_suspend early_suspend;
  140. #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
  141. #ifdef ARP_OFFLOAD_SUPPORT
  142. u32 pend_ipaddr;
  143. #endif /* ARP_OFFLOAD_SUPPORT */
  144. #ifdef DHDTCPACK_SUPPRESS
  145. spinlock_t tcpack_lock;
  146. #endif /* DHDTCPACK_SUPPRESS */
  147. #ifdef FIX_CPU_MIN_CLOCK
  148. bool cpufreq_fix_status;
  149. struct mutex cpufreq_fix;
  150. struct pm_qos_request dhd_cpu_qos;
  151. #ifdef FIX_BUS_MIN_CLOCK
  152. struct pm_qos_request dhd_bus_qos;
  153. #endif /* FIX_BUS_MIN_CLOCK */
  154. #endif /* FIX_CPU_MIN_CLOCK */
  155. void *dhd_deferred_wq;
  156. #ifdef DEBUG_CPU_FREQ
  157. struct notifier_block freq_trans;
  158. int __percpu *new_freq;
  159. #endif // endif
  160. unsigned int unit;
  161. struct notifier_block pm_notifier;
  162. #ifdef DHD_PSTA
  163. uint32 psta_mode; /* PSTA or PSR */
  164. #endif /* DHD_PSTA */
  165. #ifdef DHD_WET
  166. uint32 wet_mode;
  167. #endif /* DHD_WET */
  168. #ifdef DHD_DEBUG
  169. dhd_dump_t *dump;
  170. struct timer_list join_timer;
  171. u32 join_timeout_val;
  172. bool join_timer_active;
  173. uint scan_time_count;
  174. struct timer_list scan_timer;
  175. bool scan_timer_active;
  176. #endif // endif
  177. #if defined(DHD_LB)
  178. /* CPU Load Balance dynamic CPU selection */
  179. /* Variable that tracks the currect CPUs available for candidacy */
  180. cpumask_var_t cpumask_curr_avail;
  181. /* Primary and secondary CPU mask */
  182. cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
  183. cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
  184. struct notifier_block cpu_notifier;
  185. /* Tasklet to handle Tx Completion packet freeing */
  186. struct tasklet_struct tx_compl_tasklet;
  187. atomic_t tx_compl_cpu;
  188. /* Tasklet to handle RxBuf Post during Rx completion */
  189. struct tasklet_struct rx_compl_tasklet;
  190. atomic_t rx_compl_cpu;
  191. /* Napi struct for handling rx packet sendup. Packets are removed from
  192. * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
  193. * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
  194. * to run to rx_napi_cpu.
  195. */
  196. struct sk_buff_head rx_pend_queue ____cacheline_aligned;
  197. struct sk_buff_head rx_napi_queue ____cacheline_aligned;
  198. struct napi_struct rx_napi_struct ____cacheline_aligned;
  199. atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
  200. struct net_device *rx_napi_netdev; /* netdev of primary interface */
  201. struct work_struct rx_napi_dispatcher_work;
  202. struct work_struct tx_compl_dispatcher_work;
  203. struct work_struct tx_dispatcher_work;
  204. struct work_struct rx_compl_dispatcher_work;
  205. /* Number of times DPC Tasklet ran */
  206. uint32 dhd_dpc_cnt;
  207. /* Number of times NAPI processing got scheduled */
  208. uint32 napi_sched_cnt;
  209. /* Number of times NAPI processing ran on each available core */
  210. uint32 *napi_percpu_run_cnt;
  211. /* Number of times RX Completions got scheduled */
  212. uint32 rxc_sched_cnt;
  213. /* Number of times RX Completion ran on each available core */
  214. uint32 *rxc_percpu_run_cnt;
  215. /* Number of times TX Completions got scheduled */
  216. uint32 txc_sched_cnt;
  217. /* Number of times TX Completions ran on each available core */
  218. uint32 *txc_percpu_run_cnt;
  219. /* CPU status */
  220. /* Number of times each CPU came online */
  221. uint32 *cpu_online_cnt;
  222. /* Number of times each CPU went offline */
  223. uint32 *cpu_offline_cnt;
  224. /* Number of times TX processing run on each core */
  225. uint32 *txp_percpu_run_cnt;
  226. /* Number of times TX start run on each core */
  227. uint32 *tx_start_percpu_run_cnt;
  228. /* Tx load balancing */
  229. /* TODO: Need to see if batch processing is really required in case of TX
  230. * processing. In case of RX the Dongle can send a bunch of rx completions,
  231. * hence we took a 3 queue approach
  232. * enque - adds the skbs to rx_pend_queue
  233. * dispatch - uses a lock and adds the list of skbs from pend queue to
  234. * napi queue
  235. * napi processing - copies the pend_queue into a local queue and works
  236. * on it.
  237. * But for TX its going to be 1 skb at a time, so we are just thinking
  238. * of using only one queue and use the lock supported skb queue functions
  239. * to add and process it. If its in-efficient we'll re-visit the queue
  240. * design.
  241. */
  242. /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
  243. /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
  244. /*
  245. * From the Tasklet that actually sends out data
  246. * copy the list tx_pend_queue into tx_active_queue. There by we need
  247. * to spinlock to only perform the copy the rest of the code ie to
  248. * construct the tx_pend_queue and the code to process tx_active_queue
  249. * can be lockless. The concept is borrowed as is from RX processing
  250. */
  251. /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
  252. /* Control TXP in runtime, enable by default */
  253. atomic_t lb_txp_active;
  254. /* Control RXP in runtime, enable by default */
  255. atomic_t lb_rxp_active;
  256. /*
  257. * When the NET_TX tries to send a TX packet put it into tx_pend_queue
  258. * For now, the processing tasklet will also direcly operate on this
  259. * queue
  260. */
  261. struct sk_buff_head tx_pend_queue ____cacheline_aligned;
  262. /* Control RXP in runtime, enable by default */
  263. /* cpu on which the DHD Tx is happenning */
  264. atomic_t tx_cpu;
  265. /* CPU on which the Network stack is calling the DHD's xmit function */
  266. atomic_t net_tx_cpu;
  267. /* Tasklet context from which the DHD's TX processing happens */
  268. struct tasklet_struct tx_tasklet;
  269. /*
  270. * Consumer Histogram - NAPI RX Packet processing
  271. * -----------------------------------------------
  272. * On Each CPU, when the NAPI RX Packet processing call back was invoked
  273. * how many packets were processed is captured in this data structure.
  274. * Now its difficult to capture the "exact" number of packets processed.
  275. * So considering the packet counter to be a 32 bit one, we have a
  276. * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
  277. * processed is rounded off to the next power of 2 and put in the
  278. * approriate "bin" the value in the bin gets incremented.
  279. * For example, assume that in CPU 1 if NAPI Rx runs 3 times
  280. * and the packet count processed is as follows (assume the bin counters are 0)
  281. * iteration 1 - 10 (the bin counter 2^4 increments to 1)
  282. * iteration 2 - 30 (the bin counter 2^5 increments to 1)
  283. * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
  284. */
  285. uint32 *napi_rx_hist[HIST_BIN_SIZE];
  286. uint32 *txc_hist[HIST_BIN_SIZE];
  287. uint32 *rxc_hist[HIST_BIN_SIZE];
  288. #endif /* DHD_LB */
  289. #if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
  290. struct work_struct axi_error_dispatcher_work;
  291. #endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
  292. #ifdef SHOW_LOGTRACE
  293. #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
  294. tsk_ctl_t thr_logtrace_ctl;
  295. #else
  296. struct delayed_work event_log_dispatcher_work;
  297. #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
  298. #endif /* SHOW_LOGTRACE */
  299. #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
  300. #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
  301. struct kobject dhd_kobj;
  302. struct kobject dhd_conf_file_kobj;
  303. struct timer_list timesync_timer;
  304. #if defined(BT_OVER_SDIO)
  305. char btfw_path[PATH_MAX];
  306. #endif /* defined (BT_OVER_SDIO) */
  307. #ifdef WL_MONITOR
  308. struct net_device *monitor_dev; /* monitor pseudo device */
  309. struct sk_buff *monitor_skb;
  310. uint monitor_len;
  311. uint monitor_type; /* monitor pseudo device */
  312. #endif /* WL_MONITOR */
  313. #if defined(BT_OVER_SDIO)
  314. struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
  315. int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
  316. #endif /* BT_OVER_SDIO */
  317. #ifdef SHOW_LOGTRACE
  318. struct sk_buff_head evt_trace_queue ____cacheline_aligned;
  319. #endif // endif
  320. #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
  321. struct workqueue_struct *tx_wq;
  322. struct workqueue_struct *rx_wq;
  323. #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
  324. #ifdef DHD_DEBUG_UART
  325. bool duart_execute;
  326. #endif /* DHD_DEBUG_UART */
  327. struct mutex logdump_lock;
  328. /* indicates mem_dump was scheduled as work queue or called directly */
  329. bool scheduled_memdump;
  330. struct work_struct dhd_hang_process_work;
  331. #ifdef DHD_HP2P
  332. spinlock_t hp2p_lock;
  333. #endif /* DHD_HP2P */
  334. } dhd_info_t;
  335. extern int dhd_sysfs_init(dhd_info_t *dhd);
  336. extern void dhd_sysfs_exit(dhd_info_t *dhd);
  337. extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
  338. extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);
  339. int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);
  340. #if defined(DHD_LB)
  341. #if defined(DHD_LB_TXP)
  342. int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
  343. void dhd_tx_dispatcher_work(struct work_struct * work);
  344. void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
  345. void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
  346. void dhd_lb_tx_handler(unsigned long data);
  347. #endif /* DHD_LB_TXP */
  348. #if defined(DHD_LB_RXP)
  349. int dhd_napi_poll(struct napi_struct *napi, int budget);
  350. void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
  351. void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
  352. void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
  353. #endif /* DHD_LB_RXP */
  354. void dhd_lb_set_default_cpus(dhd_info_t *dhd);
  355. void dhd_cpumasks_deinit(dhd_info_t *dhd);
  356. int dhd_cpumasks_init(dhd_info_t *dhd);
  357. void dhd_select_cpu_candidacy(dhd_info_t *dhd);
  358. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
  359. int dhd_cpu_startup_callback(unsigned int cpu);
  360. int dhd_cpu_teardown_callback(unsigned int cpu);
  361. #else
  362. int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
  363. #endif /* LINUX_VERSION_CODE < 4.10.0 */
  364. int dhd_register_cpuhp_callback(dhd_info_t *dhd);
  365. int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
  366. #if defined(DHD_LB_TXC)
  367. void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
  368. #endif /* DHD_LB_TXC */
  369. #if defined(DHD_LB_RXC)
  370. void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
  371. void dhd_rx_compl_dispatcher_fn(struct work_struct * work);
  372. #endif /* DHD_LB_RXC */
  373. #endif /* DHD_LB */
  374. #if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
  375. void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
  376. #endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
  377. #endif /* __DHD_LINUX_PRIV_H__ */