/* linuxver.h */
  1. /*
  2. * Linux-specific abstractions to gain some independence from linux kernel versions.
  3. * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
  4. *
  5. * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
  6. *
  7. * Copyright (C) 1999-2020, Broadcom Corporation
  8. *
  9. * Unless you and Broadcom execute a separate written software license
  10. * agreement governing use of this software, this software is licensed to you
  11. * under the terms of the GNU General Public License version 2 (the "GPL"),
  12. * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  13. * following added to such license:
  14. *
  15. * As a special exception, the copyright holders of this software give you
  16. * permission to link this software with independent modules, and to copy and
  17. * distribute the resulting executable under terms of your choice, provided that
  18. * you also meet, for each linked independent module, the terms and conditions of
  19. * the license of that module. An independent module is a module which is not
  20. * derived from this software. The special exception does not apply to any
  21. * modifications of the software.
  22. *
  23. * Notwithstanding the above, under no circumstances may you combine this
  24. * software in any way with any other Broadcom software provided under a license
  25. * other than the GPL, without Broadcom's express prior written consent.
  26. *
  27. *
  28. * <<Broadcom-WL-IPTag/Open:>>
  29. *
  30. * $Id: linuxver.h 646730 2016-06-30 13:01:49Z $
  31. */
  32. #ifndef _linuxver_h_
  33. #define _linuxver_h_
  34. #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
  35. #pragma GCC diagnostic push
  36. #pragma GCC diagnostic ignored "-Wunused-but-set-variable"
  37. #pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
  38. #endif // endif
  39. #include <typedefs.h>
  40. #include <linux/version.h>
  41. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
  42. #include <linux/config.h>
  43. #else
  44. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
  45. #include <generated/autoconf.h>
  46. #else
  47. #include <linux/autoconf.h>
  48. #endif // endif
  49. #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
  50. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
  51. #include <linux/kconfig.h>
  52. #endif // endif
  53. #include <linux/module.h>
  54. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
  55. /* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
  56. #ifdef __UNDEF_NO_VERSION__
  57. #undef __NO_VERSION__
  58. #else
  59. #define __NO_VERSION__
  60. #endif // endif
  61. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
  62. #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
  63. #define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
  64. #define module_param_string(_name_, _string_, _size_, _perm_) \
  65. MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
  66. #endif // endif
  67. /* linux/malloc.h is deprecated, use linux/slab.h instead. */
  68. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
  69. #include <linux/malloc.h>
  70. #else
  71. #include <linux/slab.h>
  72. #endif // endif
  73. #include <linux/types.h>
  74. #include <linux/init.h>
  75. #include <linux/mm.h>
  76. #include <linux/string.h>
  77. #include <linux/pci.h>
  78. #include <linux/interrupt.h>
  79. #include <linux/kthread.h>
  80. #include <linux/netdevice.h>
  81. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
  82. #include <linux/semaphore.h>
  83. #else
  84. #include <asm/semaphore.h>
  85. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
  86. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
  87. #undef IP_TOS
  88. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
  89. #include <asm/io.h>
  90. #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
  91. #include <linux/workqueue.h>
  92. #else
  93. #include <linux/tqueue.h>
  94. #ifndef work_struct
  95. #define work_struct tq_struct
  96. #endif // endif
  97. #ifndef INIT_WORK
  98. #define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
  99. #endif // endif
  100. #ifndef schedule_work
  101. #define schedule_work(_work) schedule_task((_work))
  102. #endif // endif
  103. #ifndef flush_scheduled_work
  104. #define flush_scheduled_work() flush_scheduled_tasks()
  105. #endif // endif
  106. #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
  107. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
  108. #define DAEMONIZE(a) do { \
  109. allow_signal(SIGKILL); \
  110. allow_signal(SIGTERM); \
  111. } while (0)
  112. #elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
  113. (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
  114. #define DAEMONIZE(a) daemonize(a); \
  115. allow_signal(SIGKILL); \
  116. allow_signal(SIGTERM);
  117. #else /* Linux 2.4 (w/o preemption patch) */
  118. #define RAISE_RX_SOFTIRQ() \
  119. cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
  120. #define DAEMONIZE(a) daemonize(); \
  121. do { if (a) \
  122. strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
  123. } while (0);
  124. #endif /* LINUX_VERSION_CODE */
  125. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
  126. #define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
  127. #else
  128. #define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
  129. #if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
  130. (RHEL_MAJOR == 5))
  131. /* Exclude RHEL 5 */
  132. typedef void (*work_func_t)(void *work);
  133. #endif // endif
  134. #endif /* >= 2.6.19 */
  135. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
  136. /* Some distributions have their own 2.6.x compatibility layers */
  137. #ifndef IRQ_NONE
  138. typedef void irqreturn_t;
  139. #define IRQ_NONE
  140. #define IRQ_HANDLED
  141. #define IRQ_RETVAL(x)
  142. #endif // endif
  143. #else
  144. typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
  145. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
  146. #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
  147. #define IRQF_SHARED SA_SHIRQ
  148. #endif /* < 2.6.18 */
  149. #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
  150. #ifdef CONFIG_NET_RADIO
  151. #define CONFIG_WIRELESS_EXT
  152. #endif // endif
  153. #endif /* < 2.6.17 */
  154. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
  155. #define MOD_INC_USE_COUNT
  156. #define MOD_DEC_USE_COUNT
  157. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
  158. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
  159. #include <linux/sched.h>
  160. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
  161. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
  162. #include <linux/sched/rt.h>
  163. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
  164. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
  165. #include <net/lib80211.h>
  166. #endif // endif
  167. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
  168. #include <linux/ieee80211.h>
  169. #else
  170. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
  171. #include <net/ieee80211.h>
  172. #endif // endif
  173. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
  174. #ifndef __exit
  175. #define __exit
  176. #endif // endif
  177. #ifndef __devexit
  178. #define __devexit
  179. #endif // endif
  180. #ifndef __devinit
  181. # if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
  182. # define __devinit __init
  183. # else
  184. /* All devices are hotpluggable since linux 3.8.0 */
  185. # define __devinit
  186. # endif
  187. #endif /* !__devinit */
  188. #ifndef __devinitdata
  189. #define __devinitdata
  190. #endif // endif
  191. #ifndef __devexit_p
  192. #define __devexit_p(x) x
  193. #endif // endif
  194. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
  195. #define pci_get_drvdata(dev) (dev)->sysdata
  196. #define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
  197. /*
  198. * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
  199. */
  200. struct pci_device_id {
  201. unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
  202. unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
  203. unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
  204. unsigned long driver_data; /* Data private to the driver */
  205. };
  206. struct pci_driver {
  207. struct list_head node;
  208. char *name;
  209. const struct pci_device_id *id_table; /* NULL if wants all devices */
  210. int (*probe)(struct pci_dev *dev,
  211. const struct pci_device_id *id); /* New device inserted */
  212. void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug
  213. * capable driver)
  214. */
  215. void (*suspend)(struct pci_dev *dev); /* Device suspended */
  216. void (*resume)(struct pci_dev *dev); /* Device woken up */
  217. };
  218. #define MODULE_DEVICE_TABLE(type, name)
  219. #define PCI_ANY_ID (~0)
  220. /* compatpci.c */
  221. #define pci_module_init pci_register_driver
  222. extern int pci_register_driver(struct pci_driver *drv);
  223. extern void pci_unregister_driver(struct pci_driver *drv);
  224. #endif /* PCI registration */
  225. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
  226. #define pci_module_init pci_register_driver
  227. #endif // endif
  228. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
  229. #ifdef MODULE
  230. #define module_init(x) int init_module(void) { return x(); }
  231. #define module_exit(x) void cleanup_module(void) { x(); }
  232. #else
  233. #define module_init(x) __initcall(x);
  234. #define module_exit(x) __exitcall(x);
  235. #endif // endif
  236. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
  237. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
  238. #define WL_USE_NETDEV_OPS
  239. #else
  240. #undef WL_USE_NETDEV_OPS
  241. #endif // endif
  242. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
  243. #define WL_CONFIG_RFKILL
  244. #else
  245. #undef WL_CONFIG_RFKILL
  246. #endif // endif
  247. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
  248. #define list_for_each(pos, head) \
  249. for (pos = (head)->next; pos != (head); pos = pos->next)
  250. #endif // endif
  251. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
  252. #define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
  253. #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
  254. #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
  255. #endif // endif
  256. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
  257. #define pci_enable_device(dev) do { } while (0)
  258. #endif // endif
  259. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
  260. #define net_device device
  261. #endif // endif
  262. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
  263. /*
  264. * DMA mapping
  265. *
  266. * See linux/Documentation/DMA-mapping.txt
  267. */
  268. #ifndef PCI_DMA_TODEVICE
  269. #define PCI_DMA_TODEVICE 1
  270. #define PCI_DMA_FROMDEVICE 2
  271. #endif // endif
  272. typedef u32 dma_addr_t;
  273. /* Pure 2^n version of get_order */
  274. static inline int get_order(unsigned long size)
  275. {
  276. int order;
  277. size = (size-1) >> (PAGE_SHIFT-1);
  278. order = -1;
  279. do {
  280. size >>= 1;
  281. order++;
  282. } while (size);
  283. return order;
  284. }
  285. static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
  286. dma_addr_t *dma_handle)
  287. {
  288. void *ret;
  289. int gfp = GFP_ATOMIC | GFP_DMA;
  290. ret = (void *)__get_free_pages(gfp, get_order(size));
  291. if (ret != NULL) {
  292. memset(ret, 0, size);
  293. *dma_handle = virt_to_bus(ret);
  294. }
  295. return ret;
  296. }
  297. static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
  298. void *vaddr, dma_addr_t dma_handle)
  299. {
  300. free_pages((unsigned long)vaddr, get_order(size));
  301. }
  302. #define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
  303. #define pci_unmap_single(cookie, address, size, dir)
  304. #endif /* DMA mapping */
  305. #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
  306. typedef struct timer_list timer_list_compat_t;
  307. #define init_timer_compat(timer_compat, cb, priv) \
  308. init_timer(timer_compat); \
  309. (timer_compat)->data = (ulong)priv; \
  310. (timer_compat)->function = cb
  311. #define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
  312. #define timer_expires(timer_compat) (timer_compat)->expires
  313. #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
  314. typedef struct timer_list_compat {
  315. struct timer_list timer;
  316. void *arg;
  317. void (*callback)(ulong arg);
  318. } timer_list_compat_t;
  319. extern void timer_cb_compat(struct timer_list *tl);
  320. #define init_timer_compat(timer_compat, cb, priv) \
  321. (timer_compat)->arg = priv; \
  322. (timer_compat)->callback = cb; \
  323. timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
  324. #define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
  325. #define timer_expires(timer_compat) (timer_compat)->timer.expires
  326. #define del_timer(t) del_timer(&((t)->timer))
  327. #define del_timer_sync(t) del_timer_sync(&((t)->timer))
  328. #define timer_pending(t) timer_pending(&((t)->timer))
  329. #define add_timer(t) add_timer(&((t)->timer))
  330. #define mod_timer(t, j) mod_timer(&((t)->timer), j)
  331. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
  332. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
  333. #define dev_kfree_skb_any(a) dev_kfree_skb(a)
  334. #define netif_down(dev) do { (dev)->start = 0; } while (0)
  335. /* pcmcia-cs provides its own netdevice compatibility layer */
  336. #ifndef _COMPAT_NETDEVICE_H
  337. /*
  338. * SoftNet
  339. *
  340. * For pre-softnet kernels we need to tell the upper layer not to
  341. * re-enter start_xmit() while we are in there. However softnet
  342. * guarantees not to enter while we are in there so there is no need
  343. * to do the netif_stop_queue() dance unless the transmit queue really
  344. * gets stuck. This should also improve performance according to tests
  345. * done by Aman Singla.
  346. */
  347. #define dev_kfree_skb_irq(a) dev_kfree_skb(a)
  348. #define netif_wake_queue(dev) \
  349. do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
  350. #define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
  351. static inline void netif_start_queue(struct net_device *dev)
  352. {
  353. dev->tbusy = 0;
  354. dev->interrupt = 0;
  355. dev->start = 1;
  356. }
  357. #define netif_queue_stopped(dev) (dev)->tbusy
  358. #define netif_running(dev) (dev)->start
  359. #endif /* _COMPAT_NETDEVICE_H */
  360. #define netif_device_attach(dev) netif_start_queue(dev)
  361. #define netif_device_detach(dev) netif_stop_queue(dev)
  362. /* 2.4.x renamed bottom halves to tasklets */
  363. #define tasklet_struct tq_struct
  364. static inline void tasklet_schedule(struct tasklet_struct *tasklet)
  365. {
  366. queue_task(tasklet, &tq_immediate);
  367. mark_bh(IMMEDIATE_BH);
  368. }
  369. static inline void tasklet_init(struct tasklet_struct *tasklet,
  370. void (*func)(unsigned long),
  371. unsigned long data)
  372. {
  373. tasklet->next = NULL;
  374. tasklet->sync = 0;
  375. tasklet->routine = (void (*)(void *))func;
  376. tasklet->data = (void *)data;
  377. }
  378. #define tasklet_kill(tasklet) { do {} while (0); }
  379. /* 2.4.x introduced del_timer_sync() */
  380. #define del_timer_sync(timer) del_timer(timer)
  381. #else
  382. #define netif_down(dev)
  383. #endif /* SoftNet */
  384. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
  385. /*
  386. * Emit code to initialise a tq_struct's routine and data pointers
  387. */
  388. #define PREPARE_TQUEUE(_tq, _routine, _data) \
  389. do { \
  390. (_tq)->routine = _routine; \
  391. (_tq)->data = _data; \
  392. } while (0)
  393. /*
  394. * Emit code to initialise all of a tq_struct
  395. */
  396. #define INIT_TQUEUE(_tq, _routine, _data) \
  397. do { \
  398. INIT_LIST_HEAD(&(_tq)->list); \
  399. (_tq)->sync = 0; \
  400. PREPARE_TQUEUE((_tq), (_routine), (_data)); \
  401. } while (0)
  402. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
  403. /* Power management related macro & routines */
  404. #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
  405. #define PCI_SAVE_STATE(a, b) pci_save_state(a)
  406. #define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
  407. #else
  408. #define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
  409. #define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
  410. #endif // endif
  411. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
  412. static inline int
  413. pci_save_state(struct pci_dev *dev, u32 *buffer)
  414. {
  415. int i;
  416. if (buffer) {
  417. for (i = 0; i < 16; i++)
  418. pci_read_config_dword(dev, i * 4, &buffer[i]);
  419. }
  420. return 0;
  421. }
  422. static inline int
  423. pci_restore_state(struct pci_dev *dev, u32 *buffer)
  424. {
  425. int i;
  426. if (buffer) {
  427. for (i = 0; i < 16; i++)
  428. pci_write_config_dword(dev, i * 4, buffer[i]);
  429. }
  430. /*
  431. * otherwise, write the context information we know from bootup.
  432. * This works around a problem where warm-booting from Windows
  433. * combined with a D3(hot)->D0 transition causes PCI config
  434. * header data to be forgotten.
  435. */
  436. else {
  437. for (i = 0; i < 6; i ++)
  438. pci_write_config_dword(dev,
  439. PCI_BASE_ADDRESS_0 + (i * 4),
  440. pci_resource_start(dev, i));
  441. pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
  442. }
  443. return 0;
  444. }
  445. #endif /* PCI power management */
  446. /* Old cp0 access macros deprecated in 2.4.19 */
  447. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
  448. #define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
  449. #endif // endif
  450. /* Module refcount handled internally in 2.6.x */
  451. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
  452. #ifndef SET_MODULE_OWNER
  453. #define SET_MODULE_OWNER(dev) do {} while (0)
  454. #define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
  455. #define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
  456. #else
  457. #define OLD_MOD_INC_USE_COUNT do {} while (0)
  458. #define OLD_MOD_DEC_USE_COUNT do {} while (0)
  459. #endif // endif
  460. #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
  461. #ifndef SET_MODULE_OWNER
  462. #define SET_MODULE_OWNER(dev) do {} while (0)
  463. #endif // endif
  464. #ifndef MOD_INC_USE_COUNT
  465. #define MOD_INC_USE_COUNT do {} while (0)
  466. #endif // endif
  467. #ifndef MOD_DEC_USE_COUNT
  468. #define MOD_DEC_USE_COUNT do {} while (0)
  469. #endif // endif
  470. #define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
  471. #define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
  472. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
  473. #ifndef SET_NETDEV_DEV
  474. #define SET_NETDEV_DEV(net, pdev) do {} while (0)
  475. #endif // endif
  476. #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
  477. #ifndef HAVE_FREE_NETDEV
  478. #define free_netdev(dev) kfree(dev)
  479. #endif // endif
  480. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
  481. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
  482. /* struct packet_type redefined in 2.6.x */
  483. #define af_packet_priv data
  484. #endif // endif
  485. /* suspend args */
  486. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
  487. #define DRV_SUSPEND_STATE_TYPE pm_message_t
  488. #else
  489. #define DRV_SUSPEND_STATE_TYPE uint32
  490. #endif // endif
  491. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
  492. #define CHECKSUM_HW CHECKSUM_PARTIAL
  493. #endif // endif
  494. typedef struct {
  495. void *parent; /* some external entity that the thread supposed to work for */
  496. char *proc_name;
  497. struct task_struct *p_task;
  498. long thr_pid;
  499. int prio; /* priority */
  500. struct semaphore sema;
  501. int terminated;
  502. struct completion completed;
  503. int flush_ind;
  504. struct completion flushed;
  505. spinlock_t spinlock;
  506. int up_cnt;
  507. } tsk_ctl_t;
  508. /* requires tsk_ctl_t tsk argument, the caller's priv data is passed in owner ptr */
  509. /* note this macro assumes there may be only one context waiting on thread's completion */
  510. #ifdef DHD_DEBUG
  511. #define DBG_THR(x) printk x
  512. #else
  513. #define DBG_THR(x)
  514. #endif // endif
  515. static inline bool binary_sema_down(tsk_ctl_t *tsk)
  516. {
  517. if (down_interruptible(&tsk->sema) == 0) {
  518. unsigned long flags = 0;
  519. spin_lock_irqsave(&tsk->spinlock, flags);
  520. if (tsk->up_cnt == 1)
  521. tsk->up_cnt--;
  522. else {
  523. DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
  524. }
  525. spin_unlock_irqrestore(&tsk->spinlock, flags);
  526. return false;
  527. } else
  528. return true;
  529. }
  530. static inline bool binary_sema_up(tsk_ctl_t *tsk)
  531. {
  532. bool sem_up = false;
  533. unsigned long flags = 0;
  534. spin_lock_irqsave(&tsk->spinlock, flags);
  535. if (tsk->up_cnt == 0) {
  536. tsk->up_cnt++;
  537. sem_up = true;
  538. } else if (tsk->up_cnt == 1) {
  539. /* dhd_sched_dpc: dpc is alread up! */
  540. } else
  541. DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
  542. spin_unlock_irqrestore(&tsk->spinlock, flags);
  543. if (sem_up)
  544. up(&tsk->sema);
  545. return sem_up;
  546. }
  547. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
  548. #define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
  549. #else
  550. #define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
  551. #endif // endif
  552. #define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
  553. { \
  554. sema_init(&((tsk_ctl)->sema), 0); \
  555. init_completion(&((tsk_ctl)->completed)); \
  556. init_completion(&((tsk_ctl)->flushed)); \
  557. (tsk_ctl)->parent = owner; \
  558. (tsk_ctl)->proc_name = name; \
  559. (tsk_ctl)->terminated = FALSE; \
  560. (tsk_ctl)->flush_ind = FALSE; \
  561. (tsk_ctl)->up_cnt = 0; \
  562. (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
  563. if (IS_ERR((tsk_ctl)->p_task)) { \
  564. (tsk_ctl)->thr_pid = -1; \
  565. DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
  566. (tsk_ctl)->proc_name)); \
  567. } else { \
  568. (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
  569. spin_lock_init(&((tsk_ctl)->spinlock)); \
  570. DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
  571. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  572. }; \
  573. }
  574. #define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */
  575. #define PROC_STOP(tsk_ctl) \
  576. { \
  577. uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
  578. (tsk_ctl)->terminated = TRUE; \
  579. smp_wmb(); \
  580. up(&((tsk_ctl)->sema)); \
  581. DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
  582. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  583. timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
  584. if (timeout == 0) \
  585. DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
  586. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  587. else \
  588. DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
  589. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  590. (tsk_ctl)->parent = NULL; \
  591. (tsk_ctl)->proc_name = NULL; \
  592. (tsk_ctl)->thr_pid = -1; \
  593. (tsk_ctl)->up_cnt = 0; \
  594. }
  595. #define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
  596. { \
  597. uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
  598. (tsk_ctl)->terminated = TRUE; \
  599. smp_wmb(); \
  600. binary_sema_up(tsk_ctl); \
  601. DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
  602. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  603. timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
  604. if (timeout == 0) \
  605. DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
  606. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  607. else \
  608. DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
  609. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  610. (tsk_ctl)->parent = NULL; \
  611. (tsk_ctl)->proc_name = NULL; \
  612. (tsk_ctl)->thr_pid = -1; \
  613. }
  614. /*
  615. * Flush is non-rentrant, so callers must make sure
  616. * there is no race condition.
  617. * For safer exit, added wait_for_completion_timeout
  618. * with 1 sec timeout.
  619. */
  620. #define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
  621. { \
  622. uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
  623. (tsk_ctl)->flush_ind = TRUE; \
  624. smp_wmb(); \
  625. binary_sema_up(tsk_ctl); \
  626. DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
  627. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  628. timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
  629. if (timeout == 0) \
  630. DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
  631. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  632. else \
  633. DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
  634. (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
  635. }
  636. /* ----------------------- */
  637. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
  638. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
  639. /* send_sig declaration moved */
  640. #include <linux/sched/signal.h>
  641. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */
  642. #define KILL_PROC(nr, sig) \
  643. { \
  644. struct task_struct *tsk; \
  645. struct pid *pid; \
  646. pid = find_get_pid((pid_t)nr); \
  647. tsk = pid_task(pid, PIDTYPE_PID); \
  648. if (tsk) send_sig(sig, tsk, 1); \
  649. }
  650. #else
  651. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
  652. KERNEL_VERSION(2, 6, 30))
  653. #define KILL_PROC(pid, sig) \
  654. { \
  655. struct task_struct *tsk; \
  656. tsk = find_task_by_vpid(pid); \
  657. if (tsk) send_sig(sig, tsk, 1); \
  658. }
  659. #else
  660. #define KILL_PROC(pid, sig) \
  661. { \
  662. kill_proc(pid, sig, 1); \
  663. }
  664. #endif // endif
  665. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
  666. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
  667. #include <linux/time.h>
  668. #include <linux/wait.h>
  669. #else
  670. #include <linux/sched.h>
  671. #define __wait_event_interruptible_timeout(wq, condition, ret) \
  672. do { \
  673. wait_queue_t __wait; \
  674. init_waitqueue_entry(&__wait, current); \
  675. \
  676. add_wait_queue(&wq, &__wait); \
  677. for (;;) { \
  678. set_current_state(TASK_INTERRUPTIBLE); \
  679. if (condition) \
  680. break; \
  681. if (!signal_pending(current)) { \
  682. ret = schedule_timeout(ret); \
  683. if (!ret) \
  684. break; \
  685. continue; \
  686. } \
  687. ret = -ERESTARTSYS; \
  688. break; \
  689. } \
  690. current->state = TASK_RUNNING; \
  691. remove_wait_queue(&wq, &__wait); \
  692. } while (0)
  693. #define wait_event_interruptible_timeout(wq, condition, timeout) \
  694. ({ \
  695. long __ret = timeout; \
  696. if (!(condition)) \
  697. __wait_event_interruptible_timeout(wq, condition, __ret); \
  698. __ret; \
  699. })
  700. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
  701. /*
  702. For < 2.6.24, wl creates its own netdev but doesn't
  703. align the priv area like the genuine alloc_netdev().
  704. Since netdev_priv() always gives us the aligned address, it will
  705. not match our unaligned address for < 2.6.24
  706. */
  707. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
  708. #define DEV_PRIV(dev) (dev->priv)
  709. #else
  710. #define DEV_PRIV(dev) netdev_priv(dev)
  711. #endif // endif
  712. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
  713. #define WL_ISR(i, d, p) wl_isr((i), (d))
  714. #else
  715. #define WL_ISR(i, d, p) wl_isr((i), (d), (p))
  716. #endif /* < 2.6.20 */
  717. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
  718. #define netdev_priv(dev) dev->priv
  719. #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
  720. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
  721. #define CAN_SLEEP() ((!in_atomic() && !irqs_disabled()))
  722. #else
  723. #define CAN_SLEEP() (FALSE)
  724. #endif // endif
  725. #define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
  726. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
  727. #define RANDOM32 prandom_u32
  728. #define RANDOM_BYTES prandom_bytes
  729. #else
  730. #define RANDOM32 random32
  731. #define RANDOM_BYTES get_random_bytes
  732. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
  733. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
  734. #define SRANDOM32(entropy) prandom_seed(entropy)
  735. #else
  736. #define SRANDOM32(entropy) srandom32(entropy)
  737. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
  738. /*
  739. * Overide latest kfifo functions with
  740. * older version to work on older kernels
  741. */
  742. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
  743. #define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
  744. #define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
  745. #define kfifo_esize(a) 1
  746. #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
  747. (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS)
  748. #define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d)
  749. #define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d)
  750. #define kfifo_esize(a) 1
  751. #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
  752. #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
  753. #pragma GCC diagnostic pop
  754. #endif // endif
  755. #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
  756. static inline struct inode *file_inode(const struct file *f)
  757. {
  758. return f->f_dentry->d_inode;
  759. }
  760. #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
  761. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
  762. #define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
  763. #define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
  764. int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
  765. #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
  766. #define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
  767. #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
  768. #endif /* _linuxver_h_ */