dhd_linux_wq.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398
  1. /*
  2. * Broadcom Dongle Host Driver (DHD), Generic work queue framework
  3. * Generic interface to handle dhd deferred work events
  4. *
  5. * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
  6. *
  7. * Copyright (C) 1999-2020, Broadcom Corporation
  8. *
  9. * Unless you and Broadcom execute a separate written software license
  10. * agreement governing use of this software, this software is licensed to you
  11. * under the terms of the GNU General Public License version 2 (the "GPL"),
  12. * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  13. * following added to such license:
  14. *
  15. * As a special exception, the copyright holders of this software give you
  16. * permission to link this software with independent modules, and to copy and
  17. * distribute the resulting executable under terms of your choice, provided that
  18. * you also meet, for each linked independent module, the terms and conditions of
  19. * the license of that module. An independent module is a module which is not
  20. * derived from this software. The special exception does not apply to any
  21. * modifications of the software.
  22. *
  23. * Notwithstanding the above, under no circumstances may you combine this
  24. * software in any way with any other Broadcom software provided under a license
  25. * other than the GPL, without Broadcom's express prior written consent.
  26. *
  27. *
  28. * <<Broadcom-WL-IPTag/Open:>>
  29. *
  30. * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $
  31. */
  32. #include <linux/init.h>
  33. #include <linux/kernel.h>
  34. #include <linux/spinlock.h>
  35. #include <linux/fcntl.h>
  36. #include <linux/fs.h>
  37. #include <linux/ip.h>
  38. #include <linux/kfifo.h>
  39. #include <linuxver.h>
  40. #include <osl.h>
  41. #include <bcmutils.h>
  42. #include <bcmendian.h>
  43. #include <bcmdevs.h>
  44. #include <dngl_stats.h>
  45. #include <dhd.h>
  46. #include <dhd_dbg.h>
  47. #include <dhd_linux_wq.h>
/*
 * One queued deferred-work record. Records are copied whole into/out of
 * the kfifos, so sizeof() this struct is the fifo element unit.
 */
typedef struct dhd_deferred_event {
	u8 event;		/* holds the event */
	void *event_data;	/* holds event specific data */
	event_handler_t event_handler;	/* callback invoked by the work handler */
	unsigned long pad;	/* for memory alignment to power of 2 */
} dhd_deferred_event_t;
/* byte size of one queued event record; the fifo transfer unit */
#define DEFRD_EVT_SIZE		(sizeof(dhd_deferred_event_t))

/*
 * work events may occur simultaneously.
 * can hold upto 64 low priority events and 16 high priority events
 */
#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)

/* true when fifo is non-NULL and can accept one whole event record */
#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
/* true when fifo is non-NULL and holds at least one whole event record */
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))
/*
 * Deferred work context: a single work item draining two event fifos
 * (high priority first). Allocated by dhd_deferred_work_init().
 */
struct dhd_deferred_wq {
	struct work_struct deferred_work;	/* should be the first member */

	struct kfifo *prio_fifo;	/* high-priority event records */
	struct kfifo *work_fifo;	/* low-priority event records */
	u8 *prio_fifo_buf;	/* NOTE(review): never assigned in this file — verify */
	u8 *work_fifo_buf;	/* NOTE(review): never assigned in this file — verify */
	spinlock_t work_lock;	/* serializes kfifo in/out on both fifos */
	void *dhd_info;		/* review: does it require */
	u32 event_skip_mask;	/* bit N set => drop events with id N */
};
/*
 * Allocate a struct kfifo descriptor and attach the caller-supplied
 * buffer to it. On success the buffer's ownership passes to the fifo
 * (kfifo_free() releases it); on failure the caller keeps ownership.
 * Returns NULL if the descriptor allocation fails.
 *
 * NOTE(review): 'lock' is unused here — serialization is done by the
 * callers via kfifo_{in,out}_spinlocked(). Presumably kept for
 * compatibility with an older kfifo_init() signature — confirm.
 */
static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	/* pick a sleeping allocation only when the OSL says we may sleep */
	gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;

	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);

	return fifo;
}
  87. static inline void
  88. dhd_kfifo_free(struct kfifo *fifo)
  89. {
  90. kfifo_free(fifo);
  91. }
  92. /* deferred work functions */
  93. static void dhd_deferred_work_handler(struct work_struct *data);
/*
 * Create and initialize the deferred work context.
 *
 * Allocates the context, the two event fifo buffers (sizes rounded up
 * to a power of two as kfifo requires) and their kfifo descriptors, and
 * registers dhd_deferred_work_handler as the work function.
 *
 * Returns the opaque context pointer on success, NULL on any failure.
 * On failure, everything allocated so far is released via
 * dhd_deferred_work_deinit() (except a buffer whose fifo attach failed,
 * which is freed directly since ownership never transferred).
 */
void*
dhd_deferred_work_init(void *dhd_info)
{
	struct dhd_deferred_wq *work = NULL;
	u8* buf;
	unsigned long fifo_size = 0;
	gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
		goto return_null;
	}

	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
		flags);

	if (!work) {
		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* work_struct is the first member, so the cast is valid */
	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

	/* initialize event fifo */
	spin_lock_init(&work->work_lock);

	/* allocate buffer to hold prio events */
	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
	/* kfifo requires a power-of-two buffer size */
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
			roundup_pow_of_two(fifo_size);
	buf = (u8*)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: prio work fifo allocation failed\n",
			__FUNCTION__));
		goto return_null;
	}

	/* Initialize prio event fifo */
	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->prio_fifo) {
		/* fifo attach failed; buf ownership stayed with us */
		kfree(buf);
		goto return_null;
	}

	/* allocate buffer to hold work events */
	fifo_size = DHD_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
			roundup_pow_of_two(fifo_size);
	buf = (u8*)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* Initialize event fifo */
	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->work_fifo) {
		kfree(buf);
		goto return_null;
	}

	work->dhd_info = dhd_info;
	work->event_skip_mask = 0;
	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
	return work;

return_null:
	/* deinit tolerates partially-initialized state (NULL fifos) */
	if (work) {
		dhd_deferred_work_deinit(work);
	}

	return NULL;
}
  155. void
  156. dhd_deferred_work_deinit(void *work)
  157. {
  158. struct dhd_deferred_wq *deferred_work = work;
  159. if (!deferred_work) {
  160. DHD_ERROR(("%s: deferred work has been freed already\n",
  161. __FUNCTION__));
  162. return;
  163. }
  164. /* cancel the deferred work handling */
  165. cancel_work_sync((struct work_struct *)deferred_work);
  166. /*
  167. * free work event fifo.
  168. * kfifo_free frees locally allocated fifo buffer
  169. */
  170. if (deferred_work->prio_fifo) {
  171. dhd_kfifo_free(deferred_work->prio_fifo);
  172. }
  173. if (deferred_work->work_fifo) {
  174. dhd_kfifo_free(deferred_work->work_fifo);
  175. }
  176. kfree(deferred_work);
  177. }
  178. /* select kfifo according to priority */
  179. static inline struct kfifo *
  180. dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
  181. u8 priority)
  182. {
  183. if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
  184. return deferred_wq->prio_fifo;
  185. } else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
  186. return deferred_wq->work_fifo;
  187. } else {
  188. return NULL;
  189. }
  190. }
  191. /*
  192. * Prepares event to be queued
  193. * Schedules the event
  194. */
  195. int
  196. dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
  197. event_handler_t event_handler, u8 priority)
  198. {
  199. struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
  200. struct kfifo *fifo;
  201. dhd_deferred_event_t deferred_event;
  202. int bytes_copied = 0;
  203. if (!deferred_wq) {
  204. DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
  205. ASSERT(0);
  206. return DHD_WQ_STS_UNINITIALIZED;
  207. }
  208. if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
  209. DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
  210. event));
  211. return DHD_WQ_STS_UNKNOWN_EVENT;
  212. }
  213. if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
  214. DHD_ERROR(("%s: unknown priority, priority=%d\n",
  215. __FUNCTION__, priority));
  216. return DHD_WQ_STS_UNKNOWN_PRIORITY;
  217. }
  218. if ((deferred_wq->event_skip_mask & (1 << event))) {
  219. DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n",
  220. __FUNCTION__, deferred_wq->event_skip_mask));
  221. return DHD_WQ_STS_EVENT_SKIPPED;
  222. }
  223. /*
  224. * default element size is 1, which can be changed
  225. * using kfifo_esize(). Older kernel(FC11) doesn't support
  226. * changing element size. For compatibility changing
  227. * element size is not prefered
  228. */
  229. ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
  230. ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
  231. deferred_event.event = event;
  232. deferred_event.event_data = event_data;
  233. deferred_event.event_handler = event_handler;
  234. fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
  235. if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
  236. bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
  237. DEFRD_EVT_SIZE, &deferred_wq->work_lock);
  238. }
  239. if (bytes_copied != DEFRD_EVT_SIZE) {
  240. DHD_ERROR(("%s: failed to schedule deferred work, "
  241. "priority=%d, bytes_copied=%d\n", __FUNCTION__,
  242. priority, bytes_copied));
  243. return DHD_WQ_STS_SCHED_FAILED;
  244. }
  245. schedule_work((struct work_struct *)deferred_wq);
  246. return DHD_WQ_STS_OK;
  247. }
  248. static bool
  249. dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
  250. dhd_deferred_event_t *event)
  251. {
  252. int bytes_copied = 0;
  253. if (!deferred_wq) {
  254. DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
  255. return DHD_WQ_STS_UNINITIALIZED;
  256. }
  257. /*
  258. * default element size is 1 byte, which can be changed
  259. * using kfifo_esize(). Older kernel(FC11) doesn't support
  260. * changing element size. For compatibility changing
  261. * element size is not prefered
  262. */
  263. ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
  264. ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
  265. /* handle priority work */
  266. if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
  267. bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
  268. event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
  269. }
  270. /* handle normal work if priority work doesn't have enough data */
  271. if ((bytes_copied != DEFRD_EVT_SIZE) &&
  272. DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
  273. bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
  274. event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
  275. }
  276. return (bytes_copied == DEFRD_EVT_SIZE);
  277. }
  278. static inline void
  279. dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
  280. {
  281. if (!work_event) {
  282. DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
  283. return;
  284. }
  285. DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
  286. work_event->event));
  287. DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
  288. work_event->event_data));
  289. DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
  290. work_event->event_handler));
  291. }
  292. /*
  293. * Called when work is scheduled
  294. */
  295. static void
  296. dhd_deferred_work_handler(struct work_struct *work)
  297. {
  298. struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
  299. dhd_deferred_event_t work_event;
  300. if (!deferred_work) {
  301. DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
  302. return;
  303. }
  304. do {
  305. if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
  306. DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
  307. break;
  308. }
  309. if (work_event.event >= DHD_MAX_WQ_EVENTS) {
  310. DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
  311. dhd_deferred_dump_work_event(&work_event);
  312. ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
  313. continue;
  314. }
  315. if (work_event.event_handler) {
  316. work_event.event_handler(deferred_work->dhd_info,
  317. work_event.event_data, work_event.event);
  318. } else {
  319. DHD_ERROR(("%s: event handler is null\n",
  320. __FUNCTION__));
  321. dhd_deferred_dump_work_event(&work_event);
  322. ASSERT(work_event.event_handler != NULL);
  323. }
  324. } while (1);
  325. return;
  326. }
  327. void
  328. dhd_deferred_work_set_skip(void *work, u8 event, bool set)
  329. {
  330. struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;
  331. if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
  332. DHD_ERROR(("%s: Invalid!!\n", __FUNCTION__));
  333. return;
  334. }
  335. if (set) {
  336. /* Set */
  337. deferred_wq->event_skip_mask |= (1 << event);
  338. } else {
  339. /* Clear */
  340. deferred_wq->event_skip_mask &= ~(1 << event);
  341. }
  342. }