rx-offload.c

/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the version 2 of the GNU General Public License
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
        u32 timestamp;
};
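
/* Driver-private control block stored in skb->cb of every offloaded frame;
 * it currently only carries the RX timestamp used for sorting.
 */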
static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

        return (struct can_rx_offload_cb *)skb->cb;
}

static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
        if (offload->inc)
                return a <= b;
        else
                return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
        if (offload->inc)
                return (*val)++;
        else
                return (*val)--;
}
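
/* NAPI poll handler: hand queued skbs to the network stack, update the RX
 * statistics and, once the queue is drained, complete NAPI. If new frames
 * were queued from IRQ context in the meantime, reschedule the poll.
 */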
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
        struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        int work_done = 0;

        while ((work_done < quota) &&
               (skb = skb_dequeue(&offload->skb_queue))) {
                struct can_frame *cf = (struct can_frame *)skb->data;

                work_done++;
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
                netif_receive_skb(skb);
        }

        if (work_done < quota) {
                napi_complete_done(napi, work_done);

                /* Check if there was another interrupt */
                if (!skb_queue_empty(&offload->skb_queue))
                        napi_reschedule(&offload->napi);
        }

        can_led_event(offload->dev, CAN_LED_EVENT_RX);

        return work_done;
}
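
/* Insert @new into @head, keeping the queue sorted by @compare (oldest
 * timestamp at the head). The queue is walked from the tail, so frames
 * that already arrive roughly in order are placed with few comparisons.
 */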
static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
                                        int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
        struct sk_buff *pos, *insert = (struct sk_buff *)head;

        skb_queue_reverse_walk(head, pos) {
                const struct can_rx_offload_cb *cb_pos, *cb_new;

                cb_pos = can_rx_offload_get_cb(pos);
                cb_new = can_rx_offload_get_cb(new);

                netdev_dbg(new->dev,
                           "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
                           __func__,
                           cb_pos->timestamp, cb_new->timestamp,
                           cb_new->timestamp - cb_pos->timestamp,
                           skb_queue_len(head));

                if (compare(pos, new) < 0)
                        continue;
                insert = pos;
                break;
        }

        __skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
        const struct can_rx_offload_cb *cb_a, *cb_b;

        cb_a = can_rx_offload_get_cb(a);
        cb_b = can_rx_offload_get_cb(b);

        /* Subtract two u32 and return result as int, to keep
         * difference steady around the u32 overflow.
         */
        return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of the mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox content is discarded by reading it into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
        struct sk_buff *skb = NULL, *skb_error = NULL;
        struct can_rx_offload_cb *cb;
        struct can_frame *cf;
        int ret;

        if (likely(skb_queue_len(&offload->skb_queue) <
                   offload->skb_queue_len_max)) {
                skb = alloc_can_skb(offload->dev, &cf);
                if (unlikely(!skb))
                        skb_error = ERR_PTR(-ENOMEM);   /* skb alloc failed */
        } else {
                skb_error = ERR_PTR(-ENOBUFS);          /* skb_queue is full */
        }

        /* If queue is full or skb not available, drop by reading into
         * overflow buffer.
         */
        if (unlikely(skb_error)) {
                struct can_frame cf_overflow;
                u32 timestamp;

                ret = offload->mailbox_read(offload, &cf_overflow,
                                            &timestamp, n);

                /* Mailbox was empty. */
                if (unlikely(!ret))
                        return NULL;

                /* Mailbox has been read and we're dropping it or
                 * there was a problem reading the mailbox.
                 *
                 * Increment error counters in any case.
                 */
                offload->dev->stats.rx_dropped++;
                offload->dev->stats.rx_fifo_errors++;

                /* There was a problem reading the mailbox, propagate
                 * error value.
                 */
                if (unlikely(ret < 0))
                        return ERR_PTR(ret);

                return skb_error;
        }

        cb = can_rx_offload_get_cb(skb);
        ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);

        /* Mailbox was empty. */
        if (unlikely(!ret)) {
                kfree_skb(skb);
                return NULL;
        }

        /* There was a problem reading the mailbox, propagate error value. */
        if (unlikely(ret < 0)) {
                kfree_skb(skb);
                offload->dev->stats.rx_dropped++;
                offload->dev->stats.rx_fifo_errors++;
                return ERR_PTR(ret);
        }

        /* Mailbox was read. */
        return skb;
}
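
/* Read every mailbox flagged in the @pending bitmask, sort the frames by
 * timestamp into a temporary queue, then splice them onto
 * can_rx_offload::skb_queue and schedule NAPI. Returns the number of
 * frames queued.
 *
 * A minimal usage sketch from a driver's IRQ handler; foo_priv and
 * foo_read_pending() are hypothetical placeholders (the latter is assumed
 * to return a bitmask of mailboxes holding new frames), not part of this
 * API:
 *
 *      static irqreturn_t foo_irq_handler(int irq, void *dev_id)
 *      {
 *              struct foo_priv *priv = dev_id;
 *              u64 pending = foo_read_pending(priv);
 *
 *              if (pending)
 *                      can_rx_offload_irq_offload_timestamp(&priv->offload,
 *                                                           pending);
 *
 *              return IRQ_HANDLED;
 *      }
 */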
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
        struct sk_buff_head skb_queue;
        unsigned int i;

        __skb_queue_head_init(&skb_queue);

        for (i = offload->mb_first;
             can_rx_offload_le(offload, i, offload->mb_last);
             can_rx_offload_inc(offload, &i)) {
                struct sk_buff *skb;

                if (!(pending & BIT_ULL(i)))
                        continue;

                skb = can_rx_offload_offload_one(offload, i);
                if (IS_ERR_OR_NULL(skb))
                        continue;

                __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
        }

        if (!skb_queue_empty(&skb_queue)) {
                unsigned long flags;
                u32 queue_len;

                spin_lock_irqsave(&offload->skb_queue.lock, flags);
                skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
                spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

                if ((queue_len = skb_queue_len(&offload->skb_queue)) >
                    (offload->skb_queue_len_max / 8))
                        netdev_dbg(offload->dev, "%s: queue_len=%d\n",
                                   __func__, queue_len);

                can_rx_offload_schedule(offload);
        }

        return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
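
/* Drain an RX FIFO: repeatedly read mailbox 0 until it reports empty,
 * queue the frames and schedule NAPI. Returns the number of frames
 * received.
 */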
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
        struct sk_buff *skb;
        int received = 0;

        while (1) {
                skb = can_rx_offload_offload_one(offload, 0);
                if (IS_ERR(skb))
                        continue;
                if (!skb)
                        break;

                skb_queue_tail(&offload->skb_queue, skb);
                received++;
        }

        if (received)
                can_rx_offload_schedule(offload);

        return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
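
/* Queue a driver-generated skb (e.g. an error frame or an echoed TX frame)
 * sorted by @timestamp relative to the offloaded RX frames. The skb is
 * freed and -ENOBUFS returned when the queue is already full.
 */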
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
                                struct sk_buff *skb, u32 timestamp)
{
        struct can_rx_offload_cb *cb;
        unsigned long flags;

        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max) {
                dev_kfree_skb_any(skb);
                return -ENOBUFS;
        }

        cb = can_rx_offload_get_cb(skb);
        cb->timestamp = timestamp;

        spin_lock_irqsave(&offload->skb_queue.lock, flags);
        __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

        can_rx_offload_schedule(offload);

        return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
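
/* Complete the TX echo for @idx: fetch the echoed skb and queue it sorted
 * by @timestamp, so that the local echo is delivered in the correct order
 * relative to received frames. Returns the echoed frame's data length.
 */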
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
                                         unsigned int idx, u32 timestamp)
{
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        u8 len;
        int err;

        skb = __can_get_echo_skb(dev, idx, &len);
        if (!skb)
                return 0;

        err = can_rx_offload_queue_sorted(offload, skb, timestamp);
        if (err) {
                stats->rx_errors++;
                stats->tx_fifo_errors++;
        }

        return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
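
/* Queue an skb at the tail of the queue, without timestamp-based sorting,
 * and schedule NAPI. The skb is freed and -ENOBUFS returned when the queue
 * is already full.
 */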
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                              struct sk_buff *skb)
{
        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max) {
                dev_kfree_skb_any(skb);
                return -ENOBUFS;
        }

        skb_queue_tail(&offload->skb_queue, skb);
        can_rx_offload_schedule(offload);

        return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
        offload->dev = dev;

        /* Limit queue len to 4x the weight (rounded to next power of two) */
        offload->skb_queue_len_max = 2 << fls(weight);
        offload->skb_queue_len_max *= 4;
        skb_queue_head_init(&offload->skb_queue);

        can_rx_offload_reset(offload);
        netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

        dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
                __func__, offload->skb_queue_len_max);

        return 0;
}
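
/* Set up rx_offload for timestamp-based (sorted mailbox range) operation.
 * The caller must have filled in can_rx_offload::mailbox_read, ::mb_first
 * and ::mb_last; the NAPI weight is derived from the mailbox range.
 *
 * A minimal setup sketch; priv, foo_mailbox_read and FOO_MB_RX_* are
 * hypothetical driver-side placeholders:
 *
 *      priv->offload.mailbox_read = foo_mailbox_read;
 *      priv->offload.mb_first = FOO_MB_RX_FIRST;
 *      priv->offload.mb_last = FOO_MB_RX_LAST;
 *      err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *      if (err)
 *              return err;
 */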
int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
        unsigned int weight;

        if (offload->mb_first > BITS_PER_LONG_LONG ||
            offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
                return -EINVAL;

        if (offload->mb_first < offload->mb_last) {
                offload->inc = true;
                weight = offload->mb_last - offload->mb_first;
        } else {
                offload->inc = false;
                weight = offload->mb_first - offload->mb_last;
        }

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
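
/* Set up rx_offload for FIFO operation (mailbox 0 is read until empty)
 * with the given NAPI @weight; only ::mailbox_read needs to be provided
 * by the caller.
 */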
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
        if (!offload->mailbox_read)
                return -EINVAL;

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
        can_rx_offload_reset(offload);
        napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
        netif_napi_del(&offload->napi);
        skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);