util.c 4.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Authors:
  5. * Alexander Aring <aar@pengutronix.de>
  6. *
  7. * Based on: net/mac80211/util.c
  8. */
  9. #include "ieee802154_i.h"
  10. #include "driver-ops.h"
  11. /* privid for wpan_phys to determine whether they belong to us or not */
  12. const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
  13. /**
  14. * ieee802154_wake_queue - wake ieee802154 queue
  15. * @hw: main hardware object
  16. *
* Transceivers usually have either one transmit framebuffer or one framebuffer
  18. * for both transmitting and receiving. Hence, the core currently only handles
  19. * one frame at a time for each phy, which means we had to stop the queue to
  20. * avoid new skb to come during the transmission. The queue then needs to be
  21. * woken up after the operation.
  22. */
  23. static void ieee802154_wake_queue(struct ieee802154_hw *hw)
  24. {
  25. struct ieee802154_local *local = hw_to_local(hw);
  26. struct ieee802154_sub_if_data *sdata;
  27. rcu_read_lock();
  28. clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
  29. list_for_each_entry_rcu(sdata, &local->interfaces, list) {
  30. if (!sdata->dev)
  31. continue;
  32. netif_wake_queue(sdata->dev);
  33. }
  34. rcu_read_unlock();
  35. }
  36. /**
  37. * ieee802154_stop_queue - stop ieee802154 queue
  38. * @hw: main hardware object
  39. *
* Transceivers usually have either one transmit framebuffer or one framebuffer
  41. * for both transmitting and receiving. Hence, the core currently only handles
  42. * one frame at a time for each phy, which means we need to tell upper layers to
  43. * stop giving us new skbs while we are busy with the transmitted one. The queue
  44. * must then be stopped before transmitting.
  45. */
  46. static void ieee802154_stop_queue(struct ieee802154_hw *hw)
  47. {
  48. struct ieee802154_local *local = hw_to_local(hw);
  49. struct ieee802154_sub_if_data *sdata;
  50. rcu_read_lock();
  51. list_for_each_entry_rcu(sdata, &local->interfaces, list) {
  52. if (!sdata->dev)
  53. continue;
  54. netif_stop_queue(sdata->dev);
  55. }
  56. rcu_read_unlock();
  57. }
/* Take a hold reference on the phy's transmit queue. Only the first
 * holder (0 -> 1 transition of hold_txs) actually stops the netif
 * queues; nested holds just bump the counter.
 */
void ieee802154_hold_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	/* queue_lock serializes the counter check against
	 * ieee802154_release_queue() so a stop cannot race with a
	 * concurrent wake.
	 */
	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (!atomic_fetch_inc(&local->phy->hold_txs))
		ieee802154_stop_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
/* Drop a hold reference on the phy's transmit queue. Only the last
 * release (1 -> 0 transition of hold_txs) wakes the netif queues,
 * mirroring ieee802154_hold_queue().
 */
void ieee802154_release_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	/* Same lock as the hold path, so stop/wake cannot interleave and
	 * leave the queue in the wrong state.
	 */
	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (atomic_dec_and_test(&local->phy->hold_txs))
		ieee802154_wake_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
  74. void ieee802154_disable_queue(struct ieee802154_local *local)
  75. {
  76. struct ieee802154_sub_if_data *sdata;
  77. rcu_read_lock();
  78. list_for_each_entry_rcu(sdata, &local->interfaces, list) {
  79. if (!sdata->dev)
  80. continue;
  81. netif_tx_disable(sdata->dev);
  82. }
  83. rcu_read_unlock();
  84. }
/* Expiry callback of the interframe-spacing (IFS) timer armed in
 * ieee802154_xmit_complete(): once the spacing period has elapsed,
 * release the queue hold so the next frame may be transmitted.
 */
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
	struct ieee802154_local *local =
		container_of(timer, struct ieee802154_local, ifs_timer);

	ieee802154_release_queue(local);

	/* One-shot timer: it is re-armed per transmission. */
	return HRTIMER_NORESTART;
}
  92. void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
  93. bool ifs_handling)
  94. {
  95. struct ieee802154_local *local = hw_to_local(hw);
  96. local->tx_result = IEEE802154_SUCCESS;
  97. if (ifs_handling) {
  98. u8 max_sifs_size;
  99. /* If transceiver sets CRC on his own we need to use lifs
  100. * threshold len above 16 otherwise 18, because it's not
  101. * part of skb->len.
  102. */
  103. if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
  104. max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
  105. IEEE802154_FCS_LEN;
  106. else
  107. max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
  108. if (skb->len > max_sifs_size)
  109. hrtimer_start(&local->ifs_timer,
  110. hw->phy->lifs_period * NSEC_PER_USEC,
  111. HRTIMER_MODE_REL);
  112. else
  113. hrtimer_start(&local->ifs_timer,
  114. hw->phy->sifs_period * NSEC_PER_USEC,
  115. HRTIMER_MODE_REL);
  116. } else {
  117. ieee802154_release_queue(local);
  118. }
  119. dev_consume_skb_any(skb);
  120. if (atomic_dec_and_test(&hw->phy->ongoing_txs))
  121. wake_up(&hw->phy->sync_txq);
  122. }
  123. EXPORT_SYMBOL(ieee802154_xmit_complete);
  124. void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
  125. int reason)
  126. {
  127. struct ieee802154_local *local = hw_to_local(hw);
  128. local->tx_result = reason;
  129. ieee802154_release_queue(local);
  130. dev_kfree_skb_any(skb);
  131. if (atomic_dec_and_test(&hw->phy->ongoing_txs))
  132. wake_up(&hw->phy->sync_txq);
  133. }
  134. EXPORT_SYMBOL(ieee802154_xmit_error);
/* Convenience wrapper for drivers: report a transmission failure caused
 * by an unspecified hardware problem, mapped to IEEE802154_SYSTEM_ERROR.
 */
void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR);
}
EXPORT_SYMBOL(ieee802154_xmit_hw_error);
/* Quiesce the device. Order matters: flush queued work first, then
 * cancel a possibly armed IFS timer, and only then ask the driver to
 * stop the hardware.
 */
void ieee802154_stop_device(struct ieee802154_local *local)
{
	flush_workqueue(local->workqueue);
	hrtimer_cancel(&local->ifs_timer);
	drv_stop(local);
}