// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/mac80211/util.c
 */
9 | |
10 | #include "ieee802154_i.h" |
11 | #include "driver-ops.h" |
12 | |
13 | /* privid for wpan_phys to determine whether they belong to us or not */ |
14 | const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid; |
15 | |
16 | /** |
17 | * ieee802154_wake_queue - wake ieee802154 queue |
18 | * @hw: main hardware object |
19 | * |
20 | * Tranceivers usually have either one transmit framebuffer or one framebuffer |
21 | * for both transmitting and receiving. Hence, the core currently only handles |
22 | * one frame at a time for each phy, which means we had to stop the queue to |
23 | * avoid new skb to come during the transmission. The queue then needs to be |
24 | * woken up after the operation. |
25 | */ |
26 | static void ieee802154_wake_queue(struct ieee802154_hw *hw) |
27 | { |
28 | struct ieee802154_local *local = hw_to_local(hw); |
29 | struct ieee802154_sub_if_data *sdata; |
30 | |
31 | rcu_read_lock(); |
32 | clear_bit(nr: WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, addr: &local->phy->flags); |
33 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
34 | if (!sdata->dev) |
35 | continue; |
36 | |
37 | netif_wake_queue(dev: sdata->dev); |
38 | } |
39 | rcu_read_unlock(); |
40 | } |
41 | |
42 | /** |
43 | * ieee802154_stop_queue - stop ieee802154 queue |
44 | * @hw: main hardware object |
45 | * |
46 | * Tranceivers usually have either one transmit framebuffer or one framebuffer |
47 | * for both transmitting and receiving. Hence, the core currently only handles |
48 | * one frame at a time for each phy, which means we need to tell upper layers to |
49 | * stop giving us new skbs while we are busy with the transmitted one. The queue |
50 | * must then be stopped before transmitting. |
51 | */ |
52 | static void ieee802154_stop_queue(struct ieee802154_hw *hw) |
53 | { |
54 | struct ieee802154_local *local = hw_to_local(hw); |
55 | struct ieee802154_sub_if_data *sdata; |
56 | |
57 | rcu_read_lock(); |
58 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
59 | if (!sdata->dev) |
60 | continue; |
61 | |
62 | netif_stop_queue(dev: sdata->dev); |
63 | } |
64 | rcu_read_unlock(); |
65 | } |
66 | |
67 | void ieee802154_hold_queue(struct ieee802154_local *local) |
68 | { |
69 | unsigned long flags; |
70 | |
71 | spin_lock_irqsave(&local->phy->queue_lock, flags); |
72 | if (!atomic_fetch_inc(v: &local->phy->hold_txs)) |
73 | ieee802154_stop_queue(hw: &local->hw); |
74 | spin_unlock_irqrestore(lock: &local->phy->queue_lock, flags); |
75 | } |
76 | |
77 | void ieee802154_release_queue(struct ieee802154_local *local) |
78 | { |
79 | unsigned long flags; |
80 | |
81 | spin_lock_irqsave(&local->phy->queue_lock, flags); |
82 | if (atomic_dec_and_test(v: &local->phy->hold_txs)) |
83 | ieee802154_wake_queue(hw: &local->hw); |
84 | spin_unlock_irqrestore(lock: &local->phy->queue_lock, flags); |
85 | } |
86 | |
87 | void ieee802154_disable_queue(struct ieee802154_local *local) |
88 | { |
89 | struct ieee802154_sub_if_data *sdata; |
90 | |
91 | rcu_read_lock(); |
92 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
93 | if (!sdata->dev) |
94 | continue; |
95 | |
96 | netif_tx_disable(dev: sdata->dev); |
97 | } |
98 | rcu_read_unlock(); |
99 | } |
100 | |
101 | enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer) |
102 | { |
103 | struct ieee802154_local *local = |
104 | container_of(timer, struct ieee802154_local, ifs_timer); |
105 | |
106 | ieee802154_release_queue(local); |
107 | |
108 | return HRTIMER_NORESTART; |
109 | } |
110 | |
111 | void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, |
112 | bool ifs_handling) |
113 | { |
114 | struct ieee802154_local *local = hw_to_local(hw); |
115 | |
116 | local->tx_result = IEEE802154_SUCCESS; |
117 | |
118 | if (ifs_handling) { |
119 | u8 max_sifs_size; |
120 | |
121 | /* If transceiver sets CRC on his own we need to use lifs |
122 | * threshold len above 16 otherwise 18, because it's not |
123 | * part of skb->len. |
124 | */ |
125 | if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM) |
126 | max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE - |
127 | IEEE802154_FCS_LEN; |
128 | else |
129 | max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE; |
130 | |
131 | if (skb->len > max_sifs_size) |
132 | hrtimer_start(timer: &local->ifs_timer, |
133 | tim: hw->phy->lifs_period * NSEC_PER_USEC, |
134 | mode: HRTIMER_MODE_REL); |
135 | else |
136 | hrtimer_start(timer: &local->ifs_timer, |
137 | tim: hw->phy->sifs_period * NSEC_PER_USEC, |
138 | mode: HRTIMER_MODE_REL); |
139 | } else { |
140 | ieee802154_release_queue(local); |
141 | } |
142 | |
143 | dev_consume_skb_any(skb); |
144 | if (atomic_dec_and_test(v: &hw->phy->ongoing_txs)) |
145 | wake_up(&hw->phy->sync_txq); |
146 | } |
147 | EXPORT_SYMBOL(ieee802154_xmit_complete); |
148 | |
149 | void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb, |
150 | int reason) |
151 | { |
152 | struct ieee802154_local *local = hw_to_local(hw); |
153 | |
154 | local->tx_result = reason; |
155 | ieee802154_release_queue(local); |
156 | dev_kfree_skb_any(skb); |
157 | if (atomic_dec_and_test(v: &hw->phy->ongoing_txs)) |
158 | wake_up(&hw->phy->sync_txq); |
159 | } |
160 | EXPORT_SYMBOL(ieee802154_xmit_error); |
161 | |
162 | void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb) |
163 | { |
164 | ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR); |
165 | } |
166 | EXPORT_SYMBOL(ieee802154_xmit_hw_error); |
167 | |
168 | void ieee802154_stop_device(struct ieee802154_local *local) |
169 | { |
170 | flush_workqueue(local->workqueue); |
171 | hrtimer_cancel(timer: &local->ifs_timer); |
172 | drv_stop(local); |
173 | } |
174 | |