1 | // SPDX-License-Identifier: ISC |
2 | /* |
3 | * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> |
4 | */ |
5 | |
6 | #include "mt76x02_usb.h" |
7 | |
8 | static void mt76x02u_remove_dma_hdr(struct sk_buff *skb) |
9 | { |
10 | int hdr_len; |
11 | |
12 | skb_pull(skb, len: sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN); |
13 | hdr_len = ieee80211_get_hdrlen_from_skb(skb); |
14 | if (hdr_len % 4) |
15 | mt76x02_remove_hdr_pad(skb, len: 2); |
16 | } |
17 | |
18 | void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) |
19 | { |
20 | mt76x02u_remove_dma_hdr(skb: e->skb); |
21 | mt76_tx_complete_skb(dev: mdev, wcid: e->wcid, skb: e->skb); |
22 | } |
23 | EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb); |
24 | |
25 | int mt76x02u_mac_start(struct mt76x02_dev *dev) |
26 | { |
27 | mt76x02_mac_reset_counters(dev); |
28 | |
29 | mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); |
30 | if (!mt76x02_wait_for_wpdma(dev: &dev->mt76, timeout: 200000)) |
31 | return -ETIMEDOUT; |
32 | |
33 | mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); |
34 | |
35 | mt76_wr(dev, MT_MAC_SYS_CTRL, |
36 | MT_MAC_SYS_CTRL_ENABLE_TX | |
37 | MT_MAC_SYS_CTRL_ENABLE_RX); |
38 | |
39 | if (!mt76x02_wait_for_wpdma(dev: &dev->mt76, timeout: 50)) |
40 | return -ETIMEDOUT; |
41 | |
42 | return 0; |
43 | } |
44 | EXPORT_SYMBOL_GPL(mt76x02u_mac_start); |
45 | |
46 | int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) |
47 | { |
48 | u32 info, pad; |
49 | |
50 | /* Buffer layout: |
51 | * | 4B | xfer len | pad | 4B | |
52 | * | TXINFO | pkt/cmd | zero pad to 4B | zero | |
53 | * |
54 | * length field of TXINFO should be set to 'xfer len'. |
55 | */ |
56 | info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | |
57 | FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; |
58 | put_unaligned_le32(val: info, p: skb_push(skb, len: sizeof(info))); |
59 | |
60 | pad = round_up(skb->len, 4) + 4 - skb->len; |
61 | return mt76_skb_adjust_pad(skb, pad); |
62 | } |
63 | |
64 | int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, |
65 | enum mt76_txq_id qid, struct mt76_wcid *wcid, |
66 | struct ieee80211_sta *sta, |
67 | struct mt76_tx_info *tx_info) |
68 | { |
69 | struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); |
70 | int pid, len = tx_info->skb->len, ep = dev->mphy.q_tx[qid]->ep; |
71 | struct mt76x02_txwi *txwi; |
72 | bool ampdu = IEEE80211_SKB_CB(skb: tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU; |
73 | enum mt76_qsel qsel; |
74 | u32 flags; |
75 | int err; |
76 | |
77 | mt76_insert_hdr_pad(skb: tx_info->skb); |
78 | |
79 | txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi)); |
80 | mt76x02_mac_write_txwi(dev, txwi, skb: tx_info->skb, wcid, sta, len); |
81 | skb_push(skb: tx_info->skb, len: sizeof(*txwi)); |
82 | |
83 | pid = mt76_tx_status_skb_add(dev: mdev, wcid, skb: tx_info->skb); |
84 | |
85 | /* encode packet rate for no-skb packet id to fix up status reporting */ |
86 | if (pid == MT_PACKET_ID_NO_SKB) |
87 | pid = MT_PACKET_ID_HAS_RATE | |
88 | (le16_to_cpu(txwi->rate) & MT_PKTID_RATE) | |
89 | FIELD_PREP(MT_PKTID_AC, |
90 | skb_get_queue_mapping(tx_info->skb)); |
91 | |
92 | txwi->pktid = pid; |
93 | |
94 | if ((mt76_is_skb_pktid(pktid: pid) && ampdu) || ep == MT_EP_OUT_HCCA) |
95 | qsel = MT_QSEL_MGMT; |
96 | else |
97 | qsel = MT_QSEL_EDCA; |
98 | |
99 | flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | |
100 | MT_TXD_INFO_80211; |
101 | if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) |
102 | flags |= MT_TXD_INFO_WIV; |
103 | |
104 | if (sta) { |
105 | struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; |
106 | |
107 | ewma_pktlen_add(e: &msta->pktlen, val: tx_info->skb->len); |
108 | } |
109 | |
110 | err = mt76x02u_skb_dma_info(skb: tx_info->skb, port: WLAN_PORT, flags); |
111 | if (err && wcid) |
112 | /* Release pktid in case of error. */ |
113 | idr_remove(&wcid->pktid, id: pid); |
114 | |
115 | return err; |
116 | } |
117 | EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb); |
118 | |
119 | /* Trigger pre-TBTT event 8 ms before TBTT */ |
120 | #define PRE_TBTT_USEC 8000 |
121 | |
122 | /* Beacon SRAM memory is limited to 8kB. We need to send PS buffered frames |
123 | * (which can be 1500 bytes big) via beacon memory. That make limit of number |
124 | * of slots to 5. TODO: dynamically calculate offsets in beacon SRAM. |
125 | */ |
126 | #define N_BCN_SLOTS 5 |
127 | |
128 | static void mt76x02u_start_pre_tbtt_timer(struct mt76x02_dev *dev) |
129 | { |
130 | u64 time; |
131 | u32 tbtt; |
132 | |
133 | /* Get remaining TBTT in usec */ |
134 | tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL); |
135 | tbtt *= 32; |
136 | |
137 | if (tbtt <= PRE_TBTT_USEC) { |
138 | queue_work(wq: system_highpri_wq, work: &dev->pre_tbtt_work); |
139 | return; |
140 | } |
141 | |
142 | time = (tbtt - PRE_TBTT_USEC) * 1000ull; |
143 | hrtimer_start(timer: &dev->pre_tbtt_timer, tim: time, mode: HRTIMER_MODE_REL); |
144 | } |
145 | |
146 | static void mt76x02u_restart_pre_tbtt_timer(struct mt76x02_dev *dev) |
147 | { |
148 | u32 tbtt, dw0, dw1; |
149 | u64 tsf, time; |
150 | |
151 | /* Get remaining TBTT in usec */ |
152 | tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL); |
153 | tbtt *= 32; |
154 | |
155 | dw0 = mt76_rr(dev, MT_TSF_TIMER_DW0); |
156 | dw1 = mt76_rr(dev, MT_TSF_TIMER_DW1); |
157 | tsf = (u64)dw0 << 32 | dw1; |
158 | dev_dbg(dev->mt76.dev, "TSF: %llu us TBTT %u us\n" , tsf, tbtt); |
159 | |
160 | /* Convert beacon interval in TU (1024 usec) to nsec */ |
161 | time = ((1000000000ull * dev->mt76.beacon_int) >> 10); |
162 | |
163 | /* Adjust time to trigger hrtimer 8ms before TBTT */ |
164 | if (tbtt < PRE_TBTT_USEC) |
165 | time -= (PRE_TBTT_USEC - tbtt) * 1000ull; |
166 | else |
167 | time += (tbtt - PRE_TBTT_USEC) * 1000ull; |
168 | |
169 | hrtimer_start(timer: &dev->pre_tbtt_timer, tim: time, mode: HRTIMER_MODE_REL); |
170 | } |
171 | |
172 | static void mt76x02u_stop_pre_tbtt_timer(struct mt76x02_dev *dev) |
173 | { |
174 | do { |
175 | hrtimer_cancel(timer: &dev->pre_tbtt_timer); |
176 | cancel_work_sync(work: &dev->pre_tbtt_work); |
177 | /* Timer can be rearmed by work. */ |
178 | } while (hrtimer_active(timer: &dev->pre_tbtt_timer)); |
179 | } |
180 | |
181 | static void mt76x02u_pre_tbtt_work(struct work_struct *work) |
182 | { |
183 | struct mt76x02_dev *dev = |
184 | container_of(work, struct mt76x02_dev, pre_tbtt_work); |
185 | struct beacon_bc_data data = { |
186 | .dev = dev, |
187 | }; |
188 | struct sk_buff *skb; |
189 | int nbeacons; |
190 | |
191 | if (!dev->mt76.beacon_mask) |
192 | return; |
193 | |
194 | if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) |
195 | return; |
196 | |
197 | __skb_queue_head_init(list: &data.q); |
198 | |
199 | mt76x02_resync_beacon_timer(dev); |
200 | |
201 | /* Prevent corrupt transmissions during update */ |
202 | mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff); |
203 | dev->beacon_data_count = 0; |
204 | |
205 | ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), |
206 | iter_flags: IEEE80211_IFACE_ITER_RESUME_ALL, |
207 | iterator: mt76x02_update_beacon_iter, data: &data); |
208 | |
209 | while ((skb = __skb_dequeue(list: &data.q)) != NULL) |
210 | mt76x02_mac_set_beacon(dev, skb); |
211 | |
212 | mt76_csa_check(dev: &dev->mt76); |
213 | |
214 | if (dev->mt76.csa_complete) { |
215 | mt76_csa_finish(dev: &dev->mt76); |
216 | goto out; |
217 | } |
218 | |
219 | nbeacons = hweight8(dev->mt76.beacon_mask); |
220 | mt76x02_enqueue_buffered_bc(dev, data: &data, N_BCN_SLOTS - nbeacons); |
221 | |
222 | while ((skb = __skb_dequeue(list: &data.q)) != NULL) |
223 | mt76x02_mac_set_beacon(dev, skb); |
224 | |
225 | out: |
226 | mt76_wr(dev, MT_BCN_BYPASS_MASK, |
227 | 0xff00 | ~(0xff00 >> dev->beacon_data_count)); |
228 | |
229 | mt76x02u_restart_pre_tbtt_timer(dev); |
230 | } |
231 | |
232 | static enum hrtimer_restart mt76x02u_pre_tbtt_interrupt(struct hrtimer *timer) |
233 | { |
234 | struct mt76x02_dev *dev = |
235 | container_of(timer, struct mt76x02_dev, pre_tbtt_timer); |
236 | |
237 | queue_work(wq: system_highpri_wq, work: &dev->pre_tbtt_work); |
238 | |
239 | return HRTIMER_NORESTART; |
240 | } |
241 | |
242 | static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en) |
243 | { |
244 | if (en && dev->mt76.beacon_mask && |
245 | !hrtimer_active(timer: &dev->pre_tbtt_timer)) |
246 | mt76x02u_start_pre_tbtt_timer(dev); |
247 | if (!en) |
248 | mt76x02u_stop_pre_tbtt_timer(dev); |
249 | } |
250 | |
251 | static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en) |
252 | { |
253 | if (WARN_ON_ONCE(!dev->mt76.beacon_int)) |
254 | return; |
255 | |
256 | if (en) |
257 | mt76x02u_start_pre_tbtt_timer(dev); |
258 | } |
259 | |
260 | void mt76x02u_init_beacon_config(struct mt76x02_dev *dev) |
261 | { |
262 | static const struct mt76x02_beacon_ops beacon_ops = { |
263 | .nslots = N_BCN_SLOTS, |
264 | .slot_size = (8192 / N_BCN_SLOTS) & ~63, |
265 | .pre_tbtt_enable = mt76x02u_pre_tbtt_enable, |
266 | .beacon_enable = mt76x02u_beacon_enable, |
267 | }; |
268 | dev->beacon_ops = &beacon_ops; |
269 | |
270 | hrtimer_init(timer: &dev->pre_tbtt_timer, CLOCK_MONOTONIC, mode: HRTIMER_MODE_REL); |
271 | dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt; |
272 | INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work); |
273 | |
274 | mt76x02_init_beacon_config(dev); |
275 | } |
276 | EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config); |
277 | |
/* Disable beaconing in hardware (unless the device is already gone) and
 * stop the pre-TBTT timer/work. Register access is skipped after removal
 * since the USB device can no longer be reached.
 */
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
{
	if (!test_bit(MT76_REMOVED, &dev->mphy.state))
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_TIMER_EN |
			   MT_BEACON_TIME_CFG_SYNC_MODE |
			   MT_BEACON_TIME_CFG_TBTT_EN |
			   MT_BEACON_TIME_CFG_BEACON_TX);

	mt76x02u_stop_pre_tbtt_timer(dev);
}
EXPORT_SYMBOL_GPL(mt76x02u_exit_beacon_config);
290 | |