1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /****************************************************************************** |
3 | * |
4 | * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. |
5 | * |
6 | ******************************************************************************/ |
7 | #ifndef _RTW_XMIT_H_ |
8 | #define _RTW_XMIT_H_ |
9 | |
10 | #include <linux/completion.h> |
11 | |
/* normal tx buffer: size (20 KiB) and number pre-allocated */
#define MAX_XMITBUF_SZ (20480) /* 20k */

#define NR_XMITBUFF (16)

/* alignment applied to each xmit buffer start address */
#define XMITBUF_ALIGN_SZ 512

/* xmit extension buff definition */
#define MAX_XMIT_EXTBUF_SZ (1536)
#define NR_XMIT_EXTBUFF (32)

#define MAX_CMDBUF_SZ (5120) /* raised from 4096 */

#define MAX_NUMBLKS (1)

/* software xmit queue selectors (WMM access categories) */
#define XMIT_VO_QUEUE (0)
#define XMIT_VI_QUEUE (1)
#define XMIT_BE_QUEUE (2)
#define XMIT_BK_QUEUE (3)

/* hardware queue indexes (HW_QUEUE_ENTRY of them in total) */
#define VO_QUEUE_INX 0
#define VI_QUEUE_INX 1
#define BE_QUEUE_INX 2
#define BK_QUEUE_INX 3
#define BCN_QUEUE_INX 4
#define MGT_QUEUE_INX 5
#define HIGH_QUEUE_INX 6
#define TXCMD_QUEUE_INX 7

#define HW_QUEUE_ENTRY 8
41 | |
/*
 * Build the 4-byte WEP IV in pattrib_iv: bytes 0..2 carry the 24-bit
 * TSC counter (TSC0..TSC2), byte 3 carries the 2-bit key index in bits
 * 7:6.  The counter is then incremented, wrapping 0xffffff -> 0.
 */
#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
do {\
pattrib_iv[0] = dot11txpn._byte_.TSC0;\
pattrib_iv[1] = dot11txpn._byte_.TSC1;\
pattrib_iv[2] = dot11txpn._byte_.TSC2;\
pattrib_iv[3] = ((keyidx & 0x3)<<6);\
dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : (dot11txpn.val + 1);\
} while (0)
50 | |
51 | |
/*
 * Build the 8-byte TKIP extended IV.  Bytes 0..2 form the per-spec WEP
 * seed: TSC1, (TSC1 | 0x20) & 0x7f, TSC0 — the masked middle byte is the
 * 802.11 TKIP construction that avoids known weak WEP key classes.
 * Byte 3 sets the Ext IV flag (BIT(5)) plus the key index in bits 7:6;
 * bytes 4..7 carry TSC2..TSC5.  The 48-bit counter is then incremented,
 * wrapping 0xffffffffffff -> 0.
 */
#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
do {\
pattrib_iv[0] = dot11txpn._byte_.TSC1;\
pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
pattrib_iv[2] = dot11txpn._byte_.TSC0;\
pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
pattrib_iv[4] = dot11txpn._byte_.TSC2;\
pattrib_iv[5] = dot11txpn._byte_.TSC3;\
pattrib_iv[6] = dot11txpn._byte_.TSC4;\
pattrib_iv[7] = dot11txpn._byte_.TSC5;\
dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val + 1);\
} while (0)
64 | |
/*
 * Build the 8-byte CCMP (AES) header.  Bytes 0..1 carry PN0/PN1, byte 2
 * is reserved (0), byte 3 sets the Ext IV flag (BIT(5)) plus the key
 * index in bits 7:6, and bytes 4..7 carry PN2..PN5.  The 48-bit packet
 * number is then incremented, wrapping 0xffffffffffff -> 0.
 */
#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
do {\
pattrib_iv[0] = dot11txpn._byte_.TSC0;\
pattrib_iv[1] = dot11txpn._byte_.TSC1;\
pattrib_iv[2] = 0;\
pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
pattrib_iv[4] = dot11txpn._byte_.TSC2;\
pattrib_iv[5] = dot11txpn._byte_.TSC3;\
pattrib_iv[6] = dot11txpn._byte_.TSC4;\
pattrib_iv[7] = dot11txpn._byte_.TSC5;\
dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val + 1);\
} while (0)
77 | |
78 | |
/* number of hw_xmit entries (one per WMM AC) */
#define HWXMIT_ENTRY 4

/* For Buffer Descriptor ring architecture */
#define TXDESC_SIZE 40

/* NOTE(review): offset presumably equals descriptor size because the
 * descriptor precedes the frame payload in the buffer — confirm in HAL. */
#define TXDESC_OFFSET TXDESC_SIZE

#define TXDESC_40_BYTES
87 | |
/*
 * Hardware tx descriptor: little-endian 32-bit words, 8 in the base
 * layout plus extra words selected by TXDESC_40_BYTES / TXDESC_64_BYTES
 * (TXDESC_40_BYTES is defined above, so this build uses 10 words).
 */
struct tx_desc {
__le32 txdw0;
__le32 txdw1;
__le32 txdw2;
__le32 txdw3;
__le32 txdw4;
__le32 txdw5;
__le32 txdw6;
__le32 txdw7;

#if defined(TXDESC_40_BYTES) || defined(TXDESC_64_BYTES)
__le32 txdw8;
__le32 txdw9;
#endif /* TXDESC_40_BYTES || TXDESC_64_BYTES */

#ifdef TXDESC_64_BYTES
__le32 txdw10;
__le32 txdw11;

/* 2008/05/15 MH Because PCIE HW memory R/W 4K limit. And now, our descriptor */
/* size is 40 bytes. If you use more than 102 descriptor(103*40>4096), HW will execute */
/* memoryR/W CRC error. And then all DMA fetch will fail. We must decrease descriptor */
/* number or enlarge descriptor size as 64 bytes. */
__le32 txdw12;
__le32 txdw13;
__le32 txdw14;
__le32 txdw15;
#endif
};
117 | |
/* word-addressable view of a tx descriptor (TXDESC_SIZE/4 words) */
union txdesc {
struct tx_desc txdesc;
unsigned int value[TXDESC_SIZE>>2];
};
122 | |
/* per-AC pending state: the station queue plus its frame count (accnt) */
struct hw_xmit {
/* spinlock_t xmit_lock; */
/* struct list_head pending; */
struct __queue *sta_queue;
/* struct hw_txqueue *phwtxqueue; */
/* signed int txcmdcnt; */
int accnt;
};
131 | |
132 | /* reduce size */ |
133 | struct pkt_attrib { |
134 | u8 type; |
135 | u8 subtype; |
136 | u8 bswenc; |
137 | u8 dhcp_pkt; |
138 | u16 ether_type; |
139 | u16 seqnum; |
140 | u16 pkt_hdrlen; /* the original 802.3 pkt header len */ |
141 | u16 hdrlen; /* the WLAN Header Len */ |
142 | u32 pktlen; /* the original 802.3 pkt raw_data len (not include ether_hdr data) */ |
143 | u32 last_txcmdsz; |
144 | u8 nr_frags; |
145 | u8 encrypt; /* when 0 indicates no encryption; when non-zero, indicates the encryption algorithm */ |
146 | u8 iv_len; |
147 | u8 icv_len; |
148 | u8 iv[18]; |
149 | u8 icv[16]; |
150 | u8 priority; |
151 | u8 ack_policy; |
152 | u8 mac_id; |
153 | u8 vcs_mode; /* virtual carrier sense method */ |
154 | u8 dst[ETH_ALEN]; |
155 | u8 src[ETH_ALEN]; |
156 | u8 ta[ETH_ALEN]; |
157 | u8 ra[ETH_ALEN]; |
158 | u8 key_idx; |
159 | u8 qos_en; |
160 | u8 ht_en; |
161 | u8 raid;/* rate adpative id */ |
162 | u8 bwmode; |
163 | u8 ch_offset;/* PRIME_CHNL_OFFSET */ |
164 | u8 sgi;/* short GI */ |
165 | u8 ampdu_en;/* tx ampdu enable */ |
166 | u8 ampdu_spacing; /* ampdu_min_spacing for peer sta's rx */ |
167 | u8 mdata;/* more data bit */ |
168 | u8 pctrl;/* per packet txdesc control enable */ |
169 | u8 triggered;/* for ap mode handling Power Saving sta */ |
170 | u8 qsel; |
171 | u8 order;/* order bit */ |
172 | u8 eosp; |
173 | u8 rate; |
174 | u8 intel_proxim; |
175 | u8 retry_ctrl; |
176 | u8 mbssid; |
177 | u8 ldpc; |
178 | u8 stbc; |
179 | struct sta_info *psta; |
180 | |
181 | u8 rtsen; |
182 | u8 cts2self; |
183 | union Keytype dot11tkiptxmickey; |
184 | /* union Keytype dot11tkiprxmickey; */ |
185 | union Keytype dot118021x_UncstKey; |
186 | |
187 | u8 icmp_pkt; |
188 | |
189 | }; |
190 | |
#define WLANHDR_OFFSET 64

/* frame_tag values carried in struct xmit_frame */
#define NULL_FRAMETAG (0x0)
#define DATA_FRAMETAG 0x01
#define L2_FRAMETAG 0x02
#define MGNT_FRAMETAG 0x03
#define AMSDU_FRAMETAG 0x04

#define EII_FRAMETAG 0x05
#define IEEE8023_FRAMETAG 0x06

#define MP_FRAMETAG 0x07

#define TXAGG_FRAMETAG 0x08
205 | |
/* xmit_buf buf_tag values (see struct xmit_buf) */
enum {
XMITBUF_DATA = 0,
XMITBUF_MGNT = 1,
XMITBUF_CMD = 2,
};

/* Tracks one asynchronous submission; the waiter blocks on 'done'. */
struct submit_ctx {
unsigned long submit_time; /* NOTE(review): unit not shown here, presumably jiffies — confirm at the setter */
u32 timeout_ms; /* 0: wait forever, >0: wait up to this many ms (unsigned, so the old "<0: not synchronous" case cannot occur) */
int status; /* operation result, one of the RTW_SCTX_* values below */
struct completion done;
};
218 | |
/* submit_ctx status codes: SUBMITTED while pending, 0 on success,
 * positive values name the failure reason. */
enum {
RTW_SCTX_SUBMITTED = -1,
RTW_SCTX_DONE_SUCCESS = 0,
RTW_SCTX_DONE_UNKNOWN,
RTW_SCTX_DONE_TIMEOUT,
RTW_SCTX_DONE_BUF_ALLOC,
RTW_SCTX_DONE_BUF_FREE,
RTW_SCTX_DONE_WRITE_PORT_ERR,
RTW_SCTX_DONE_TX_DESC_NA,
RTW_SCTX_DONE_TX_DENY,
RTW_SCTX_DONE_CCX_PKT_FAIL,
RTW_SCTX_DONE_DRV_STOP,
RTW_SCTX_DONE_DEV_REMOVE,
RTW_SCTX_DONE_CMD_ERROR,
};
234 | |
235 | |
/* submit_ctx helpers: init, blocking wait, and completion with status */
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
int rtw_sctx_wait(struct submit_ctx *sctx);
void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
void rtw_sctx_done(struct submit_ctx **sctx);
240 | |
/*
 * One tx buffer.  pallocated_buf is the raw allocation; pbuf is the
 * pointer actually used for frame data (presumably pallocated_buf
 * aligned to XMITBUF_ALIGN_SZ — confirm in _rtw_init_xmit_priv).
 */
struct xmit_buf {
struct list_head list;

struct adapter *padapter;

u8 *pallocated_buf;

u8 *pbuf;

void *priv_data;

u16 buf_tag; /* XMITBUF_DATA / XMITBUF_MGNT / XMITBUF_CMD, see enum above */
u16 flags;
u32 alloc_sz;

u32 len;

struct submit_ctx *sctx;

u8 *phead;
u8 *pdata;
u8 *ptail;
u8 *pend;
u32 ff_hwaddr;
u8 pg_num;
u8 agg_num;

#if defined(DBG_XMIT_BUF) || defined(DBG_XMIT_BUF_EXT)
u8 no;
#endif

};
273 | |
274 | |
/*
 * One frame in flight: its attributes, the originating sk_buff, and the
 * xmit_buf holding the built 802.11 frame (buf_addr points into it).
 */
struct xmit_frame {
struct list_head list;

struct pkt_attrib attrib;

struct sk_buff *pkt;

int frame_tag; /* one of the *_FRAMETAG values */

struct adapter *padapter;

u8 *buf_addr;

struct xmit_buf *pxmitbuf;

u8 pg_num;
u8 agg_num;

u8 ack_report;

u8 *alloc_addr; /* the actual address this xmitframe allocated */
u8 ext_tag; /* 0:data, 1:mgmt */

};
299 | |
/* per-station, per-AC service queue with its pending-frame count */
struct tx_servq {
struct list_head tx_pending;
struct __queue sta_pending;
int qcnt;
};
305 | |
306 | |
/* Per-station tx state: one tx_servq per WMM AC plus per-TID sequence
 * counters. */
struct sta_xmit_priv {
spinlock_t lock;
signed int option;
signed int apsd_setting; /* When bit mask is on, the associated edca queue supports APSD. */


/* struct tx_servq blk_q[MAX_NUMBLKS]; */
struct tx_servq be_q; /* priority == 0, 3 */
struct tx_servq bk_q; /* priority == 1, 2 */
struct tx_servq vi_q; /* priority == 4, 5 */
struct tx_servq vo_q; /* priority == 6, 7 */
struct list_head legacy_dz;
struct list_head apsd;

u16 txseq_tid[16]; /* next tx sequence number, indexed by TID */

/* uint sta_tx_bytes; */
/* u64 sta_tx_pkts; */
/* uint sta_tx_fail; */


};
329 | |
330 | |
/*
 * Ring state for one hardware tx queue.
 * NOTE(review): 'volatile' here suggests these indices are shared with
 * an interrupt path or HW; volatile alone is not a synchronization
 * primitive — confirm callers use proper locking/barriers.
 */
struct hw_txqueue {
volatile signed int head;
volatile signed int tail;
volatile signed int free_sz; /* in units of 64 bytes */
volatile signed int free_cmdsz;
volatile signed int txsz[8];
uint ff_hwaddr;
uint cmd_hwaddr;
signed int ac_tag;
};
341 | |
/* index into xmit_priv.pcmd_xmitbuf[] */
enum cmdbuf_type {
CMDBUF_BEACON = 0x00,
CMDBUF_RSVD,
CMDBUF_MAX
};
347 | |
/*
 * Per-adapter tx state: free/pending frame and buffer pools, per-AC
 * pending queues, tx statistics, and the SDIO xmit thread handles.
 */
struct xmit_priv {

spinlock_t lock;

struct completion xmit_comp;
struct completion terminate_xmitthread_comp;

/* struct __queue blk_strms[MAX_NUMBLKS]; */
/* per-AC (plus broadcast/multicast) pending queues */
struct __queue be_pending;
struct __queue bk_pending;
struct __queue vi_pending;
struct __queue vo_pending;
struct __queue bm_pending;

/* struct __queue legacy_dz_queue; */
/* struct __queue apsd_queue; */

/* xmit_frame pool: raw allocation, usable base, free count and queue */
u8 *pallocated_frame_buf;
u8 *pxmit_frame_buf;
uint free_xmitframe_cnt;
struct __queue free_xmit_queue;

/* uint mapping_addr; */
/* uint pkt_sz; */

/* extension xmit_frame pool (ext_tag frames) */
u8 *xframe_ext_alloc_addr;
u8 *xframe_ext;
uint free_xframe_ext_cnt;
struct __queue free_xframe_ext_queue;

/* struct hw_txqueue be_txqueue; */
/* struct hw_txqueue bk_txqueue; */
/* struct hw_txqueue vi_txqueue; */
/* struct hw_txqueue vo_txqueue; */
/* struct hw_txqueue bmc_txqueue; */

uint frag_len;

struct adapter *adapter;

/* virtual carrier sense (RTS/CTS) configuration */
u8 vcs_setting;
u8 vcs;
u8 vcs_type;
/* u16 rts_thresh; */

/* tx statistics */
u64 tx_bytes;
u64 tx_pkts;
u64 tx_drop;
u64 last_tx_pkts;

struct hw_xmit *hwxmits;
u8 hwxmit_entry;

u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength from large to small. its value is 0->vo, 1->vi, 2->be, 3->bk. */

void *SdioXmitThread;
struct completion SdioXmitStart;
struct completion SdioXmitTerminate;

/* normal xmit_buf pool */
struct __queue free_xmitbuf_queue;
struct __queue pending_xmitbuf_queue;
u8 *pallocated_xmitbuf;
u8 *pxmitbuf;
uint free_xmitbuf_cnt;

/* extension xmit_buf pool */
struct __queue free_xmit_extbuf_queue;
u8 *pallocated_xmit_extbuf;
u8 *pxmit_extbuf;
uint free_xmit_extbuf_cnt;

struct xmit_buf pcmd_xmitbuf[CMDBUF_MAX];

u16 nqos_ssn; /* sequence number for non-QoS frames */

/* tx-ack reporting: serialized by ack_tx_mutex, result in ack_tx_ops */
int ack_tx;
struct mutex ack_tx_mutex;
struct submit_ctx ack_tx_ops;
u8 seq_no;
spinlock_t lock_sctx;
};
428 | |
/* command/beacon xmitframe allocation (backed by pcmd_xmitbuf[]) */
extern struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
enum cmdbuf_type buf_type);
#define rtw_alloc_cmdxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_RSVD)
#define rtw_alloc_bcnxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_BEACON)

/* xmit_buf pool alloc/free (normal and extension pools) */
extern struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);

extern struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);

void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz);
extern void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len);
extern s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib);
extern s32 rtw_put_snap(u8 *data, u16 h_proto);

/* xmit_frame pool alloc/free */
extern struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe);
extern void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue);
struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, signed int up, u8 *ac);
extern s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe);

/* frame classification and 802.11 frame building */
extern s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe);
extern u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
extern s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe);
extern s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe);
s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);


s32 rtw_txframes_pending(struct adapter *padapter);
void rtw_init_hwxmits(struct hw_xmit *phwxmit, signed int entry);

/* xmit_priv setup/teardown */
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);


s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);

/* main tx entry point from the network stack */
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
bool xmitframe_hiq_filter(struct xmit_frame *xmitframe);

/* AP-mode power-save handling for sleeping stations */
signed int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);

u8 query_ra_short_GI(struct sta_info *psta);

u8 qos_acm(u8 acm_mask, u8 priority);

/* pending xmitbuf queue management and the SDIO xmit thread */
void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
void enqueue_pending_xmitbuf_to_head(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
struct xmit_buf *dequeue_pending_xmitbuf(struct xmit_priv *pxmitpriv);
struct xmit_buf *dequeue_pending_xmitbuf_under_survey(struct xmit_priv *pxmitpriv);
signed int check_pending_xmitbuf(struct xmit_priv *pxmitpriv);
int rtw_xmit_thread(void *context);

u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);

/* wait for / signal the tx-ack report tracked in ack_tx_ops */
int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
497 | |
498 | /* include after declaring struct xmit_buf, in order to avoid warning */ |
499 | #include <xmit_osdep.h> |
500 | |
#endif /* _RTW_XMIT_H_ */
502 | |