1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause |
2 | /* Copyright(c) 2022-2023 Realtek Corporation |
3 | */ |
4 | |
5 | #include "coex.h" |
6 | #include "debug.h" |
7 | #include "mac.h" |
8 | #include "phy.h" |
9 | #include "reg.h" |
10 | #include "rtw8851b.h" |
11 | #include "rtw8851b_rfk.h" |
12 | #include "rtw8851b_rfk_table.h" |
13 | #include "rtw8851b_table.h" |
14 | |
/* DPK (digital pre-distortion K) / IQK / TSSI dimensioning constants.
 * Values are specific to the RTL8851B, a single-RF-path (1x1) chip.
 */
#define DPK_VER_8851B 0x5		/* DPK algorithm version reported in debug logs */
#define DPK_KIP_REG_NUM_8851B 7		/* entries in dpk_kip_reg[] backup list */
#define DPK_RF_REG_NUM_8851B 4		/* entries in dpk_rf_reg[] backup list */
#define DPK_KSET_NUM 4
#define RTW8851B_RXK_GROUP_NR 4		/* RX IQK gain groups (2 GHz uses all four) */
#define RTW8851B_RXK_GROUP_IDX_NR 2	/* RX IQK groups actually used on 5 GHz */
#define RTW8851B_TXK_GROUP_NR 1		/* TX IQK gain groups */
#define RTW8851B_IQK_VER 0x2a
#define RTW8851B_IQK_SS 1		/* number of spatial streams / RF paths */
#define RTW8851B_LOK_GRAM 10
#define RTW8851B_TSSI_PATH_NR 1

/* TSSI DE (offset) field inside the per-rate TSSI registers */
#define _TSSI_DE_MASK GENMASK(21, 12)
28 | |
/* One-shot command IDs for the DPK calibration engine. The exact values
 * are hardware/firmware-defined opcodes; D_-prefixed entries are
 * presumably the "new" DPK flow variants - semantics per vendor spec.
 */
enum dpk_id {
	LBK_RXIQK = 0x06,
	SYNC = 0x10,
	MDPK_IDL = 0x11,
	MDPK_MPA = 0x12,
	GAIN_LOSS = 0x13,
	GAIN_CAL = 0x14,
	DPK_RXAGC = 0x15,
	KIP_PRESET = 0x16,
	KIP_RESTORE = 0x17,
	DPK_TXAGC = 0x19,
	D_KIP_PRESET = 0x28,
	D_TXAGC = 0x29,
	D_RXAGC = 0x2a,
	D_SYNC = 0x2b,
	D_GAIN_LOSS = 0x2c,
	D_MDPK_IDL = 0x2d,
	D_MDPK_LDL = 0x2e,
	D_GAIN_NORM = 0x2f,
	D_KIP_THERMAL = 0x30,
	D_KIP_RESTORE = 0x31
};
51 | |
/* State-machine steps of the DPK AGC (TX gain search) loop. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,	/* sync and read digital gain */
	DPK_AGC_STEP_GAIN_LOSS_IDX,	/* run gain-loss measurement */
	DPK_AGC_STEP_GL_GT_CRITERION,	/* gain loss above target: reduce gain */
	DPK_AGC_STEP_GL_LT_CRITERION,	/* gain loss below target: raise gain */
	DPK_AGC_STEP_SET_TX_GAIN,	/* program the selected TX gain */
};
59 | |
/* One-shot IQK/LOK command types dispatched by _iqk_one_shot(). */
enum rtw8851b_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,	/* frequency LO leakage K, coarse pass */
	ID_FLOK_FINE = 0x2,	/* frequency LO leakage K, fine pass */
	ID_TXK = 0x3,		/* wide-band TX IQK */
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,		/* wide-band RX IQK */
	ID_NBTXK = 0x6,		/* narrow-band TX IQK */
	ID_NBRXK = 0x7,		/* narrow-band RX IQK */
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,	/* 5 GHz (A-band) coarse LOK */
	ID_G_FLOK_COARSE = 0xa,	/* 2 GHz (G-band) coarse LOK */
	ID_A_FLOK_FINE = 0xb,	/* 5 GHz fine LOK */
	ID_G_FLOK_FINE = 0xc,	/* 2 GHz fine LOK */
	ID_IQK_RESTORE = 0x10,
};
76 | |
/* RF front-end operating modes written to the RR_MOD register. */
enum rf_mode {
	RF_SHUT_DOWN = 0x0,
	RF_STANDBY = 0x1,
	RF_TX = 0x2,
	RF_RX = 0x3,
	RF_TXIQK = 0x4,
	RF_DPK = 0x5,
	RF_RXK1 = 0x6,
	RF_RXK2 = 0x7,
};
87 | |
/* Per-path TSSI DE register addresses, indexed by RF path (8851B has one). */
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8851B] = {0x5858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8851B] = {0x5860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8851B] = {0x5838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8851B] = {0x5840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8851B] = {0x5848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8851B] = {0x5850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8851B] = {0x5828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8851B] = {0x5830};
/* RX IQK gain/attenuation/AGC settings per group: g_* for 2 GHz (four
 * groups), a_* for 5 GHz (two groups); a_*/g_* TX tables have one group.
 */
static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296};
static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf};
static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3};
static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x28c};
static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf};
static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x6};
static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a};
static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10};
static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
110 | |
/* BB/RF registers saved before and restored after calibration. */
static const u32 rtw8851b_backup_bb_regs[] = {0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8};
static const u32 rtw8851b_backup_rf_regs[] = {
	0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};

#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8851b_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8851b_backup_rf_regs)

/* KIP/RF registers saved around DPK runs. */
static const u32 dpk_kip_reg[DPK_KIP_REG_NUM_8851B] = {
	0x813c, 0x8124, 0xc0ec, 0xc0e8, 0xc0c4, 0xc0d4, 0xc0d8};
static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005};
121 | |
122 | static void _set_ch(struct rtw89_dev *rtwdev, u32 val); |
123 | |
124 | static u8 _rxk_5ghz_group_from_idx(u8 idx) |
125 | { |
126 | /* There are four RXK groups (RTW8851B_RXK_GROUP_NR), but only group 0 |
127 | * and 2 are used in 5 GHz band, so reduce elements to 2. |
128 | */ |
129 | if (idx < RTW8851B_RXK_GROUP_IDX_NR) |
130 | return idx * 2; |
131 | |
132 | return 0; |
133 | } |
134 | |
135 | static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) |
136 | { |
137 | return RF_A; |
138 | } |
139 | |
140 | static void _adc_fifo_rst(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, |
141 | u8 path) |
142 | { |
143 | rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x0101); |
144 | fsleep(usecs: 10); |
145 | rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, data: 0x1111); |
146 | } |
147 | |
148 | static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev, |
149 | enum rtw89_rf_path path, bool is_bybb) |
150 | { |
151 | if (is_bybb) |
152 | rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x1); |
153 | else |
154 | rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x0); |
155 | } |
156 | |
157 | static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev, |
158 | enum rtw89_rf_path path, bool is_bybb) |
159 | { |
160 | if (is_bybb) |
161 | rtw89_write_rf(rtwdev, rf_path: path, RR_BBDC, RR_BBDC_SEL, data: 0x1); |
162 | else |
163 | rtw89_write_rf(rtwdev, rf_path: path, RR_BBDC, RR_BBDC_SEL, data: 0x0); |
164 | } |
165 | |
166 | static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) |
167 | { |
168 | u32 rf_mode; |
169 | u8 path; |
170 | int ret; |
171 | |
172 | for (path = 0; path < RF_PATH_MAX; path++) { |
173 | if (!(kpath & BIT(path))) |
174 | continue; |
175 | |
176 | ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, |
177 | rf_mode != 2, 2, 5000, false, |
178 | rtwdev, path, 0x00, RR_MOD_MASK); |
179 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
180 | fmt: "[RFK] Wait S%d to Rx mode!! (ret = %d)\n" , |
181 | path, ret); |
182 | } |
183 | } |
184 | |
185 | static void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) |
186 | { |
187 | rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, data: 0x0); |
188 | rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, data: 0x1); |
189 | } |
190 | |
191 | static void _drck(struct rtw89_dev *rtwdev) |
192 | { |
193 | u32 rck_d; |
194 | u32 val; |
195 | int ret; |
196 | |
197 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]Ddie RCK start!!!\n" ); |
198 | |
199 | rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, data: 0x1); |
200 | rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, data: 0x1); |
201 | |
202 | ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, |
203 | 1, 10000, false, |
204 | rtwdev, R_DRCK_RES, B_DRCK_POL); |
205 | if (ret) |
206 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DRCK timeout\n" ); |
207 | |
208 | rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, data: 0x0); |
209 | rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, data: 0x1); |
210 | udelay(1); |
211 | rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, data: 0x0); |
212 | |
213 | rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, mask: 0x7c00); |
214 | rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, data: 0x0); |
215 | rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, data: rck_d); |
216 | |
217 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0xc0c4 = 0x%x\n" , |
218 | rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); |
219 | } |
220 | |
221 | static void _addck_backup(struct rtw89_dev *rtwdev) |
222 | { |
223 | struct rtw89_dack_info *dack = &rtwdev->dack; |
224 | |
225 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, data: 0x0); |
226 | |
227 | dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0); |
228 | dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1); |
229 | } |
230 | |
231 | static void _addck_reload(struct rtw89_dev *rtwdev) |
232 | { |
233 | struct rtw89_dack_info *dack = &rtwdev->dack; |
234 | |
235 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, data: dack->addck_d[0][0]); |
236 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, data: dack->addck_d[0][1]); |
237 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, data: 0x3); |
238 | } |
239 | |
240 | static void _dack_backup_s0(struct rtw89_dev *rtwdev) |
241 | { |
242 | struct rtw89_dack_info *dack = &rtwdev->dack; |
243 | u8 i; |
244 | |
245 | rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x1); |
246 | |
247 | for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { |
248 | rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, data: i); |
249 | dack->msbk_d[0][0][i] = |
250 | rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0); |
251 | |
252 | rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, data: i); |
253 | dack->msbk_d[0][1][i] = |
254 | rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1); |
255 | } |
256 | |
257 | dack->biask_d[0][0] = |
258 | rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00); |
259 | dack->biask_d[0][1] = |
260 | rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01); |
261 | dack->dadck_d[0][0] = |
262 | rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00) + 24; |
263 | dack->dadck_d[0][1] = |
264 | rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01) + 24; |
265 | } |
266 | |
267 | static void _dack_reload_by_path(struct rtw89_dev *rtwdev, |
268 | enum rtw89_rf_path path, u8 index) |
269 | { |
270 | struct rtw89_dack_info *dack = &rtwdev->dack; |
271 | u32 idx_offset, path_offset; |
272 | u32 offset, reg; |
273 | u32 tmp; |
274 | u8 i; |
275 | |
276 | if (index == 0) |
277 | idx_offset = 0; |
278 | else |
279 | idx_offset = 0x14; |
280 | |
281 | if (path == RF_PATH_A) |
282 | path_offset = 0; |
283 | else |
284 | path_offset = 0x28; |
285 | |
286 | offset = idx_offset + path_offset; |
287 | |
288 | rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, data: 0x1); |
289 | rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, data: 0x1); |
290 | |
291 | /* msbk_d: 15/14/13/12 */ |
292 | tmp = 0x0; |
293 | for (i = 0; i < 4; i++) |
294 | tmp |= dack->msbk_d[path][index][i + 12] << (i * 8); |
295 | reg = 0xc200 + offset; |
296 | rtw89_phy_write32(rtwdev, addr: reg, data: tmp); |
297 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n" , reg, |
298 | rtw89_phy_read32_mask(rtwdev, addr: reg, MASKDWORD)); |
299 | |
300 | /* msbk_d: 11/10/9/8 */ |
301 | tmp = 0x0; |
302 | for (i = 0; i < 4; i++) |
303 | tmp |= dack->msbk_d[path][index][i + 8] << (i * 8); |
304 | reg = 0xc204 + offset; |
305 | rtw89_phy_write32(rtwdev, addr: reg, data: tmp); |
306 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n" , reg, |
307 | rtw89_phy_read32_mask(rtwdev, addr: reg, MASKDWORD)); |
308 | |
309 | /* msbk_d: 7/6/5/4 */ |
310 | tmp = 0x0; |
311 | for (i = 0; i < 4; i++) |
312 | tmp |= dack->msbk_d[path][index][i + 4] << (i * 8); |
313 | reg = 0xc208 + offset; |
314 | rtw89_phy_write32(rtwdev, addr: reg, data: tmp); |
315 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n" , reg, |
316 | rtw89_phy_read32_mask(rtwdev, addr: reg, MASKDWORD)); |
317 | |
318 | /* msbk_d: 3/2/1/0 */ |
319 | tmp = 0x0; |
320 | for (i = 0; i < 4; i++) |
321 | tmp |= dack->msbk_d[path][index][i] << (i * 8); |
322 | reg = 0xc20c + offset; |
323 | rtw89_phy_write32(rtwdev, addr: reg, data: tmp); |
324 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n" , reg, |
325 | rtw89_phy_read32_mask(rtwdev, addr: reg, MASKDWORD)); |
326 | |
327 | /* dadak_d/biask_d */ |
328 | tmp = 0x0; |
329 | tmp = (dack->biask_d[path][index] << 22) | |
330 | (dack->dadck_d[path][index] << 14); |
331 | reg = 0xc210 + offset; |
332 | rtw89_phy_write32(rtwdev, addr: reg, data: tmp); |
333 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x=0x%x\n" , reg, |
334 | rtw89_phy_read32_mask(rtwdev, addr: reg, MASKDWORD)); |
335 | |
336 | rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + offset, B_DACKN0_EN, data: 0x1); |
337 | } |
338 | |
339 | static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) |
340 | { |
341 | u8 index; |
342 | |
343 | for (index = 0; index < 2; index++) |
344 | _dack_reload_by_path(rtwdev, path, index); |
345 | } |
346 | |
347 | static void _addck(struct rtw89_dev *rtwdev) |
348 | { |
349 | struct rtw89_dack_info *dack = &rtwdev->dack; |
350 | u32 val; |
351 | int ret; |
352 | |
353 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, data: 0x1); |
354 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, data: 0x1); |
355 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, data: 0x0); |
356 | udelay(1); |
357 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, data: 0x1); |
358 | |
359 | ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, |
360 | 1, 10000, false, |
361 | rtwdev, R_ADDCKR0, BIT(0)); |
362 | if (ret) { |
363 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 ADDCK timeout\n" ); |
364 | dack->addck_timeout[0] = true; |
365 | } |
366 | |
367 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]ADDCK ret = %d\n" , ret); |
368 | |
369 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, data: 0x0); |
370 | } |
371 | |
372 | static void _new_dadck(struct rtw89_dev *rtwdev) |
373 | { |
374 | struct rtw89_dack_info *dack = &rtwdev->dack; |
375 | u32 i_dc, q_dc, ic, qc; |
376 | u32 val; |
377 | int ret; |
378 | |
379 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_dadck_setup_defs_tbl); |
380 | |
381 | ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, |
382 | 1, 10000, false, |
383 | rtwdev, R_ADDCKR0, BIT(0)); |
384 | if (ret) { |
385 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 DADCK timeout\n" ); |
386 | dack->addck_timeout[0] = true; |
387 | } |
388 | |
389 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DADCK ret = %d\n" , ret); |
390 | |
391 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, data: 0x0); |
392 | i_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); |
393 | rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, data: 0x1); |
394 | q_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC); |
395 | |
396 | ic = 0x80 - sign_extend32(value: i_dc, index: 11) * 6; |
397 | qc = 0x80 - sign_extend32(value: q_dc, index: 11) * 6; |
398 | |
399 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
400 | fmt: "[DACK]before DADCK, i_dc=0x%x, q_dc=0x%x\n" , i_dc, q_dc); |
401 | |
402 | dack->dadck_d[0][0] = ic; |
403 | dack->dadck_d[0][1] = qc; |
404 | |
405 | rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_V, data: dack->dadck_d[0][0]); |
406 | rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_V, data: dack->dadck_d[0][1]); |
407 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
408 | fmt: "[DACK]after DADCK, 0xc210=0x%x, 0xc224=0x%x\n" , |
409 | rtw89_phy_read32_mask(rtwdev, R_DACKN0_CTL, MASKDWORD), |
410 | rtw89_phy_read32_mask(rtwdev, R_DACKN1_CTL, MASKDWORD)); |
411 | |
412 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_dadck_post_defs_tbl); |
413 | } |
414 | |
415 | static bool _dack_s0_poll(struct rtw89_dev *rtwdev) |
416 | { |
417 | if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || |
418 | rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || |
419 | rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || |
420 | rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) |
421 | return false; |
422 | |
423 | return true; |
424 | } |
425 | |
426 | static void _dack_s0(struct rtw89_dev *rtwdev) |
427 | { |
428 | struct rtw89_dack_info *dack = &rtwdev->dack; |
429 | bool done; |
430 | int ret; |
431 | |
432 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_dack_s0_1_defs_tbl); |
433 | _dack_reset(rtwdev, path: RF_PATH_A); |
434 | rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, data: 0x1); |
435 | |
436 | ret = read_poll_timeout_atomic(_dack_s0_poll, done, done, |
437 | 1, 10000, false, rtwdev); |
438 | if (ret) { |
439 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 DACK timeout\n" ); |
440 | dack->msbk_timeout[0] = true; |
441 | } |
442 | |
443 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DACK ret = %d\n" , ret); |
444 | |
445 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_dack_s0_2_defs_tbl); |
446 | |
447 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]after S0 DADCK\n" ); |
448 | |
449 | _dack_backup_s0(rtwdev); |
450 | _dack_reload(rtwdev, path: RF_PATH_A); |
451 | |
452 | rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, data: 0x0); |
453 | } |
454 | |
/* DACK entry point - the 8851B only has path S0 to calibrate. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
}
459 | |
460 | static void _dack_dump(struct rtw89_dev *rtwdev) |
461 | { |
462 | struct rtw89_dack_info *dack = &rtwdev->dack; |
463 | u8 i; |
464 | u8 t; |
465 | |
466 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n" , |
467 | dack->addck_d[0][0], dack->addck_d[0][1]); |
468 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n" , |
469 | dack->dadck_d[0][0], dack->dadck_d[0][1]); |
470 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n" , |
471 | dack->biask_d[0][0], dack->biask_d[0][1]); |
472 | |
473 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 MSBK ic:\n" ); |
474 | for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { |
475 | t = dack->msbk_d[0][0][i]; |
476 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x\n" , t); |
477 | } |
478 | |
479 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]S0 MSBK qc:\n" ); |
480 | for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { |
481 | t = dack->msbk_d[0][1][i]; |
482 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]0x%x\n" , t); |
483 | } |
484 | } |
485 | |
486 | static void _dack_manual_off(struct rtw89_dev *rtwdev) |
487 | { |
488 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_dack_manual_off_defs_tbl); |
489 | } |
490 | |
491 | static void _dac_cal(struct rtw89_dev *rtwdev, bool force) |
492 | { |
493 | struct rtw89_dack_info *dack = &rtwdev->dack; |
494 | u32 rf0_0; |
495 | |
496 | dack->dack_done = false; |
497 | |
498 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DACK 0x2\n" ); |
499 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DACK start!!!\n" ); |
500 | rf0_0 = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RFREG_MASK); |
501 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]RF0=0x%x\n" , rf0_0); |
502 | |
503 | _drck(rtwdev); |
504 | _dack_manual_off(rtwdev); |
505 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RFREG_MASK, data: 0x337e1); |
506 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RR_RSV1_RST, data: 0x0); |
507 | |
508 | _addck(rtwdev); |
509 | _addck_backup(rtwdev); |
510 | _addck_reload(rtwdev); |
511 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RFREG_MASK, data: 0x40001); |
512 | |
513 | _dack(rtwdev); |
514 | _new_dadck(rtwdev); |
515 | _dack_dump(rtwdev); |
516 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RR_RSV1_RST, data: 0x1); |
517 | |
518 | dack->dack_done = true; |
519 | dack->dack_cnt++; |
520 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DACK]DACK finish!!!\n" ); |
521 | } |
522 | |
523 | static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
524 | enum rtw89_rf_path path, bool is_afe) |
525 | { |
526 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
527 | |
528 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
529 | fmt: "[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n" , path, |
530 | chan->band_type == RTW89_BAND_2G ? "2G" : |
531 | chan->band_type == RTW89_BAND_5G ? "5G" : "6G" , |
532 | chan->channel, |
533 | chan->band_width == RTW89_CHANNEL_WIDTH_20 ? "20M" : |
534 | chan->band_width == RTW89_CHANNEL_WIDTH_40 ? "40M" : "80M" , |
535 | is_afe ? "AFE" : "RFC" ); |
536 | } |
537 | |
538 | static void _rxbb_ofst_swap(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) |
539 | { |
540 | u32 val, val_i, val_q; |
541 | |
542 | val_i = rtw89_read_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_S1); |
543 | val_q = rtw89_read_rf(rtwdev, rf_path: path, RR_DCK1, RR_DCK1_S1); |
544 | |
545 | val = val_q << 4 | val_i; |
546 | |
547 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE2, RR_LUTWE2_DIS, data: 0x1); |
548 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWA, RFREG_MASK, data: rf_mode); |
549 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RFREG_MASK, data: val); |
550 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE2, RR_LUTWE2_DIS, data: 0x0); |
551 | |
552 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
553 | fmt: "[RX_DCK] val_i = 0x%x, val_q = 0x%x, 0x3F = 0x%x\n" , |
554 | val_i, val_q, val); |
555 | } |
556 | |
557 | static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf_mode) |
558 | { |
559 | u32 val; |
560 | int ret; |
561 | |
562 | rtw89_write_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_LV, data: 0x0); |
563 | rtw89_write_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_LV, data: 0x1); |
564 | |
565 | ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, |
566 | 2, 2000, false, |
567 | rtwdev, path, RR_DCK, BIT(8)); |
568 | |
569 | rtw89_write_rf(rtwdev, rf_path: path, RR_DCK, RR_DCK_LV, data: 0x0); |
570 | |
571 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RX_DCK] S%d RXDCK finish (ret = %d)\n" , |
572 | path, ret); |
573 | |
574 | _rxbb_ofst_swap(rtwdev, path, rf_mode); |
575 | } |
576 | |
577 | static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe) |
578 | { |
579 | u32 rf_reg5; |
580 | u8 path; |
581 | |
582 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
583 | fmt: "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n" , |
584 | 0x2, rtwdev->hal.cv); |
585 | |
586 | for (path = 0; path < RF_PATH_NUM_8851B; path++) { |
587 | _rx_dck_info(rtwdev, phy, path, is_afe); |
588 | |
589 | rf_reg5 = rtw89_read_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK); |
590 | |
591 | if (rtwdev->is_tssi_mode[path]) |
592 | rtw89_phy_write32_mask(rtwdev, |
593 | R_P0_TSSI_TRK + (path << 13), |
594 | B_P0_TSSI_TRK_EN, data: 0x1); |
595 | |
596 | rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x0); |
597 | rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, data: RF_RX); |
598 | _set_rx_dck(rtwdev, path, rf_mode: RF_RX); |
599 | rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK, data: rf_reg5); |
600 | |
601 | if (rtwdev->is_tssi_mode[path]) |
602 | rtw89_phy_write32_mask(rtwdev, |
603 | R_P0_TSSI_TRK + (path << 13), |
604 | B_P0_TSSI_TRK_EN, data: 0x0); |
605 | } |
606 | } |
607 | |
608 | static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path) |
609 | { |
610 | u32 i; |
611 | |
612 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__); |
613 | |
614 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, data: 0x00020000); |
615 | rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, data: 0x80000000); |
616 | rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, data: 0x00000080); |
617 | rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, data: 0x00010000); |
618 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, data: 0x009); |
619 | |
620 | for (i = 0; i <= 0x9f; i++) { |
621 | rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, |
622 | data: 0x00010000 + i); |
623 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]0x%x\n" , |
624 | rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI)); |
625 | } |
626 | |
627 | for (i = 0; i <= 0x9f; i++) { |
628 | rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, |
629 | data: 0x00010000 + i); |
630 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]0x%x\n" , |
631 | rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ)); |
632 | } |
633 | |
634 | rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, data: 0x00000000); |
635 | rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, data: 0x00000000); |
636 | } |
637 | |
638 | static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) |
639 | { |
640 | rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, data: 0xc); |
641 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RR_RXKPLL_POW, data: 0x0); |
642 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RR_RXKPLL_POW, data: 0x1); |
643 | } |
644 | |
645 | static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path) |
646 | { |
647 | bool fail1 = false, fail2 = false; |
648 | u32 val; |
649 | int ret; |
650 | |
651 | ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, |
652 | 10, 8200, false, |
653 | rtwdev, 0xbff8, MASKBYTE0); |
654 | if (ret) { |
655 | fail1 = true; |
656 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
657 | fmt: "[IQK]NCTL1 IQK timeout!!!\n" ); |
658 | } |
659 | |
660 | fsleep(usecs: 10); |
661 | |
662 | ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, |
663 | 10, 200, false, |
664 | rtwdev, R_RPT_COM, B_RPT_COM_RDY); |
665 | if (ret) { |
666 | fail2 = true; |
667 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
668 | fmt: "[IQK]NCTL2 IQK timeout!!!\n" ); |
669 | } |
670 | |
671 | fsleep(usecs: 10); |
672 | rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, data: 0x0); |
673 | |
674 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
675 | fmt: "[IQK]S%x, ret = %d, notready = %x fail=%d,%d\n" , |
676 | path, ret, fail1 || fail2, fail1, fail2); |
677 | |
678 | return fail1 || fail2; |
679 | } |
680 | |
681 | static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, |
682 | u8 path, u8 ktype) |
683 | { |
684 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
685 | bool notready; |
686 | u32 iqk_cmd; |
687 | |
688 | switch (ktype) { |
689 | case ID_A_FLOK_COARSE: |
690 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
691 | fmt: "[IQK]============ S%d ID_A_FLOK_COARSE ============\n" , path); |
692 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1); |
693 | iqk_cmd = 0x108 | (1 << (4 + path)); |
694 | break; |
695 | case ID_G_FLOK_COARSE: |
696 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
697 | fmt: "[IQK]============ S%d ID_G_FLOK_COARSE ============\n" , path); |
698 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1); |
699 | iqk_cmd = 0x108 | (1 << (4 + path)); |
700 | break; |
701 | case ID_A_FLOK_FINE: |
702 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
703 | fmt: "[IQK]============ S%d ID_A_FLOK_FINE ============\n" , path); |
704 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1); |
705 | iqk_cmd = 0x308 | (1 << (4 + path)); |
706 | break; |
707 | case ID_G_FLOK_FINE: |
708 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
709 | fmt: "[IQK]============ S%d ID_G_FLOK_FINE ============\n" , path); |
710 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1); |
711 | iqk_cmd = 0x308 | (1 << (4 + path)); |
712 | break; |
713 | case ID_TXK: |
714 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
715 | fmt: "[IQK]============ S%d ID_TXK ============\n" , path); |
716 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x0); |
717 | iqk_cmd = 0x008 | (1 << (path + 4)) | |
718 | (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); |
719 | break; |
720 | case ID_RXAGC: |
721 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
722 | fmt: "[IQK]============ S%d ID_RXAGC ============\n" , path); |
723 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1); |
724 | iqk_cmd = 0x708 | (1 << (4 + path)) | (path << 1); |
725 | break; |
726 | case ID_RXK: |
727 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
728 | fmt: "[IQK]============ S%d ID_RXK ============\n" , path); |
729 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1); |
730 | iqk_cmd = 0x008 | (1 << (path + 4)) | |
731 | (((0xc + iqk_info->iqk_bw[path]) & 0xf) << 8); |
732 | break; |
733 | case ID_NBTXK: |
734 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
735 | fmt: "[IQK]============ S%d ID_NBTXK ============\n" , path); |
736 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x0); |
737 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, |
738 | data: 0x00b); |
739 | iqk_cmd = 0x408 | (1 << (4 + path)); |
740 | break; |
741 | case ID_NBRXK: |
742 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
743 | fmt: "[IQK]============ S%d ID_NBRXK ============\n" , path); |
744 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1); |
745 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, |
746 | data: 0x011); |
747 | iqk_cmd = 0x608 | (1 << (4 + path)); |
748 | break; |
749 | default: |
750 | return false; |
751 | } |
752 | |
753 | rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: iqk_cmd + 1); |
754 | notready = _iqk_check_cal(rtwdev, path); |
755 | if (iqk_info->iqk_sram_en && |
756 | (ktype == ID_NBRXK || ktype == ID_RXK)) |
757 | _iqk_sram(rtwdev, path); |
758 | |
759 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x0); |
760 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
761 | fmt: "[IQK]S%x, ktype= %x, id = %x, notready = %x\n" , |
762 | path, ktype, iqk_cmd + 1, notready); |
763 | |
764 | return notready; |
765 | } |
766 | |
767 | static bool _rxk_2g_group_sel(struct rtw89_dev *rtwdev, |
768 | enum rtw89_phy_idx phy_idx, u8 path) |
769 | { |
770 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
771 | bool kfail = false; |
772 | bool notready; |
773 | u32 rf_0; |
774 | u8 gp; |
775 | |
776 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__); |
777 | |
778 | for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) { |
779 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, gp = %x\n" , path, gp); |
780 | |
781 | rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RGM, data: g_idxrxgain[gp]); |
782 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB, RR_RXBB_C2, data: g_idxattc2[gp]); |
783 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1); |
784 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x0); |
785 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, data: gp); |
786 | |
787 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013); |
788 | fsleep(usecs: 10); |
789 | rf_0 = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK); |
790 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, data: rf_0); |
791 | rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, data: g_idxrxagc[gp]); |
792 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x11); |
793 | |
794 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXAGC); |
795 | |
796 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
797 | fmt: "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n" , path, |
798 | rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), |
799 | rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, mask: 0x003e0)); |
800 | |
801 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RR_RXKPLL_OFF, data: 0x13); |
802 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x011); |
803 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBRXK); |
804 | iqk_info->nb_rxcfir[path] = |
805 | rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; |
806 | |
807 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXK); |
808 | |
809 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
810 | fmt: "[IQK]S%x, WBRXK 0x8008 = 0x%x\n" , path, |
811 | rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); |
812 | } |
813 | |
814 | if (!notready) |
815 | kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); |
816 | |
817 | if (kfail) |
818 | _iqk_sram(rtwdev, path); |
819 | |
820 | if (kfail) { |
821 | rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), |
822 | MASKDWORD, data: iqk_info->nb_rxcfir[path] | 0x2); |
823 | iqk_info->is_wb_txiqk[path] = false; |
824 | } else { |
825 | rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), |
826 | MASKDWORD, data: 0x40000000); |
827 | iqk_info->is_wb_txiqk[path] = true; |
828 | } |
829 | |
830 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
831 | fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n" , path, kfail, |
832 | 1 << path, iqk_info->nb_rxcfir[path]); |
833 | return kfail; |
834 | } |
835 | |
836 | static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev, |
837 | enum rtw89_phy_idx phy_idx, u8 path) |
838 | { |
839 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
840 | bool kfail = false; |
841 | bool notready; |
842 | u32 rf_0; |
843 | u8 idx; |
844 | u8 gp; |
845 | |
846 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__); |
847 | |
848 | for (idx = 0; idx < RTW8851B_RXK_GROUP_IDX_NR; idx++) { |
849 | gp = _rxk_5ghz_group_from_idx(idx); |
850 | |
851 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, gp = %x\n" , path, gp); |
852 | |
853 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RR_MOD_RGM, data: a_idxrxgain[idx]); |
854 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RXA2, RR_RXA2_ATT, data: a_idxattc2[idx]); |
855 | |
856 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1); |
857 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x0); |
858 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, data: gp); |
859 | |
860 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013); |
861 | fsleep(usecs: 100); |
862 | rf_0 = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK); |
863 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, data: rf_0); |
864 | rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, data: a_idxrxagc[idx]); |
865 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x11); |
866 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXAGC); |
867 | |
868 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
869 | fmt: "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n" , path, |
870 | rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD), |
871 | rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RXB)); |
872 | |
873 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RR_RXKPLL_OFF, data: 0x13); |
874 | rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x011); |
875 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBRXK); |
876 | iqk_info->nb_rxcfir[path] = |
877 | rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2; |
878 | |
879 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
880 | fmt: "[IQK]S%x, NBRXK 0x8008 = 0x%x\n" , path, |
881 | rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); |
882 | |
883 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXK); |
884 | |
885 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
886 | fmt: "[IQK]S%x, WBRXK 0x8008 = 0x%x\n" , path, |
887 | rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD)); |
888 | } |
889 | |
890 | if (!notready) |
891 | kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); |
892 | |
893 | if (kfail) |
894 | _iqk_sram(rtwdev, path); |
895 | |
896 | if (kfail) { |
897 | rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, |
898 | data: iqk_info->nb_rxcfir[path] | 0x2); |
899 | iqk_info->is_wb_txiqk[path] = false; |
900 | } else { |
901 | rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, |
902 | data: 0x40000000); |
903 | iqk_info->is_wb_txiqk[path] = true; |
904 | } |
905 | |
906 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
907 | fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n" , path, kfail, |
908 | 1 << path, iqk_info->nb_rxcfir[path]); |
909 | return kfail; |
910 | } |
911 | |
/* Narrow-band RX IQ calibration (5 GHz) for @path.
 *
 * Unlike _rxk_5g_group_sel(), only a single RX gain group (idx = 1) is
 * calibrated and no wideband RXK one-shot is issued; the resulting
 * narrow-band RX CFIR is cached in iqk_info->nb_rxcfir[path].
 *
 * Returns true when the calibration reported failure.
 */
static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 idx = 0x1;	/* fixed gain-group index used for the NB pass */
	u32 rf_0;
	u8 gp;

	gp = _rxk_5ghz_group_from_idx(idx);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, gp = %x\n" , path, gp);

	/* RF side: RX gain and attenuation for the selected group */
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RR_MOD_RGM, data: a_idxrxgain[idx]);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RXA2, RR_RXA2_ATT, data: a_idxattc2[idx]);

	/* KIP side: select the CFIR LUT entry for this group */
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, data: gp);

	/* Enable the RX PLL for calibration, then mirror the RF mode word
	 * into the IQK engine before running RX AGC.
	 */
	rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013);
	fsleep(usecs: 100);
	rf_0 = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, data: rf_0);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, data: a_idxrxagc[idx]);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x11);
	notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXAGC);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n" , path,
		    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
		    rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, mask: 0x003e0));

	/* Narrow-band RXK one-shot; cache the CFIR result */
	rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RR_RXKPLL_OFF, data: 0x13);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x011);
	notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBRXK);
	iqk_info->nb_rxcfir[path] =
		rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, NBRXK 0x8008 = 0x%x\n" , path,
		    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, WBRXK 0x8008 = 0x%x\n" ,
		    path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));

	/* Only the last one-shot's readiness gates the failure check */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	/* On failure program a default RX CFIR.  Both branches leave
	 * is_wb_rxiqk false because only the narrow-band cal ran here.
	 */
	if (kfail) {
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, data: 0x40000002);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->is_wb_rxiqk[path] = false;
	}

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n" , path, kfail,
		    1 << path, iqk_info->nb_rxcfir[path]);

	return kfail;
}
977 | |
/* Narrow-band RX IQ calibration (2 GHz) for @path.
 *
 * Only a single RX gain group (gp = 3) is calibrated and no wideband RXK
 * one-shot is issued; the resulting narrow-band RX CFIR is cached in
 * iqk_info->nb_rxcfir[path].
 *
 * Returns true when the calibration reported failure.
 */
static bool _iqk_2g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp = 0x3;	/* fixed gain group used for the NB pass */
	u32 rf_0;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, gp = %x\n" , path, gp);

	/* RF side: RX gain and attenuation for the selected group */
	rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RGM, data: g_idxrxgain[gp]);
	rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB, RR_RXBB_C2, data: g_idxattc2[gp]);
	/* KIP side: select the CFIR LUT entry for this group */
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, data: gp);

	/* Enable the RX PLL for calibration, then mirror the RF mode word
	 * into the IQK engine before running RX AGC.
	 */
	rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RFREG_MASK, data: 0x80013);
	fsleep(usecs: 10);
	rf_0 = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, data: rf_0);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, data: g_idxrxagc[gp]);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x11);
	notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_RXAGC);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n" ,
		    path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
		    rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, mask: 0x003e0));

	/* Narrow-band RXK one-shot; cache the CFIR result */
	rtw89_write_rf(rtwdev, rf_path: path, RR_RXKPLL, RR_RXKPLL_OFF, data: 0x13);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, data: 0x011);
	notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBRXK);
	iqk_info->nb_rxcfir[path] =
		rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, NBRXK 0x8008 = 0x%x\n" , path,
		    rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, WBRXK 0x8008 = 0x%x\n" ,
		    path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));

	/* Only the last one-shot's readiness gates the failure check */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	/* On failure program a default RX CFIR.  Both branches leave
	 * is_wb_rxiqk false because only the narrow-band cal ran here.
	 */
	if (kfail) {
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, data: 0x40000002);
		iqk_info->is_wb_rxiqk[path] = false;
	} else {
		iqk_info->is_wb_rxiqk[path] = false;
	}

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n" , path, kfail,
		    1 << path, iqk_info->nb_rxcfir[path]);
	return kfail;
}
1038 | |
1039 | static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) |
1040 | { |
1041 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
1042 | |
1043 | rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB2, RR_RXBB2_CKT, data: 0x1); |
1044 | |
1045 | if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) |
1046 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_iqk_rxclk_80_defs_tbl); |
1047 | else |
1048 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_iqk_rxclk_others_defs_tbl); |
1049 | } |
1050 | |
/* Wideband TX IQ calibration (5 GHz) for @path.
 *
 * For each TXK gain group (8851B defines only one) program the TX gain
 * triplet and CFIR LUT entry, run a narrow-band TXK (its CFIR is cached
 * in iqk_info->nb_txcfir[] as fallback) followed by the wideband TXK
 * one-shot.  On failure the cached narrow-band TX CFIR is written back;
 * on success a default wideband CFIR is programmed and is_wb_txiqk set.
 *
 * Returns true when calibration failed.
 */
static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
			      enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);

	for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
		/* TX gain: power range / track range / BB gain for group */
		rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: a_power_range[gp]);
		rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: a_track_range[gp]);
		rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: a_gain_bb[gp]);

		/* Select the TX CFIR LUT entry for this group */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, data: 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, data: gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, data: a_itqt[gp]);

		/* Narrow-band TXK; cache its CFIR as the kfail fallback */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;

		/* Wideband TXK one-shot for this group */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, data: a_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_TXK);
	}

	/* Only the final one-shot of the last group gates the failure check */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, data: iqk_info->nb_txcfir[path] | 0x2);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, data: 0x40000000);
		iqk_info->is_wb_txiqk[path] = true;
	}

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n" , path, kfail,
		    1 << path, iqk_info->nb_txcfir[path]);
	return kfail;
}
1100 | |
/* Wideband TX IQ calibration (2 GHz) for @path.
 *
 * Same flow as _txk_5g_group_sel() but with the 2 GHz gain/itqt tables.
 * Narrow-band TXK result is cached in iqk_info->nb_txcfir[] as fallback
 * before the wideband TXK one-shot runs.
 *
 * Returns true when calibration failed.
 */
static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
			      enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool kfail = false;
	bool notready;
	u8 gp;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);

	for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
		/* TX gain: power range / track range / BB gain for group */
		rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: g_power_range[gp]);
		rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: g_track_range[gp]);
		rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: g_gain_bb[gp]);

		/* KIP IQ swap and TX CFIR LUT entry for this group */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, data: g_itqt[gp]);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, data: 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, data: gp);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);

		/* Narrow-band TXK; cache its CFIR as the kfail fallback */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;

		/* Wideband TXK one-shot for this group */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, data: g_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_TXK);
	}

	/* Only the final one-shot of the last group gates the failure check */
	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, data: iqk_info->nb_txcfir[path] | 0x2);
		iqk_info->is_wb_txiqk[path] = false;
	} else {
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, data: 0x40000000);
		iqk_info->is_wb_txiqk[path] = true;
	}

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n" , path, kfail,
		    1 << path, iqk_info->nb_txcfir[path]);
	return kfail;
}
1150 | |
1151 | static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, |
1152 | u8 path) |
1153 | { |
1154 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
1155 | bool kfail = false; |
1156 | bool notready; |
1157 | u8 gp; |
1158 | |
1159 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__); |
1160 | |
1161 | for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { |
1162 | rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: a_power_range[gp]); |
1163 | rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: a_track_range[gp]); |
1164 | rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: a_gain_bb[gp]); |
1165 | |
1166 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1); |
1167 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x1); |
1168 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, data: 0x0); |
1169 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, data: gp); |
1170 | rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00); |
1171 | rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, data: a_itqt[gp]); |
1172 | |
1173 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBTXK); |
1174 | iqk_info->nb_txcfir[path] = |
1175 | rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; |
1176 | } |
1177 | |
1178 | if (!notready) |
1179 | kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); |
1180 | |
1181 | if (kfail) { |
1182 | rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), |
1183 | MASKDWORD, data: 0x40000002); |
1184 | iqk_info->is_wb_rxiqk[path] = false; |
1185 | } else { |
1186 | iqk_info->is_wb_rxiqk[path] = false; |
1187 | } |
1188 | |
1189 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1190 | fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n" , path, kfail, |
1191 | 1 << path, iqk_info->nb_txcfir[path]); |
1192 | return kfail; |
1193 | } |
1194 | |
1195 | static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, |
1196 | u8 path) |
1197 | { |
1198 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
1199 | bool kfail = false; |
1200 | bool notready; |
1201 | u8 gp; |
1202 | |
1203 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__); |
1204 | |
1205 | for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { |
1206 | rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: g_power_range[gp]); |
1207 | rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: g_track_range[gp]); |
1208 | rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: g_gain_bb[gp]); |
1209 | |
1210 | rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, data: g_itqt[gp]); |
1211 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, data: 0x1); |
1212 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, data: 0x1); |
1213 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, data: 0x0); |
1214 | rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, data: gp); |
1215 | rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00); |
1216 | |
1217 | notready = _iqk_one_shot(rtwdev, phy_idx, path, ktype: ID_NBTXK); |
1218 | iqk_info->nb_txcfir[path] = |
1219 | rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), |
1220 | MASKDWORD) | 0x2; |
1221 | } |
1222 | |
1223 | if (!notready) |
1224 | kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); |
1225 | |
1226 | if (kfail) { |
1227 | rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), |
1228 | MASKDWORD, data: 0x40000002); |
1229 | iqk_info->is_wb_rxiqk[path] = false; |
1230 | } else { |
1231 | iqk_info->is_wb_rxiqk[path] = false; |
1232 | } |
1233 | |
1234 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1235 | fmt: "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n" , path, kfail, |
1236 | 1 << path, iqk_info->nb_txcfir[path]); |
1237 | return kfail; |
1238 | } |
1239 | |
/* 2 GHz LO leakage calibration (LOK) on RF path A.
 *
 * For each of the RTW8851B_LOK_GRAM TXBB gain codes, two KIP one-shots are
 * issued (NCTL cmd 0x..109 then 0x..309) and their completion status is
 * OR-ed into @fail.  The per-step RF LOK DAC values are read back for
 * debugging only.
 *
 * Returns true if any one-shot reported failure.
 */
static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			u8 path)
{
	/* TXBB gain codes swept during LOK */
	static const u32 g_txbb[RTW8851B_LOK_GRAM] = {
		0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
	/* Matching KIP IQ-swap setting per gain step */
	static const u32 g_itqt[RTW8851B_LOK_GRAM] = {
		0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b};
	/* RF LUT write address per gain step */
	static const u32 g_wa[RTW8851B_LOK_GRAM] = {
		0x00, 0x04, 0x08, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
	bool fail = false;
	u8 i;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);

	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, data: 0x0);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_TXIG, RR_TXIG_GR0, data: 0x0);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_TXIG, RR_TXIG_GR1, data: 0x6);

	for (i = 0; i < RTW8851B_LOK_GRAM; i++) {
		/* First LOK one-shot (cmd 0x109) at this TXBB gain */
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_TXIG, RR_TXIG_TG, data: g_txbb[i]);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, data: g_wa[i]);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, data: g_itqt[i]);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, data: 0x021);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       data: 0x00000109 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		/* Second LOK one-shot (cmd 0x309) */
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, data: g_itqt[i]);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       data: 0x00000309 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x0);

		/* Dump resulting LOK DAC values for this step (debug only) */
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n" , i,
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_DTXLOK, mask: 0xf8000),
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_DTXLOK, mask: 0x003e0));
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n" , i,
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV2, mask: 0xf0000),
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV2, mask: 0x003c0));
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[IQK]S0, i = %x, 0x58 = %x\n" , i,
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_TXMO, RFREG_MASK));
	}

	return fail;
}
1292 | |
/* 5 GHz LO leakage calibration (LOK) on RF path A.
 *
 * Same structure as _iqk_2g_lok() with the 5 GHz gain/itqt/LUT-address
 * tables and GR1 = 0x7.
 *
 * Returns true if any one-shot reported failure.
 */
static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			u8 path)
{
	/* TXBB gain codes swept during LOK */
	static const u32 a_txbb[RTW8851B_LOK_GRAM] = {
		0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
	/* Matching KIP IQ-swap setting per gain step */
	static const u32 a_itqt[RTW8851B_LOK_GRAM] = {
		0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b, 0x1b, 0x1b, 0x1b};
	/* RF LUT write address per gain step (0x80 base for 5 GHz) */
	static const u32 a_wa[RTW8851B_LOK_GRAM] = {
		0x80, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x97};
	bool fail = false;
	u8 i;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);

	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, data: 0x0);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_TXIG, RR_TXIG_GR0, data: 0x0);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_TXIG, RR_TXIG_GR1, data: 0x7);

	for (i = 0; i < RTW8851B_LOK_GRAM; i++) {
		/* First LOK one-shot (cmd 0x109) at this TXBB gain */
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_TXIG, RR_TXIG_TG, data: a_txbb[i]);
		rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, data: a_wa[i]);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x1);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, data: a_itqt[i]);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, data: 0x021);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       data: 0x00000109 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		/* Second LOK one-shot (cmd 0x309).  NOTE(review): unlike the
		 * 2 GHz variant, B_IQK_DIF4_TXT is rewritten before this
		 * one-shot too — presumably intentional; confirm against the
		 * vendor calibration flow.
		 */
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, data: a_itqt[i]);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, data: 0x021);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
				       data: 0x00000309 | (1 << (4 + path)));
		fail |= _iqk_check_cal(rtwdev, path);

		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, data: 0x0);

		/* Dump resulting LOK DAC values for this step (debug only) */
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n" , i,
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_DTXLOK, mask: 0xf8000),
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_DTXLOK, mask: 0x003e0));
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n" , i,
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV2, mask: 0xf0000),
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV2, mask: 0x003c0));
		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
			    fmt: "[IQK]S0, i = %x, 0x58 = %x\n" , i,
			    rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_TXMO, RFREG_MASK));
	}

	return fail;
}
1346 | |
1347 | static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) |
1348 | { |
1349 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
1350 | |
1351 | switch (iqk_info->iqk_band[path]) { |
1352 | case RTW89_BAND_2G: |
1353 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]RTW89_BAND_2G\n" ); |
1354 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_iqk_txk_2ghz_defs_tbl); |
1355 | break; |
1356 | case RTW89_BAND_5G: |
1357 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]RTW89_BAND_5G\n" ); |
1358 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_iqk_txk_5ghz_defs_tbl); |
1359 | break; |
1360 | default: |
1361 | break; |
1362 | } |
1363 | } |
1364 | |
1365 | #define IQK_LOK_RETRY 1 |
1366 | |
1367 | static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, |
1368 | u8 path) |
1369 | { |
1370 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
1371 | bool lok_is_fail; |
1372 | u8 i; |
1373 | |
1374 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__); |
1375 | |
1376 | for (i = 0; i < IQK_LOK_RETRY; i++) { |
1377 | _iqk_txk_setting(rtwdev, path); |
1378 | if (iqk_info->iqk_band[path] == RTW89_BAND_2G) |
1379 | lok_is_fail = _iqk_2g_lok(rtwdev, phy_idx, path); |
1380 | else |
1381 | lok_is_fail = _iqk_5g_lok(rtwdev, phy_idx, path); |
1382 | |
1383 | if (!lok_is_fail) |
1384 | break; |
1385 | } |
1386 | |
1387 | if (iqk_info->is_nbiqk) { |
1388 | if (iqk_info->iqk_band[path] == RTW89_BAND_2G) |
1389 | iqk_info->iqk_tx_fail[0][path] = |
1390 | _iqk_2g_nbtxk(rtwdev, phy_idx, path); |
1391 | else |
1392 | iqk_info->iqk_tx_fail[0][path] = |
1393 | _iqk_5g_nbtxk(rtwdev, phy_idx, path); |
1394 | } else { |
1395 | if (iqk_info->iqk_band[path] == RTW89_BAND_2G) |
1396 | iqk_info->iqk_tx_fail[0][path] = |
1397 | _txk_2g_group_sel(rtwdev, phy_idx, path); |
1398 | else |
1399 | iqk_info->iqk_tx_fail[0][path] = |
1400 | _txk_5g_group_sel(rtwdev, phy_idx, path); |
1401 | } |
1402 | |
1403 | _iqk_rxclk_setting(rtwdev, path); |
1404 | _iqk_rxk_setting(rtwdev, path); |
1405 | _adc_fifo_rst(rtwdev, phy_idx, path); |
1406 | |
1407 | if (iqk_info->is_nbiqk) { |
1408 | if (iqk_info->iqk_band[path] == RTW89_BAND_2G) |
1409 | iqk_info->iqk_rx_fail[0][path] = |
1410 | _iqk_2g_nbrxk(rtwdev, phy_idx, path); |
1411 | else |
1412 | iqk_info->iqk_rx_fail[0][path] = |
1413 | _iqk_5g_nbrxk(rtwdev, phy_idx, path); |
1414 | } else { |
1415 | if (iqk_info->iqk_band[path] == RTW89_BAND_2G) |
1416 | iqk_info->iqk_rx_fail[0][path] = |
1417 | _rxk_2g_group_sel(rtwdev, phy_idx, path); |
1418 | else |
1419 | iqk_info->iqk_rx_fail[0][path] = |
1420 | _rxk_5g_group_sel(rtwdev, phy_idx, path); |
1421 | } |
1422 | } |
1423 | |
1424 | static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, |
1425 | u32 backup_bb_reg_val[]) |
1426 | { |
1427 | u32 i; |
1428 | |
1429 | for (i = 0; i < BACKUP_BB_REGS_NR; i++) { |
1430 | backup_bb_reg_val[i] = |
1431 | rtw89_phy_read32_mask(rtwdev, addr: rtw8851b_backup_bb_regs[i], |
1432 | MASKDWORD); |
1433 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1434 | fmt: "[RFK]backup bb reg : %x, value =%x\n" , |
1435 | rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); |
1436 | } |
1437 | } |
1438 | |
1439 | static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, |
1440 | u32 backup_rf_reg_val[], u8 rf_path) |
1441 | { |
1442 | u32 i; |
1443 | |
1444 | for (i = 0; i < BACKUP_RF_REGS_NR; i++) { |
1445 | backup_rf_reg_val[i] = |
1446 | rtw89_read_rf(rtwdev, rf_path, |
1447 | addr: rtw8851b_backup_rf_regs[i], RFREG_MASK); |
1448 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1449 | fmt: "[RFK]backup rf S%d reg : %x, value =%x\n" , rf_path, |
1450 | rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); |
1451 | } |
1452 | } |
1453 | |
1454 | static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, |
1455 | const u32 backup_bb_reg_val[]) |
1456 | { |
1457 | u32 i; |
1458 | |
1459 | for (i = 0; i < BACKUP_BB_REGS_NR; i++) { |
1460 | rtw89_phy_write32_mask(rtwdev, addr: rtw8851b_backup_bb_regs[i], |
1461 | MASKDWORD, data: backup_bb_reg_val[i]); |
1462 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1463 | fmt: "[RFK]restore bb reg : %x, value =%x\n" , |
1464 | rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]); |
1465 | } |
1466 | } |
1467 | |
1468 | static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, |
1469 | const u32 backup_rf_reg_val[], u8 rf_path) |
1470 | { |
1471 | u32 i; |
1472 | |
1473 | for (i = 0; i < BACKUP_RF_REGS_NR; i++) { |
1474 | rtw89_write_rf(rtwdev, rf_path, addr: rtw8851b_backup_rf_regs[i], |
1475 | RFREG_MASK, data: backup_rf_reg_val[i]); |
1476 | |
1477 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1478 | fmt: "[RFK]restore rf S%d reg: %x, value =%x\n" , rf_path, |
1479 | rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]); |
1480 | } |
1481 | } |
1482 | |
/* Cache the current channel parameters (band / bandwidth / channel) for
 * @path in iqk_info so the following IQK steps operate on a consistent
 * snapshot even if the channel changes later.
 */
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     u8 path)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0);
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx = 0;	/* 8851B uses a single IQK table entry */

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;
	iqk_info->iqk_table_idx[path] = idx;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n" ,
		    path, phy, rtwdev->dbcc_en ? "on" : "off" ,
		    iqk_info->iqk_band[path] == 0 ? "2G" :
		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G" ,
		    iqk_info->iqk_ch[path],
		    iqk_info->iqk_bw[path] == 0 ? "20M" :
		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M" );
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]times = 0x%x, ch =%x\n" ,
		    iqk_info->iqk_times, idx);
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]S%x, iqk_info->syn1to2= 0x%x\n" ,
		    path, iqk_info->syn1to2);
}
1507 | |
/* Kick off the per-path IQK sequence.  Thin wrapper around _iqk_by_path();
 * kept so the top-level flow mirrors the structure used by the other
 * rtw89 chip RFK implementations.
 */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1513 | |
/* Undo the IQK setup: ask the KIP to restore its pre-calibration state and
 * release the RF/BB debug overrides taken during calibration.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	bool fail;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);

	/* Issue the KIP restore one-shot and poll its completion flag */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: 0x00001219);
	fsleep(usecs: 10);
	fail = _iqk_check_cal(rtwdev, path);
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK] restore fail=%d\n" , fail);

	/* Drop the LOK / TIA debug overrides on the RF side */
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LUTWE, RR_LUTWE_LOK, data: 0x0);
	rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LUTDBG, RR_LUTDBG_TIA, data: 0x0);

	/* Reset NCTL state and return KIP system config to normal mode */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, data: 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, data: 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, data: 0x80000000);
}
1532 | |
/* Restore AFE/BB settings after IQK via the chip's restore table.
 * @phy_idx and @path are unused here; the signature matches the other
 * setup/restore helpers in this file.
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_iqk_afebb_restore_defs_tbl);
}
1538 | |
/* Prepare RF and KIP blocks for an IQK run: take the RF path out of its
 * reserved/reset state and enable the NCTL report + KIP system config
 * needed during calibration.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);

	rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, data: 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, data: 0x81ff010a);
}
1547 | |
/* Program MAC/BB registers for IQK by replaying the chip-specific setup
 * table; phy_idx/path are unused because the table is global to the chip.
 */
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__);

	rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_iqk_macbb_defs_tbl);
}
1555 | |
1556 | static void _iqk_init(struct rtw89_dev *rtwdev) |
1557 | { |
1558 | struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; |
1559 | u8 idx, path; |
1560 | |
1561 | rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, data: 0x0); |
1562 | |
1563 | if (iqk_info->is_iqk_init) |
1564 | return; |
1565 | |
1566 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]===>%s\n" , __func__); |
1567 | |
1568 | iqk_info->is_iqk_init = true; |
1569 | iqk_info->is_nbiqk = false; |
1570 | iqk_info->iqk_fft_en = false; |
1571 | iqk_info->iqk_sram_en = false; |
1572 | iqk_info->iqk_cfir_en = false; |
1573 | iqk_info->iqk_xym_en = false; |
1574 | iqk_info->iqk_times = 0x0; |
1575 | |
1576 | for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { |
1577 | iqk_info->iqk_channel[idx] = 0x0; |
1578 | for (path = 0; path < RF_PATH_NUM_8851B; path++) { |
1579 | iqk_info->lok_cor_fail[idx][path] = false; |
1580 | iqk_info->lok_fin_fail[idx][path] = false; |
1581 | iqk_info->iqk_tx_fail[idx][path] = false; |
1582 | iqk_info->iqk_rx_fail[idx][path] = false; |
1583 | iqk_info->iqk_table_idx[path] = 0x0; |
1584 | } |
1585 | } |
1586 | } |
1587 | |
/* Full IQK flow for one RF path: notify BT-coex that a one-shot RFK is
 * starting, snapshot BB/RF registers, configure MAC/BB and KIP for
 * calibration, run the IQK, then restore everything and notify coex again.
 *
 * NOTE(review): @force is currently unused in this function - confirm
 * whether a forced-recalibration check was intended here.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, paths: RF_AB);
	u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];

	/* let BT yield the antenna while the one-shot calibration runs */
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK,
			      state: BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
		    fmt: "[IQK]==========IQK start!!!!!==========\n" );
	iqk_info->iqk_times++;
	iqk_info->version = RTW8851B_IQK_VER;

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[IQK]Test Ver 0x%x\n" , iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy: phy_idx, path);

	/* backup -> setup -> calibrate -> restore, in strict order */
	_rfk_backup_bb_reg(rtwdev, backup_bb_reg_val: &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, backup_rf_reg_val: &backup_rf_val[path][0], rf_path: path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, backup_bb_reg_val: &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, backup_rf_reg_val: &backup_rf_val[path][0], rf_path: path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK,
			      state: BTC_WRFK_ONESHOT_STOP);
}
1620 | |
/* Chip-level IQK entry point: 8851B has a single RF path, so only path A
 * is calibrated.
 */
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
{
	_doiqk(rtwdev, force, phy_idx, path: RF_PATH_A);
}
1625 | |
1626 | static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg, |
1627 | u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) |
1628 | { |
1629 | u8 i; |
1630 | |
1631 | for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { |
1632 | reg_bkup[path][i] = |
1633 | rtw89_phy_read32_mask(rtwdev, addr: reg[i] + (path << 8), MASKDWORD); |
1634 | |
1635 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] Backup 0x%x = %x\n" , |
1636 | reg[i] + (path << 8), reg_bkup[path][i]); |
1637 | } |
1638 | } |
1639 | |
1640 | static void _dpk_bkup_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, |
1641 | u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) |
1642 | { |
1643 | u8 i; |
1644 | |
1645 | for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { |
1646 | rf_bkup[path][i] = rtw89_read_rf(rtwdev, rf_path: path, addr: rf_reg[i], RFREG_MASK); |
1647 | |
1648 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] Backup RF S%d 0x%x = %x\n" , |
1649 | path, rf_reg[i], rf_bkup[path][i]); |
1650 | } |
1651 | } |
1652 | |
1653 | static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 *reg, |
1654 | u32 reg_bkup[][DPK_KIP_REG_NUM_8851B], u8 path) |
1655 | { |
1656 | u8 i; |
1657 | |
1658 | for (i = 0; i < DPK_KIP_REG_NUM_8851B; i++) { |
1659 | rtw89_phy_write32_mask(rtwdev, addr: reg[i] + (path << 8), MASKDWORD, |
1660 | data: reg_bkup[path][i]); |
1661 | |
1662 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1663 | fmt: "[DPK] Reload 0x%x = %x\n" , |
1664 | reg[i] + (path << 8), reg_bkup[path][i]); |
1665 | } |
1666 | } |
1667 | |
1668 | static void _dpk_reload_rf(struct rtw89_dev *rtwdev, const u32 *rf_reg, |
1669 | u32 rf_bkup[][DPK_RF_REG_NUM_8851B], u8 path) |
1670 | { |
1671 | u8 i; |
1672 | |
1673 | for (i = 0; i < DPK_RF_REG_NUM_8851B; i++) { |
1674 | rtw89_write_rf(rtwdev, rf_path: path, addr: rf_reg[i], RFREG_MASK, data: rf_bkup[path][i]); |
1675 | |
1676 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1677 | fmt: "[DPK] Reload RF S%d 0x%x = %x\n" , path, |
1678 | rf_reg[i], rf_bkup[path][i]); |
1679 | } |
1680 | } |
1681 | |
1682 | static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
1683 | enum rtw89_rf_path path, enum dpk_id id) |
1684 | { |
1685 | u16 dpk_cmd; |
1686 | u32 val; |
1687 | int ret; |
1688 | |
1689 | dpk_cmd = ((id << 8) | (0x19 + path * 0x12)); |
1690 | rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, data: dpk_cmd); |
1691 | |
1692 | ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, |
1693 | 10, 20000, false, |
1694 | rtwdev, 0xbff8, MASKBYTE0); |
1695 | if (ret) |
1696 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] one-shot 1 timeout\n" ); |
1697 | |
1698 | udelay(1); |
1699 | |
1700 | ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, |
1701 | 1, 2000, false, |
1702 | rtwdev, R_RPT_COM, MASKLWORD); |
1703 | if (ret) |
1704 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] one-shot 2 timeout\n" ); |
1705 | |
1706 | rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, data: 0x0); |
1707 | |
1708 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1709 | fmt: "[DPK] one-shot for %s = 0x%04x\n" , |
1710 | id == 0x28 ? "KIP_PRESET" : |
1711 | id == 0x29 ? "DPK_TXAGC" : |
1712 | id == 0x2a ? "DPK_RXAGC" : |
1713 | id == 0x2b ? "SYNC" : |
1714 | id == 0x2c ? "GAIN_LOSS" : |
1715 | id == 0x2d ? "MDPK_IDL" : |
1716 | id == 0x2f ? "DPK_GAIN_NORM" : |
1717 | id == 0x31 ? "KIP_RESTORE" : |
1718 | id == 0x6 ? "LBK_RXIQK" : "Unknown id" , |
1719 | dpk_cmd); |
1720 | } |
1721 | |
1722 | static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, |
1723 | bool off) |
1724 | { |
1725 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
1726 | u8 kidx = dpk->cur_idx[path]; |
1727 | u8 off_reverse = off ? 0 : 1; |
1728 | u8 val; |
1729 | |
1730 | val = dpk->is_dpk_enable * off_reverse * dpk->bp[path][kidx].path_ok; |
1731 | |
1732 | rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), |
1733 | mask: 0xf0000000, data: val); |
1734 | |
1735 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d[%d] DPK %s !!!\n" , path, |
1736 | kidx, val == 0 ? "disable" : "enable" ); |
1737 | } |
1738 | |
1739 | static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) |
1740 | { |
1741 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
1742 | |
1743 | u8 kidx = dpk->cur_idx[path]; |
1744 | |
1745 | dpk->bp[path][kidx].path_ok = 0; |
1746 | } |
1747 | |
1748 | static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
1749 | enum rtw89_rf_path path) |
1750 | { |
1751 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
1752 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
1753 | |
1754 | u8 kidx = dpk->cur_idx[path]; |
1755 | |
1756 | dpk->bp[path][kidx].band = chan->band_type; |
1757 | dpk->bp[path][kidx].ch = chan->band_width; |
1758 | dpk->bp[path][kidx].bw = chan->channel; |
1759 | |
1760 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1761 | fmt: "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n" , |
1762 | path, dpk->cur_idx[path], phy, |
1763 | rtwdev->is_tssi_mode[path] ? "on" : "off" , |
1764 | rtwdev->dbcc_en ? "on" : "off" , |
1765 | dpk->bp[path][kidx].band == 0 ? "2G" : |
1766 | dpk->bp[path][kidx].band == 1 ? "5G" : "6G" , |
1767 | dpk->bp[path][kidx].ch, |
1768 | dpk->bp[path][kidx].bw == 0 ? "20M" : |
1769 | dpk->bp[path][kidx].bw == 1 ? "40M" : |
1770 | dpk->bp[path][kidx].bw == 2 ? "80M" : "160M" ); |
1771 | } |
1772 | |
1773 | static void _dpk_rxagc_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, |
1774 | bool turn_on) |
1775 | { |
1776 | if (path == RF_PATH_A) |
1777 | rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, data: turn_on); |
1778 | else |
1779 | rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, data: turn_on); |
1780 | |
1781 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d RXAGC is %s\n" , path, |
1782 | turn_on ? "turn_on" : "turn_off" ); |
1783 | } |
1784 | |
/* Configure BB/AFE for DPK: sequence the per-path ADC FIFO control bits,
 * force the update clock, replay the shared BB/AFE setup table, then
 * re-enable the FIFO bits. Write order matters here.
 */
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, data: 0xd801dffd);

	rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_iqk_bb_afe_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), data: 0x1);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d BB/AFE setting\n" , path);
}
1800 | |
/* Undo _dpk_bb_afe_setting(): clear the narrow-band debug mode, step the
 * ADC FIFO control bits back, release the forced update clock and TX clock.
 * Write order matters here.
 */
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, data: 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, data: 0x00);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(16 + path), data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(24 + path), data: 0x0);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d BB/AFE restore\n" , path);
}
1815 | |
1816 | static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, |
1817 | bool is_pause) |
1818 | { |
1819 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), |
1820 | B_P0_TSSI_TRK_EN, data: is_pause); |
1821 | |
1822 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d TSSI %s\n" , path, |
1823 | is_pause ? "pause" : "resume" ); |
1824 | } |
1825 | |
1826 | static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) |
1827 | { |
1828 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
1829 | |
1830 | if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) { |
1831 | rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, data: 0x0); |
1832 | rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, data: 0xffe0fa00); |
1833 | } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) { |
1834 | rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, data: 0x2); |
1835 | rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, data: 0xff4009e0); |
1836 | } else { |
1837 | rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, data: 0x1); |
1838 | rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, data: 0xf9f007d0); |
1839 | } |
1840 | |
1841 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] TPG Select for %s\n" , |
1842 | dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : |
1843 | dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M" ); |
1844 | } |
1845 | |
1846 | static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, |
1847 | enum rtw89_rf_path path, bool force) |
1848 | { |
1849 | rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, data: force); |
1850 | rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, data: force); |
1851 | |
1852 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d txpwr_bb_force %s\n" , |
1853 | path, force ? "on" : "off" ); |
1854 | } |
1855 | |
1856 | static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on) |
1857 | { |
1858 | if (turn_on) { |
1859 | rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, data: 0x00000080); |
1860 | rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, data: 0x807f030a); |
1861 | } else { |
1862 | rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, data: 0x00000000); |
1863 | rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, data: 0x80000000); |
1864 | rtw89_phy_write32_mask(rtwdev, R_DPK_WR, BIT(18), data: 0x1); |
1865 | } |
1866 | } |
1867 | |
1868 | static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, |
1869 | enum rtw89_rf_path path, bool ctrl_by_kip) |
1870 | { |
1871 | rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), |
1872 | B_IQK_RFC_ON, data: ctrl_by_kip); |
1873 | |
1874 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] RFC is controlled by %s\n" , |
1875 | ctrl_by_kip ? "KIP" : "BB" ); |
1876 | } |
1877 | |
/* Preset the KIP engine for DPK on @path: mirror the current RF mode
 * register into KIP, select DPD slot @kidx, give RF control to KIP and
 * fire the KIP_PRESET one-shot.
 */
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
			       data: rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK));
	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       B_DPD_SEL, data: 0x01);

	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: true);
	_dpk_one_shot(rtwdev, phy, path, id: D_KIP_PRESET);
}
1889 | |
/* Restore KIP state after DPK: fire the KIP_RESTORE one-shot, give RF
 * control back to BB and release the BB TX power override.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path)
{
	_dpk_one_shot(rtwdev, phy, path, id: D_KIP_RESTORE);
	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: false);
	_dpk_txpwr_bb_force(rtwdev, path, force: false);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d restore KIP\n" , path);
}
1899 | |
1900 | static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) |
1901 | { |
1902 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
1903 | |
1904 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, data: 0x10); |
1905 | |
1906 | dpk->cur_k_set = |
1907 | rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_KSET) - 1; |
1908 | } |
1909 | |
1910 | static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) |
1911 | { |
1912 | static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { |
1913 | {0x8190, 0x8194, 0x8198, 0x81a4}, |
1914 | {0x81a8, 0x81c4, 0x81c8, 0x81e8} |
1915 | }; |
1916 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
1917 | u8 cur_k_set = dpk->cur_k_set; |
1918 | u32 para; |
1919 | |
1920 | if (cur_k_set >= DPK_KSET_NUM) { |
1921 | rtw89_warn(rtwdev, "DPK cur_k_set = %d\n" , cur_k_set); |
1922 | cur_k_set = 2; |
1923 | } |
1924 | |
1925 | para = rtw89_phy_read32_mask(rtwdev, addr: reg[kidx][cur_k_set] + (path << 8), |
1926 | MASKDWORD); |
1927 | |
1928 | dpk->bp[path][kidx].txagc_dpk = (para >> 10) & 0x3f; |
1929 | dpk->bp[path][kidx].ther_dpk = (para >> 26) & 0x3f; |
1930 | |
1931 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1932 | fmt: "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n" , |
1933 | dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, |
1934 | dpk->bp[path][kidx].txagc_dpk); |
1935 | } |
1936 | |
1937 | static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) |
1938 | { |
1939 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
1940 | u8 corr_val, corr_idx, rxbb; |
1941 | u16 dc_i, dc_q; |
1942 | u8 rxbb_ov; |
1943 | |
1944 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x0); |
1945 | |
1946 | corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); |
1947 | corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); |
1948 | dpk->corr_idx[path][kidx] = corr_idx; |
1949 | dpk->corr_val[path][kidx] = corr_val; |
1950 | |
1951 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x9); |
1952 | |
1953 | dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); |
1954 | dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); |
1955 | |
1956 | dc_i = abs(sign_extend32(dc_i, 11)); |
1957 | dc_q = abs(sign_extend32(dc_q, 11)); |
1958 | |
1959 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1960 | fmt: "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n" , |
1961 | path, corr_idx, corr_val, dc_i, dc_q); |
1962 | |
1963 | dpk->dc_i[path][kidx] = dc_i; |
1964 | dpk->dc_q[path][kidx] = dc_q; |
1965 | |
1966 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x8); |
1967 | rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB); |
1968 | |
1969 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x31); |
1970 | rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV); |
1971 | |
1972 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1973 | fmt: "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n" , |
1974 | path, rxbb, |
1975 | rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE), |
1976 | rxbb_ov); |
1977 | |
1978 | if (dc_i > 200 || dc_q > 200 || corr_val < 170) |
1979 | return true; |
1980 | else |
1981 | return false; |
1982 | } |
1983 | |
1984 | static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
1985 | enum rtw89_rf_path path, u8 dbm, |
1986 | bool set_from_bb) |
1987 | { |
1988 | if (set_from_bb) { |
1989 | dbm = clamp_t(u8, dbm, 7, 24); |
1990 | |
1991 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
1992 | fmt: "[DPK] set S%d txagc to %ddBm\n" , path, dbm); |
1993 | rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), |
1994 | B_TXPWRB_VAL, data: dbm << 2); |
1995 | } |
1996 | |
1997 | _dpk_one_shot(rtwdev, phy, path, id: D_TXAGC); |
1998 | _dpk_kset_query(rtwdev, path); |
1999 | } |
2000 | |
/* Refresh KIP's copy of the RF mode register (with BB briefly in control),
 * run the RXAGC one-shot, and return the SYNC check result (true = fail).
 */
static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 kidx)
{
	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: false);
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
			       data: rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK));
	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: true);

	_dpk_one_shot(rtwdev, phy, path, id: D_RXAGC);
	return _dpk_sync_check(rtwdev, path, kidx);
}
2012 | |
/* Run a loopback RX IQ calibration for DPK: save the RF TX-injection and
 * KIP IQ-path settings, override them for the loopback measurement, fire
 * the LBK_RXIQK one-shot, then restore everything. The save/override/
 * restore ordering (and which side - KIP or BB - controls RF at each step)
 * is significant.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path)
{
	u32 rf_11, reg_81cc;
	u8 cur_rxbb;

	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, data: 0x1);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, data: 0x1);

	/* BB must own RF while we read/patch the RF registers directly */
	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: false);

	/* save current RXBB gain, TX-injection config and KIP IQ switch */
	cur_rxbb = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RXB);
	rf_11 = rtw89_read_rf(rtwdev, rf_path: path, RR_TXIG, RFREG_MASK);
	reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
					 B_KIP_IQP_SW);

	/* loopback overrides */
	rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR0, data: 0x0);
	rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_GR1, data: 0x3);
	rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RR_TXIG_TG, data: 0xd);
	rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RXB, data: 0x1f);

	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, data: 0x12);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, data: 0x3);

	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: true);

	/* tone frequency selection for the loopback measurement */
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, data: 0x00250025);

	_dpk_one_shot(rtwdev, phy, path, id: LBK_RXIQK);

	rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d LBK RXIQC = 0x%x\n" , path,
		    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));

	/* restore saved settings, again with BB in control of RF */
	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: false);

	rtw89_write_rf(rtwdev, rf_path: path, RR_TXIG, RFREG_MASK, data: rf_11);
	rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RXB, data: cur_rxbb);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, data: reg_81cc);

	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, data: 0x1);

	_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: true);
}
2058 | |
/* Configure the RF front end for DPK according to the band recorded in
 * slot @kidx: set the RF mode to DPK, adjust RX attenuation per band, then
 * program the TX/RX baseband filter bandwidths.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK, data: 0x50521);
		rtw89_write_rf(rtwdev, rf_path: path, RR_MOD_V1, RR_MOD_MASK, data: RF_DPK);
		rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB, RR_RXBB_ATTC, data: 0x0);
		rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB, RR_RXBB_ATTR, data: 0x7);
	} else {
		/* NOTE(review): BIT(rtwdev->dbcc_en) flips bit 0 or bit 1 of
		 * the mode word depending on DBCC - confirm intent.
		 */
		rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK,
			       data: 0x50521 | BIT(rtwdev->dbcc_en));
		rtw89_write_rf(rtwdev, rf_path: path, RR_MOD_V1, RR_MOD_MASK, data: RF_DPK);
		rtw89_write_rf(rtwdev, rf_path: path, RR_RXA2, RR_RAA2_SATT, data: 0x3);
	}

	rtw89_write_rf(rtwdev, rf_path: path, RR_RCKD, RR_RCKD_BW, data: 0x1);
	/* TX BB filter bandwidth follows the recorded channel width */
	rtw89_write_rf(rtwdev, rf_path: path, RR_BTC, RR_BTC_TXBB, data: dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, rf_path: path, RR_BTC, RR_BTC_RXBB, data: 0x0);
	rtw89_write_rf(rtwdev, rf_path: path, RR_RXBB2, RR_RXBB2_EBW, data: 0x0);
}
2080 | |
2081 | static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) |
2082 | { |
2083 | rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, data: 0x1); |
2084 | rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, data: 0x40000002); |
2085 | |
2086 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] Bypass RXIQC\n" ); |
2087 | } |
2088 | |
2089 | static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) |
2090 | { |
2091 | u16 dgain; |
2092 | |
2093 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x0); |
2094 | dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); |
2095 | |
2096 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] DGain = 0x%x\n" , dgain); |
2097 | |
2098 | return dgain; |
2099 | } |
2100 | |
2101 | static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) |
2102 | { |
2103 | u8 result; |
2104 | |
2105 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, data: 0x6); |
2106 | rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, data: 0x1); |
2107 | result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); |
2108 | |
2109 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] tmp GL = %d\n" , result); |
2110 | |
2111 | return result; |
2112 | } |
2113 | |
/* Run the gain-loss measurement: fire the GAIN_LOSS one-shot, refresh the
 * TX AGC/k-set (0xff means "no BB override"), program the gain-loss window
 * registers, then read back the resulting index.
 */
static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, id: D_GAIN_LOSS);
	_dpk_kip_set_txagc(rtwdev, phy, path, dbm: 0xff, set_from_bb: false);

	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, data: 0xf078);
	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, data: 0x0);

	return _dpk_gainloss_read(rtwdev);
}
2125 | |
/* Inspect the PA scan buffer.
 *
 * With @is_check set, compare the signal power at buffer slots 0 and 0x1f
 * and classify the ratio: returns 2 if slot0 power < slot31 power, 1 if
 * slot0 power >= 1.6x slot31 power, else 0.
 *
 * With @is_check clear, only dump all 32 slots to the debug log.
 * NOTE(review): in that mode the accumulators stay zero, so the function
 * falls through to "0 >= 0" and returns 1 - callers appear to use this
 * mode for debug output only; confirm the return value is ignored there.
 */
static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, u8 is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u32 val1_sqrt_sum, val2_sqrt_sum;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, data: 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, data: 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, data: 0x08);

	if (is_check) {
		/* slot 0: I/Q magnitudes (12-bit signed fields) */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, data: 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		/* slot 0x1f: I/Q magnitudes */
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, data: 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] PAS_delta = 0x%x\n" ,
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		/* debug dump of all 32 PA scan slots */
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, data: i);
			rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
				    fmt: "[DPK] PAS_Read[%02d]= 0x%08x\n" , i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q;
	val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q;

	if (val1_sqrt_sum < val2_sqrt_sum)
		return 2;
	else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5)
		return 1;
	else
		return 0;
}
2171 | |
/* DPK AGC state machine: iterate SYNC/gain-loss measurements, adjusting the
 * TX power (7..24 dBm) or the RXBB gain until the gain-loss index lands in
 * an acceptable window, up to 6 AGC rounds (with a hard iteration limit as
 * a safety net). Returns nonzero when the SYNC step failed.
 */
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
	u8 goout = 0, agc_cnt = 0;
	bool is_fail = false;
	int limit = 200;
	u8 tmp_rxbb;
	u16 dgain;

	do {
		switch (step) {
		case DPK_AGC_STEP_SYNC_DGAIN:
			/* run RXAGC + SYNC; abort the loop on sync failure */
			is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);

			if (is_fail) {
				goout = 1;
				break;
			}

			dgain = _dpk_dgain_read(rtwdev);

			/* digital gain out of window: re-sync once and re-read */
			if (dgain > 0x5fc || dgain < 0x556) {
				_dpk_one_shot(rtwdev, phy, path, id: D_SYNC);
				dgain = _dpk_dgain_read(rtwdev);
			}

			/* RX IQC is set up only on the first round */
			if (agc_cnt == 0) {
				if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
					_dpk_bypass_rxiqc(rtwdev, path);
				else
					_dpk_lbk_rxiqk(rtwdev, phy, path);
			}
			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
			break;

		case DPK_AGC_STEP_GAIN_LOSS_IDX:
			/* classify the gain-loss index via the PA scan ratio */
			tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);

			if (_dpk_pas_read(rtwdev, is_check: true) == 2 && tmp_gl_idx > 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, is_check: true) == 1) ||
				 tmp_gl_idx >= 7)
				step = DPK_AGC_STEP_GL_GT_CRITERION;
			else if (tmp_gl_idx == 0)
				step = DPK_AGC_STEP_GL_LT_CRITERION;
			else
				step = DPK_AGC_STEP_SET_TX_GAIN;
			break;

		case DPK_AGC_STEP_GL_GT_CRITERION:
			/* too much gain loss: back TX power off by 3 dB */
			if (tmp_dbm <= 7) {
				goout = 1;
				rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
					    fmt: "[DPK] Txagc@lower bound!!\n" );
			} else {
				tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
				_dpk_kip_set_txagc(rtwdev, phy, path, dbm: tmp_dbm, set_from_bb: true);
			}
			step = DPK_AGC_STEP_SYNC_DGAIN;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_GL_LT_CRITERION:
			/* too little gain loss: raise TX power by 2 dB */
			if (tmp_dbm >= 24) {
				goout = 1;
				rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
					    fmt: "[DPK] Txagc@upper bound!!\n" );
			} else {
				tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
				_dpk_kip_set_txagc(rtwdev, phy, path, dbm: tmp_dbm, set_from_bb: true);
			}
			step = DPK_AGC_STEP_SYNC_DGAIN;
			agc_cnt++;
			break;

		case DPK_AGC_STEP_SET_TX_GAIN:
			/* acceptable window: fold the index into RXBB gain and stop */
			_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: false);
			tmp_rxbb = rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RXB);
			tmp_rxbb = min_t(u8, tmp_rxbb + tmp_gl_idx, 0x1f);

			rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_RXB, data: tmp_rxbb);

			rtw89_debug(rtwdev, mask: RTW89_DBG_RFK,
				    fmt: "[DPK] Adjust RXBB (%+d) = 0x%x\n" ,
				    tmp_gl_idx, tmp_rxbb);
			_dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: true);
			goout = 1;
			break;
		default:
			goout = 1;
			break;
		}
	} while (!goout && agc_cnt < 6 && limit-- > 0);

	return is_fail;
}
2271 | |
2272 | static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) |
2273 | { |
2274 | switch (order) { |
2275 | case 0: /* (5,3,1) */ |
2276 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, data: 0x0); |
2277 | rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, data: 0x2); |
2278 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, data: 0x4); |
2279 | rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, data: 0x1); |
2280 | break; |
2281 | case 1: /* (5,3,0) */ |
2282 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, data: 0x1); |
2283 | rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, data: 0x1); |
2284 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, data: 0x0); |
2285 | rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, data: 0x0); |
2286 | break; |
2287 | case 2: /* (5,0,0) */ |
2288 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, data: 0x2); |
2289 | rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, data: 0x0); |
2290 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, data: 0x0); |
2291 | rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, data: 0x0); |
2292 | break; |
2293 | case 3: /* (7,3,1) */ |
2294 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, data: 0x3); |
2295 | rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, data: 0x3); |
2296 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, data: 0x4); |
2297 | rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, data: 0x1); |
2298 | break; |
2299 | default: |
2300 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
2301 | fmt: "[DPK] Wrong MDPD order!!(0x%x)\n" , order); |
2302 | break; |
2303 | } |
2304 | |
2305 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] Set %s for IDL\n" , |
2306 | order == 0x0 ? "(5,3,1)" : |
2307 | order == 0x1 ? "(5,3,0)" : |
2308 | order == 0x2 ? "(5,0,0)" : "(7,3,1)" ); |
2309 | } |
2310 | |
2311 | static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2312 | enum rtw89_rf_path path, u8 kidx) |
2313 | { |
2314 | rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, data: 0x1); |
2315 | |
2316 | if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD500) == 0x1) |
2317 | _dpk_set_mdpd_para(rtwdev, order: 0x2); |
2318 | else if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD530) == 0x1) |
2319 | _dpk_set_mdpd_para(rtwdev, order: 0x1); |
2320 | else |
2321 | _dpk_set_mdpd_para(rtwdev, order: 0x0); |
2322 | |
2323 | rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, data: 0x0); |
2324 | fsleep(usecs: 1000); |
2325 | |
2326 | _dpk_one_shot(rtwdev, phy, path, id: D_MDPK_IDL); |
2327 | } |
2328 | |
2329 | static u8 _dpk_order_convert(struct rtw89_dev *rtwdev) |
2330 | { |
2331 | u32 order; |
2332 | u8 val; |
2333 | |
2334 | order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP); |
2335 | |
2336 | switch (order) { |
2337 | case 0: /* (5,3,1) */ |
2338 | val = 0x6; |
2339 | break; |
2340 | case 1: /* (5,3,0) */ |
2341 | val = 0x2; |
2342 | break; |
2343 | case 2: /* (5,0,0) */ |
2344 | val = 0x0; |
2345 | break; |
2346 | default: |
2347 | val = 0xff; |
2348 | break; |
2349 | } |
2350 | |
2351 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] convert MDPD order to 0x%x\n" , val); |
2352 | |
2353 | return val; |
2354 | } |
2355 | |
2356 | static void _dpk_gain_normalize(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2357 | enum rtw89_rf_path path, u8 kidx, bool is_execute) |
2358 | { |
2359 | static const u32 reg[RTW89_DPK_BKUP_NUM][DPK_KSET_NUM] = { |
2360 | {0x8190, 0x8194, 0x8198, 0x81a4}, |
2361 | {0x81a8, 0x81c4, 0x81c8, 0x81e8} |
2362 | }; |
2363 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
2364 | u8 cur_k_set = dpk->cur_k_set; |
2365 | |
2366 | if (cur_k_set >= DPK_KSET_NUM) { |
2367 | rtw89_warn(rtwdev, "DPK cur_k_set = %d\n" , cur_k_set); |
2368 | cur_k_set = 2; |
2369 | } |
2370 | |
2371 | if (is_execute) { |
2372 | rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), |
2373 | B_DPK_GN_AG, data: 0x200); |
2374 | rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), |
2375 | B_DPK_GN_EN, data: 0x3); |
2376 | |
2377 | _dpk_one_shot(rtwdev, phy, path, id: D_GAIN_NORM); |
2378 | } else { |
2379 | rtw89_phy_write32_mask(rtwdev, addr: reg[kidx][cur_k_set] + (path << 8), |
2380 | mask: 0x0000007F, data: 0x5b); |
2381 | } |
2382 | |
2383 | dpk->bp[path][kidx].gs = |
2384 | rtw89_phy_read32_mask(rtwdev, addr: reg[kidx][cur_k_set] + (path << 8), |
2385 | mask: 0x0000007F); |
2386 | } |
2387 | |
2388 | static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2389 | enum rtw89_rf_path path, u8 kidx) |
2390 | { |
2391 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
2392 | |
2393 | rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, data: 0x1); |
2394 | rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, data: 0x0); |
2395 | rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), |
2396 | B_DPD_ORDER, data: _dpk_order_convert(rtwdev)); |
2397 | |
2398 | dpk->bp[path][kidx].path_ok = |
2399 | dpk->bp[path][kidx].path_ok | BIT(dpk->cur_k_set); |
2400 | |
2401 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d[%d] path_ok = 0x%x\n" , |
2402 | path, kidx, dpk->bp[path][kidx].path_ok); |
2403 | |
2404 | rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), |
2405 | B_DPD_MEN, data: dpk->bp[path][kidx].path_ok); |
2406 | |
2407 | _dpk_gain_normalize(rtwdev, phy, path, kidx, is_execute: false); |
2408 | } |
2409 | |
2410 | static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2411 | enum rtw89_rf_path path) |
2412 | { |
2413 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
2414 | u8 kidx = dpk->cur_idx[path]; |
2415 | u8 init_xdbm = 17; |
2416 | bool is_fail; |
2417 | |
2418 | if (dpk->bp[path][kidx].band != RTW89_BAND_2G) |
2419 | init_xdbm = 15; |
2420 | |
2421 | _dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: false); |
2422 | _rfk_rf_direct_cntrl(rtwdev, path, is_bybb: false); |
2423 | rtw89_write_rf(rtwdev, rf_path: path, RR_BBDC, RFREG_MASK, data: 0x03ffd); |
2424 | |
2425 | _dpk_rf_setting(rtwdev, path, kidx); |
2426 | _set_rx_dck(rtwdev, path, rf_mode: RF_DPK); |
2427 | |
2428 | _dpk_kip_pwr_clk_onoff(rtwdev, turn_on: true); |
2429 | _dpk_kip_preset(rtwdev, phy, path, kidx); |
2430 | _dpk_txpwr_bb_force(rtwdev, path, force: true); |
2431 | _dpk_kip_set_txagc(rtwdev, phy, path, dbm: init_xdbm, set_from_bb: true); |
2432 | _dpk_tpg_sel(rtwdev, path, kidx); |
2433 | is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, loss_only: false); |
2434 | if (is_fail) |
2435 | goto _error; |
2436 | |
2437 | _dpk_idl_mpa(rtwdev, phy, path, kidx); |
2438 | _dpk_para_query(rtwdev, path, kidx); |
2439 | |
2440 | _dpk_on(rtwdev, phy, path, kidx); |
2441 | _error: |
2442 | _dpk_kip_control_rfc(rtwdev, path, ctrl_by_kip: false); |
2443 | rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, data: RF_RX); |
2444 | |
2445 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[DPK] S%d[%d]_K%d %s\n" , path, kidx, |
2446 | dpk->cur_k_set, is_fail ? "need Check" : "is Success" ); |
2447 | |
2448 | return is_fail; |
2449 | } |
2450 | |
2451 | static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, |
2452 | enum rtw89_phy_idx phy, u8 kpath) |
2453 | { |
2454 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
2455 | u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {}; |
2456 | u32 rf_bkup[RF_PATH_NUM_8851B][DPK_RF_REG_NUM_8851B] = {}; |
2457 | bool is_fail; |
2458 | u8 path; |
2459 | |
2460 | for (path = 0; path < RF_PATH_NUM_8851B; path++) |
2461 | dpk->cur_idx[path] = 0; |
2462 | |
2463 | for (path = 0; path < RF_PATH_NUM_8851B; path++) { |
2464 | if (!(kpath & BIT(path))) |
2465 | continue; |
2466 | _dpk_bkup_kip(rtwdev, reg: dpk_kip_reg, reg_bkup: kip_bkup, path); |
2467 | _dpk_bkup_rf(rtwdev, rf_reg: dpk_rf_reg, rf_bkup, path); |
2468 | _dpk_information(rtwdev, phy, path); |
2469 | _dpk_init(rtwdev, path); |
2470 | |
2471 | if (rtwdev->is_tssi_mode[path]) |
2472 | _dpk_tssi_pause(rtwdev, path, is_pause: true); |
2473 | } |
2474 | |
2475 | for (path = 0; path < RF_PATH_NUM_8851B; path++) { |
2476 | if (!(kpath & BIT(path))) |
2477 | continue; |
2478 | |
2479 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
2480 | fmt: "[DPK] ========= S%d[%d] DPK Start =========\n" , |
2481 | path, dpk->cur_idx[path]); |
2482 | |
2483 | _dpk_rxagc_onoff(rtwdev, path, turn_on: false); |
2484 | _rfk_drf_direct_cntrl(rtwdev, path, is_bybb: false); |
2485 | _dpk_bb_afe_setting(rtwdev, path); |
2486 | |
2487 | is_fail = _dpk_main(rtwdev, phy, path); |
2488 | _dpk_onoff(rtwdev, path, off: is_fail); |
2489 | } |
2490 | |
2491 | for (path = 0; path < RF_PATH_NUM_8851B; path++) { |
2492 | if (!(kpath & BIT(path))) |
2493 | continue; |
2494 | |
2495 | _dpk_kip_restore(rtwdev, phy, path); |
2496 | _dpk_reload_kip(rtwdev, reg: dpk_kip_reg, reg_bkup: kip_bkup, path); |
2497 | _dpk_reload_rf(rtwdev, rf_reg: dpk_rf_reg, rf_bkup, path); |
2498 | _dpk_bb_afe_restore(rtwdev, path); |
2499 | _dpk_rxagc_onoff(rtwdev, path, turn_on: true); |
2500 | |
2501 | if (rtwdev->is_tssi_mode[path]) |
2502 | _dpk_tssi_pause(rtwdev, path, is_pause: false); |
2503 | } |
2504 | |
2505 | _dpk_kip_pwr_clk_onoff(rtwdev, turn_on: false); |
2506 | } |
2507 | |
2508 | static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) |
2509 | { |
2510 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
2511 | fmt: "[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n" , |
2512 | DPK_VER_8851B, rtwdev->hal.cv); |
2513 | |
2514 | _dpk_cal_select(rtwdev, force, phy, kpath: _kpath(rtwdev, phy_idx: phy)); |
2515 | } |
2516 | |
2517 | static void _dpk_track(struct rtw89_dev *rtwdev) |
2518 | { |
2519 | struct rtw89_dpk_info *dpk = &rtwdev->dpk; |
2520 | s8 txagc_bb, txagc_bb_tp, txagc_ofst; |
2521 | s16 pwsf_tssi_ofst; |
2522 | s8 delta_ther = 0; |
2523 | u8 path, kidx; |
2524 | u8 txagc_rf; |
2525 | u8 cur_ther; |
2526 | |
2527 | for (path = 0; path < RF_PATH_NUM_8851B; path++) { |
2528 | kidx = dpk->cur_idx[path]; |
2529 | |
2530 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
2531 | fmt: "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n" , |
2532 | path, kidx, dpk->bp[path][kidx].ch); |
2533 | |
2534 | txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), |
2535 | B_TXAGC_RF); |
2536 | txagc_bb = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), |
2537 | MASKBYTE2); |
2538 | txagc_bb_tp = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), |
2539 | B_TXAGC_BTP); |
2540 | |
2541 | rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), |
2542 | B_KIP_RPT_SEL, data: 0xf); |
2543 | cur_ther = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), |
2544 | B_RPT_PER_TH); |
2545 | txagc_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), |
2546 | B_RPT_PER_OF); |
2547 | pwsf_tssi_ofst = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), |
2548 | B_RPT_PER_TSSI); |
2549 | pwsf_tssi_ofst = sign_extend32(value: pwsf_tssi_ofst, index: 12); |
2550 | |
2551 | delta_ther = cur_ther - dpk->bp[path][kidx].ther_dpk; |
2552 | |
2553 | delta_ther = delta_ther * 2 / 3; |
2554 | |
2555 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
2556 | fmt: "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n" , |
2557 | delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk); |
2558 | |
2559 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
2560 | fmt: "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n" , |
2561 | txagc_rf - dpk->bp[path][kidx].txagc_dpk, |
2562 | txagc_rf, dpk->bp[path][kidx].txagc_dpk); |
2563 | |
2564 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
2565 | fmt: "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n" , |
2566 | txagc_ofst, pwsf_tssi_ofst); |
2567 | |
2568 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
2569 | fmt: "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n" , |
2570 | txagc_bb_tp, txagc_bb); |
2571 | |
2572 | if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_DN) == 0x0 && |
2573 | txagc_rf != 0) { |
2574 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
2575 | fmt: "[DPK_TRK] New pwsf = 0x%x\n" , 0x78 - delta_ther); |
2576 | |
2577 | rtw89_phy_write32_mask(rtwdev, |
2578 | R_DPD_BND + (path << 8) + (kidx << 2), |
2579 | mask: 0x07FC0000, data: 0x78 - delta_ther); |
2580 | } |
2581 | } |
2582 | } |
2583 | |
2584 | static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) |
2585 | { |
2586 | u32 rf_reg5; |
2587 | u32 rck_val; |
2588 | u32 val; |
2589 | int ret; |
2590 | |
2591 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] ====== S%d RCK ======\n" , path); |
2592 | |
2593 | rf_reg5 = rtw89_read_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK); |
2594 | |
2595 | rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RR_RSV1_RST, data: 0x0); |
2596 | rtw89_write_rf(rtwdev, rf_path: path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); |
2597 | |
2598 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] RF0x00 = 0x%05x\n" , |
2599 | rtw89_read_rf(rtwdev, rf_path: path, RR_MOD, RFREG_MASK)); |
2600 | |
2601 | /* RCK trigger */ |
2602 | rtw89_write_rf(rtwdev, rf_path: path, RR_RCKC, RFREG_MASK, data: 0x00240); |
2603 | |
2604 | ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30, |
2605 | false, rtwdev, path, RR_RCKS, BIT(3)); |
2606 | |
2607 | rck_val = rtw89_read_rf(rtwdev, rf_path: path, RR_RCKC, RR_RCKC_CA); |
2608 | |
2609 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] rck_val = 0x%x, ret = %d\n" , |
2610 | rck_val, ret); |
2611 | |
2612 | rtw89_write_rf(rtwdev, rf_path: path, RR_RCKC, RFREG_MASK, data: rck_val); |
2613 | rtw89_write_rf(rtwdev, rf_path: path, RR_RSV1, RFREG_MASK, data: rf_reg5); |
2614 | |
2615 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RCK] RF 0x1b = 0x%x\n" , |
2616 | rtw89_read_rf(rtwdev, rf_path: path, RR_RCKC, RFREG_MASK)); |
2617 | } |
2618 | |
2619 | static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2620 | enum rtw89_rf_path path) |
2621 | { |
2622 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
2623 | enum rtw89_band band = chan->band_type; |
2624 | |
2625 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_sys_defs_tbl); |
2626 | |
2627 | rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, |
2628 | &rtw8851b_tssi_sys_a_defs_2g_tbl, |
2629 | &rtw8851b_tssi_sys_a_defs_5g_tbl); |
2630 | } |
2631 | |
2632 | static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, |
2633 | enum rtw89_phy_idx phy, |
2634 | enum rtw89_rf_path path) |
2635 | { |
2636 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_init_txpwr_defs_a_tbl); |
2637 | } |
2638 | |
2639 | static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, |
2640 | enum rtw89_phy_idx phy, |
2641 | enum rtw89_rf_path path) |
2642 | { |
2643 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_init_txpwr_he_tb_defs_a_tbl); |
2644 | } |
2645 | |
2646 | static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2647 | enum rtw89_rf_path path) |
2648 | { |
2649 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_dck_defs_a_tbl); |
2650 | } |
2651 | |
2652 | static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2653 | enum rtw89_rf_path path) |
2654 | { |
2655 | #define RTW8851B_TSSI_GET_VAL(ptr, idx) \ |
2656 | ({ \ |
2657 | s8 *__ptr = (ptr); \ |
2658 | u8 __idx = (idx), __i, __v; \ |
2659 | u32 __val = 0; \ |
2660 | for (__i = 0; __i < 4; __i++) { \ |
2661 | __v = (__ptr[__idx + __i]); \ |
2662 | __val |= (__v << (8 * __i)); \ |
2663 | } \ |
2664 | __val; \ |
2665 | }) |
2666 | struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; |
2667 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
2668 | u8 ch = chan->channel; |
2669 | u8 subband = chan->subband_type; |
2670 | const s8 *thm_up_a = NULL; |
2671 | const s8 *thm_down_a = NULL; |
2672 | u8 thermal = 0xff; |
2673 | s8 thm_ofst[64] = {0}; |
2674 | u32 tmp = 0; |
2675 | u8 i, j; |
2676 | |
2677 | switch (subband) { |
2678 | default: |
2679 | case RTW89_CH_2G: |
2680 | thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_p; |
2681 | thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_2ga_n; |
2682 | break; |
2683 | case RTW89_CH_5G_BAND_1: |
2684 | thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[0]; |
2685 | thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[0]; |
2686 | break; |
2687 | case RTW89_CH_5G_BAND_3: |
2688 | thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[1]; |
2689 | thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[1]; |
2690 | break; |
2691 | case RTW89_CH_5G_BAND_4: |
2692 | thm_up_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_p[2]; |
2693 | thm_down_a = rtw89_8851b_trk_cfg.delta_swingidx_5ga_n[2]; |
2694 | break; |
2695 | } |
2696 | |
2697 | if (path == RF_PATH_A) { |
2698 | thermal = tssi_info->thermal[RF_PATH_A]; |
2699 | |
2700 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
2701 | fmt: "[TSSI] ch=%d thermal_pathA=0x%x\n" , ch, thermal); |
2702 | |
2703 | rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, data: 0x0); |
2704 | rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, data: 0x1); |
2705 | |
2706 | if (thermal == 0xff) { |
2707 | rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, data: 32); |
2708 | rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, data: 32); |
2709 | |
2710 | for (i = 0; i < 64; i += 4) { |
2711 | rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, data: 0x0); |
2712 | |
2713 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
2714 | fmt: "[TSSI] write 0x%x val=0x%08x\n" , |
2715 | R_P0_TSSI_BASE + i, 0x0); |
2716 | } |
2717 | |
2718 | } else { |
2719 | rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, |
2720 | data: thermal); |
2721 | rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, |
2722 | data: thermal); |
2723 | |
2724 | i = 0; |
2725 | for (j = 0; j < 32; j++) |
2726 | thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? |
2727 | -thm_down_a[i++] : |
2728 | -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; |
2729 | |
2730 | i = 1; |
2731 | for (j = 63; j >= 32; j--) |
2732 | thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? |
2733 | thm_up_a[i++] : |
2734 | thm_up_a[DELTA_SWINGIDX_SIZE - 1]; |
2735 | |
2736 | for (i = 0; i < 64; i += 4) { |
2737 | tmp = RTW8851B_TSSI_GET_VAL(thm_ofst, i); |
2738 | rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, data: tmp); |
2739 | |
2740 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
2741 | fmt: "[TSSI] write 0x%x val=0x%08x\n" , |
2742 | 0x5c00 + i, tmp); |
2743 | } |
2744 | } |
2745 | rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, data: 0x1); |
2746 | rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, data: 0x0); |
2747 | } |
2748 | #undef RTW8851B_TSSI_GET_VAL |
2749 | } |
2750 | |
2751 | static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2752 | enum rtw89_rf_path path) |
2753 | { |
2754 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_dac_gain_defs_a_tbl); |
2755 | } |
2756 | |
2757 | static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2758 | enum rtw89_rf_path path) |
2759 | { |
2760 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
2761 | enum rtw89_band band = chan->band_type; |
2762 | |
2763 | rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, |
2764 | &rtw8851b_tssi_slope_a_defs_2g_tbl, |
2765 | &rtw8851b_tssi_slope_a_defs_5g_tbl); |
2766 | } |
2767 | |
2768 | static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2769 | enum rtw89_rf_path path, bool all) |
2770 | { |
2771 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
2772 | enum rtw89_band band = chan->band_type; |
2773 | |
2774 | rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, |
2775 | &rtw8851b_tssi_align_a_2g_defs_tbl, |
2776 | &rtw8851b_tssi_align_a_5g_defs_tbl); |
2777 | } |
2778 | |
2779 | static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2780 | enum rtw89_rf_path path) |
2781 | { |
2782 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_slope_defs_a_tbl); |
2783 | } |
2784 | |
2785 | static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2786 | enum rtw89_rf_path path) |
2787 | { |
2788 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_track_defs_a_tbl); |
2789 | } |
2790 | |
2791 | static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, |
2792 | enum rtw89_phy_idx phy, |
2793 | enum rtw89_rf_path path) |
2794 | { |
2795 | rtw89_rfk_parser(rtwdev, tbl: &rtw8851b_tssi_mv_avg_defs_a_tbl); |
2796 | } |
2797 | |
2798 | static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) |
2799 | { |
2800 | _tssi_set_tssi_track(rtwdev, phy, path: RF_PATH_A); |
2801 | _tssi_set_txagc_offset_mv_avg(rtwdev, phy, path: RF_PATH_A); |
2802 | |
2803 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, data: 0x0); |
2804 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, data: 0x0); |
2805 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, data: 0x1); |
2806 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_TXGA_V1, RR_TXGA_V1_TRK_EN, data: 0x1); |
2807 | |
2808 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x0); |
2809 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, data: 0x3); |
2810 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, data: 0xc0); |
2811 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x0); |
2812 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x1); |
2813 | |
2814 | rtwdev->is_tssi_mode[RF_PATH_A] = true; |
2815 | } |
2816 | |
2817 | static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) |
2818 | { |
2819 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, data: 0x0); |
2820 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x0); |
2821 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x1); |
2822 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x0); |
2823 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, data: 0x1); |
2824 | |
2825 | rtwdev->is_tssi_mode[RF_PATH_A] = false; |
2826 | } |
2827 | |
2828 | static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) |
2829 | { |
2830 | switch (ch) { |
2831 | case 1 ... 2: |
2832 | return 0; |
2833 | case 3 ... 5: |
2834 | return 1; |
2835 | case 6 ... 8: |
2836 | return 2; |
2837 | case 9 ... 11: |
2838 | return 3; |
2839 | case 12 ... 13: |
2840 | return 4; |
2841 | case 14: |
2842 | return 5; |
2843 | } |
2844 | |
2845 | return 0; |
2846 | } |
2847 | |
/* An "extra" group marks a channel sitting between two calibrated groups;
 * its DE value is derived from the two neighbours (IDX1 and IDX1 + 1).
 * The macro names below were dropped in a previous edit; they are restored
 * to match the users in _tssi_get_ofdm_group()/_tssi_get_ofdm_de() etc.
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
2853 | |
/* Map a channel number to its OFDM TSSI DE group index.
 * Channels that fall between two calibrated groups return an "extra"
 * group (TSSI_EXTRA_GROUP-tagged); _tssi_get_ofdm_de() averages the DE
 * values of the two adjacent groups for those. Unknown channels map to
 * group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
2921 | |
2922 | static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) |
2923 | { |
2924 | switch (ch) { |
2925 | case 1 ... 8: |
2926 | return 0; |
2927 | case 9 ... 14: |
2928 | return 1; |
2929 | case 36 ... 48: |
2930 | return 2; |
2931 | case 52 ... 64: |
2932 | return 3; |
2933 | case 100 ... 112: |
2934 | return 4; |
2935 | case 116 ... 128: |
2936 | return 5; |
2937 | case 132 ... 144: |
2938 | return 6; |
2939 | case 149 ... 177: |
2940 | return 7; |
2941 | } |
2942 | |
2943 | return 0; |
2944 | } |
2945 | |
2946 | static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2947 | enum rtw89_rf_path path) |
2948 | { |
2949 | struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; |
2950 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
2951 | u32 gidx, gidx_1st, gidx_2nd; |
2952 | u8 ch = chan->channel; |
2953 | s8 de_1st; |
2954 | s8 de_2nd; |
2955 | s8 val; |
2956 | |
2957 | gidx = _tssi_get_ofdm_group(rtwdev, ch); |
2958 | |
2959 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
2960 | fmt: "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n" , path, gidx); |
2961 | |
2962 | if (IS_TSSI_EXTRA_GROUP(gidx)) { |
2963 | gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); |
2964 | gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); |
2965 | de_1st = tssi_info->tssi_mcs[path][gidx_1st]; |
2966 | de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; |
2967 | val = (de_1st + de_2nd) / 2; |
2968 | |
2969 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
2970 | fmt: "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n" , |
2971 | path, val, de_1st, de_2nd); |
2972 | } else { |
2973 | val = tssi_info->tssi_mcs[path][gidx]; |
2974 | |
2975 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
2976 | fmt: "[TSSI][TRIM]: path=%d mcs de=%d\n" , path, val); |
2977 | } |
2978 | |
2979 | return val; |
2980 | } |
2981 | |
2982 | static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
2983 | enum rtw89_rf_path path) |
2984 | { |
2985 | struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; |
2986 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
2987 | u32 tgidx, tgidx_1st, tgidx_2nd; |
2988 | u8 ch = chan->channel; |
2989 | s8 tde_1st; |
2990 | s8 tde_2nd; |
2991 | s8 val; |
2992 | |
2993 | tgidx = _tssi_get_trim_group(rtwdev, ch); |
2994 | |
2995 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
2996 | fmt: "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n" , |
2997 | path, tgidx); |
2998 | |
2999 | if (IS_TSSI_EXTRA_GROUP(tgidx)) { |
3000 | tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); |
3001 | tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); |
3002 | tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; |
3003 | tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; |
3004 | val = (tde_1st + tde_2nd) / 2; |
3005 | |
3006 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
3007 | fmt: "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n" , |
3008 | path, val, tde_1st, tde_2nd); |
3009 | } else { |
3010 | val = tssi_info->tssi_trim[path][tgidx]; |
3011 | |
3012 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
3013 | fmt: "[TSSI][TRIM]: path=%d mcs trim_de=%d\n" , |
3014 | path, val); |
3015 | } |
3016 | |
3017 | return val; |
3018 | } |
3019 | |
3020 | static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) |
3021 | { |
3022 | struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; |
3023 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
3024 | u8 ch = chan->channel; |
3025 | u8 gidx; |
3026 | s8 ofdm_de; |
3027 | s8 trim_de; |
3028 | s32 val; |
3029 | u32 i; |
3030 | |
3031 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, fmt: "[TSSI][TRIM]: phy=%d ch=%d\n" , |
3032 | phy, ch); |
3033 | |
3034 | for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) { |
3035 | gidx = _tssi_get_cck_group(rtwdev, ch); |
3036 | trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, path: i); |
3037 | val = tssi_info->tssi_cck[i][gidx] + trim_de; |
3038 | |
3039 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
3040 | fmt: "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n" , |
3041 | i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); |
3042 | |
3043 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_cck_long[i], _TSSI_DE_MASK, data: val); |
3044 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_cck_short[i], _TSSI_DE_MASK, data: val); |
3045 | |
3046 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
3047 | fmt: "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n" , |
3048 | _tssi_de_cck_long[i], |
3049 | rtw89_phy_read32_mask(rtwdev, addr: _tssi_de_cck_long[i], |
3050 | _TSSI_DE_MASK)); |
3051 | |
3052 | ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, path: i); |
3053 | trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, path: i); |
3054 | val = ofdm_de + trim_de; |
3055 | |
3056 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
3057 | fmt: "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n" , |
3058 | i, ofdm_de, trim_de); |
3059 | |
3060 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_mcs_20m[i], _TSSI_DE_MASK, data: val); |
3061 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_mcs_40m[i], _TSSI_DE_MASK, data: val); |
3062 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_mcs_80m[i], _TSSI_DE_MASK, data: val); |
3063 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, data: val); |
3064 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_mcs_5m[i], _TSSI_DE_MASK, data: val); |
3065 | rtw89_phy_write32_mask(rtwdev, addr: _tssi_de_mcs_10m[i], _TSSI_DE_MASK, data: val); |
3066 | |
3067 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, |
3068 | fmt: "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n" , |
3069 | _tssi_de_mcs_20m[i], |
3070 | rtw89_phy_read32_mask(rtwdev, addr: _tssi_de_mcs_20m[i], |
3071 | _TSSI_DE_MASK)); |
3072 | } |
3073 | } |
3074 | |
3075 | static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) |
3076 | { |
3077 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3078 | fmt: "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n" |
3079 | "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n" , |
3080 | R_TSSI_PA_K1 + (path << 13), |
3081 | rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K1 + (path << 13), MASKDWORD), |
3082 | R_TSSI_PA_K2 + (path << 13), |
3083 | rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K2 + (path << 13), MASKDWORD), |
3084 | R_P0_TSSI_ALIM1 + (path << 13), |
3085 | rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD), |
3086 | R_P0_TSSI_ALIM3 + (path << 13), |
3087 | rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD), |
3088 | R_TSSI_PA_K5 + (path << 13), |
3089 | rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K5 + (path << 13), MASKDWORD), |
3090 | R_P0_TSSI_ALIM2 + (path << 13), |
3091 | rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD), |
3092 | R_P0_TSSI_ALIM4 + (path << 13), |
3093 | rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD), |
3094 | R_TSSI_PA_K8 + (path << 13), |
3095 | rtw89_phy_read32_mask(rtwdev, R_TSSI_PA_K8 + (path << 13), MASKDWORD)); |
3096 | } |
3097 | |
3098 | static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, |
3099 | enum rtw89_phy_idx phy, enum rtw89_rf_path path) |
3100 | { |
3101 | struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; |
3102 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
3103 | u8 channel = chan->channel; |
3104 | u8 band; |
3105 | |
3106 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3107 | fmt: "======>%s phy=%d path=%d\n" , __func__, phy, path); |
3108 | |
3109 | if (channel >= 1 && channel <= 14) |
3110 | band = TSSI_ALIMK_2G; |
3111 | else if (channel >= 36 && channel <= 64) |
3112 | band = TSSI_ALIMK_5GL; |
3113 | else if (channel >= 100 && channel <= 144) |
3114 | band = TSSI_ALIMK_5GM; |
3115 | else if (channel >= 149 && channel <= 177) |
3116 | band = TSSI_ALIMK_5GH; |
3117 | else |
3118 | band = TSSI_ALIMK_2G; |
3119 | |
3120 | if (tssi_info->alignment_done[path][band]) { |
3121 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD, |
3122 | data: tssi_info->alignment_value[path][band][0]); |
3123 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD, |
3124 | data: tssi_info->alignment_value[path][band][1]); |
3125 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD, |
3126 | data: tssi_info->alignment_value[path][band][2]); |
3127 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD, |
3128 | data: tssi_info->alignment_value[path][band][3]); |
3129 | } |
3130 | |
3131 | _tssi_alimentk_dump_result(rtwdev, path); |
3132 | } |
3133 | |
3134 | static void rtw8851b_by_rate_dpd(struct rtw89_dev *rtwdev) |
3135 | { |
3136 | rtw89_write32_mask(rtwdev, R_AX_PWR_SWING_OTHER_CTRL0, |
3137 | B_AX_CFIR_BY_RATE_OFF_MASK, data: 0x21861); |
3138 | } |
3139 | |
/* DPK one-time initialization: set the by-rate DPD power-swing offset */
void rtw8851b_dpk_init(struct rtw89_dev *rtwdev)
{
	rtw8851b_by_rate_dpd(rtwdev);
}
3144 | |
3145 | void rtw8851b_aack(struct rtw89_dev *rtwdev) |
3146 | { |
3147 | u32 tmp05, tmpd3, ib[4]; |
3148 | u32 tmp; |
3149 | int ret; |
3150 | int rek; |
3151 | int i; |
3152 | |
3153 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]DO AACK\n" ); |
3154 | |
3155 | tmp05 = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK); |
3156 | tmpd3 = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RFREG_MASK); |
3157 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RR_MOD_MASK, data: 0x3); |
3158 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK, data: 0x0); |
3159 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_ST, data: 0x0); |
3160 | |
3161 | for (rek = 0; rek < 4; rek++) { |
3162 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_AACK, RFREG_MASK, data: 0x8201e); |
3163 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_AACK, RFREG_MASK, data: 0x8201f); |
3164 | fsleep(usecs: 100); |
3165 | |
3166 | ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp, |
3167 | 1, 1000, false, |
3168 | rtwdev, RF_PATH_A, 0xd0, BIT(16)); |
3169 | if (ret) |
3170 | rtw89_warn(rtwdev, "[LCK]AACK timeout\n" ); |
3171 | |
3172 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_VCI, RR_VCI_ON, data: 0x1); |
3173 | for (i = 0; i < 4; i++) { |
3174 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_VCO, RR_VCO_SEL, data: i); |
3175 | ib[i] = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_IBD, RR_IBD_VAL); |
3176 | } |
3177 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_VCI, RR_VCI_ON, data: 0x0); |
3178 | |
3179 | if (ib[0] != 0 && ib[1] != 0 && ib[2] != 0 && ib[3] != 0) |
3180 | break; |
3181 | } |
3182 | |
3183 | if (rek != 0) |
3184 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]AACK rek = %d\n" , rek); |
3185 | |
3186 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK, data: tmp05); |
3187 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RFREG_MASK, data: tmpd3); |
3188 | } |
3189 | |
3190 | static void _lck_keep_thermal(struct rtw89_dev *rtwdev) |
3191 | { |
3192 | struct rtw89_lck_info *lck = &rtwdev->lck; |
3193 | |
3194 | lck->thermal[RF_PATH_A] = |
3195 | ewma_thermal_read(e: &rtwdev->phystat.avg_thermal[RF_PATH_A]); |
3196 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
3197 | fmt: "[LCK] path=%d thermal=0x%x" , RF_PATH_A, lck->thermal[RF_PATH_A]); |
3198 | } |
3199 | |
3200 | static void rtw8851b_lck(struct rtw89_dev *rtwdev) |
3201 | { |
3202 | u32 tmp05, tmp18, tmpd3; |
3203 | |
3204 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]DO LCK\n" ); |
3205 | |
3206 | tmp05 = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK); |
3207 | tmp18 = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_CFGCH, RFREG_MASK); |
3208 | tmpd3 = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RFREG_MASK); |
3209 | |
3210 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MOD, RR_MOD_MASK, data: 0x3); |
3211 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK, data: 0x0); |
3212 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x1); |
3213 | |
3214 | _set_ch(rtwdev, val: tmp18); |
3215 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RFREG_MASK, data: tmpd3); |
3216 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_RSV1, RFREG_MASK, data: tmp05); |
3217 | |
3218 | _lck_keep_thermal(rtwdev); |
3219 | } |
3220 | |
3221 | #define RTW8851B_LCK_TH 8 |
3222 | |
3223 | void rtw8851b_lck_track(struct rtw89_dev *rtwdev) |
3224 | { |
3225 | struct rtw89_lck_info *lck = &rtwdev->lck; |
3226 | u8 cur_thermal; |
3227 | int delta; |
3228 | |
3229 | cur_thermal = |
3230 | ewma_thermal_read(e: &rtwdev->phystat.avg_thermal[RF_PATH_A]); |
3231 | delta = abs((int)cur_thermal - lck->thermal[RF_PATH_A]); |
3232 | |
3233 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK_TRACK, |
3234 | fmt: "[LCK] path=%d current thermal=0x%x delta=0x%x\n" , |
3235 | RF_PATH_A, cur_thermal, delta); |
3236 | |
3237 | if (delta >= RTW8851B_LCK_TH) { |
3238 | rtw8851b_aack(rtwdev); |
3239 | rtw8851b_lck(rtwdev); |
3240 | } |
3241 | } |
3242 | |
/* Initialize LCK tracking by recording the current thermal reference */
void rtw8851b_lck_init(struct rtw89_dev *rtwdev)
{
	_lck_keep_thermal(rtwdev);
}
3247 | |
3248 | void rtw8851b_rck(struct rtw89_dev *rtwdev) |
3249 | { |
3250 | _rck(rtwdev, path: RF_PATH_A); |
3251 | } |
3252 | |
3253 | void rtw8851b_dack(struct rtw89_dev *rtwdev) |
3254 | { |
3255 | _dac_cal(rtwdev, force: false); |
3256 | } |
3257 | |
3258 | void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) |
3259 | { |
3260 | u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, paths: 0); |
3261 | u32 tx_en; |
3262 | |
3263 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_START); |
3264 | rtw89_chip_stop_sch_tx(rtwdev, mac_idx: phy_idx, tx_en: &tx_en, sel: RTW89_SCH_TX_SEL_ALL); |
3265 | _wait_rx_mode(rtwdev, kpath: _kpath(rtwdev, phy_idx)); |
3266 | |
3267 | _iqk_init(rtwdev); |
3268 | _iqk(rtwdev, phy_idx, force: false); |
3269 | |
3270 | rtw89_chip_resume_sch_tx(rtwdev, mac_idx: phy_idx, tx_en); |
3271 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_STOP); |
3272 | } |
3273 | |
3274 | void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) |
3275 | { |
3276 | u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, paths: 0); |
3277 | u32 tx_en; |
3278 | |
3279 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_RXDCK, state: BTC_WRFK_START); |
3280 | rtw89_chip_stop_sch_tx(rtwdev, mac_idx: phy_idx, tx_en: &tx_en, sel: RTW89_SCH_TX_SEL_ALL); |
3281 | _wait_rx_mode(rtwdev, kpath: _kpath(rtwdev, phy_idx)); |
3282 | |
3283 | _rx_dck(rtwdev, phy: phy_idx, is_afe: false); |
3284 | |
3285 | rtw89_chip_resume_sch_tx(rtwdev, mac_idx: phy_idx, tx_en); |
3286 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_RXDCK, state: BTC_WRFK_STOP); |
3287 | } |
3288 | |
3289 | void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) |
3290 | { |
3291 | u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, paths: 0); |
3292 | u32 tx_en; |
3293 | |
3294 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_DPK, state: BTC_WRFK_START); |
3295 | rtw89_chip_stop_sch_tx(rtwdev, mac_idx: phy_idx, tx_en: &tx_en, sel: RTW89_SCH_TX_SEL_ALL); |
3296 | _wait_rx_mode(rtwdev, kpath: _kpath(rtwdev, phy_idx)); |
3297 | |
3298 | rtwdev->dpk.is_dpk_enable = true; |
3299 | rtwdev->dpk.is_dpk_reload_en = false; |
3300 | _dpk(rtwdev, phy: phy_idx, force: false); |
3301 | |
3302 | rtw89_chip_resume_sch_tx(rtwdev, mac_idx: phy_idx, tx_en); |
3303 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_DPK, state: BTC_WRFK_STOP); |
3304 | } |
3305 | |
/* Periodic DPK tracking entry point */
void rtw8851b_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3310 | |
3311 | void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en) |
3312 | { |
3313 | u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx: phy, paths: RF_A); |
3314 | u8 i; |
3315 | |
3316 | rtw89_debug(rtwdev, mask: RTW89_DBG_TSSI, fmt: "[TSSI] %s: phy=%d\n" , __func__, phy); |
3317 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_ONESHOT_START); |
3318 | |
3319 | _tssi_disable(rtwdev, phy); |
3320 | |
3321 | for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { |
3322 | _tssi_set_sys(rtwdev, phy, path: i); |
3323 | _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, path: i); |
3324 | _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, path: i); |
3325 | _tssi_set_dck(rtwdev, phy, path: i); |
3326 | _tssi_set_tmeter_tbl(rtwdev, phy, path: i); |
3327 | _tssi_set_dac_gain_tbl(rtwdev, phy, path: i); |
3328 | _tssi_slope_cal_org(rtwdev, phy, path: i); |
3329 | _tssi_alignment_default(rtwdev, phy, path: i, all: true); |
3330 | _tssi_set_tssi_slope(rtwdev, phy, path: i); |
3331 | } |
3332 | |
3333 | _tssi_enable(rtwdev, phy); |
3334 | _tssi_set_efuse_to_de(rtwdev, phy); |
3335 | |
3336 | rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, type: BTC_WRFKT_IQK, state: BTC_WRFK_ONESHOT_STOP); |
3337 | } |
3338 | |
3339 | void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) |
3340 | { |
3341 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
3342 | u8 channel = chan->channel; |
3343 | u32 i; |
3344 | |
3345 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3346 | fmt: "======>%s phy=%d channel=%d\n" , __func__, phy, channel); |
3347 | |
3348 | _tssi_disable(rtwdev, phy); |
3349 | |
3350 | for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) { |
3351 | _tssi_set_sys(rtwdev, phy, path: i); |
3352 | _tssi_set_tmeter_tbl(rtwdev, phy, path: i); |
3353 | _tssi_slope_cal_org(rtwdev, phy, path: i); |
3354 | _tssi_alignment_default(rtwdev, phy, path: i, all: true); |
3355 | } |
3356 | |
3357 | _tssi_enable(rtwdev, phy); |
3358 | _tssi_set_efuse_to_de(rtwdev, phy); |
3359 | } |
3360 | |
3361 | static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev, |
3362 | enum rtw89_phy_idx phy, bool enable) |
3363 | { |
3364 | const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx: RTW89_SUB_ENTITY_0); |
3365 | u8 channel = chan->channel; |
3366 | |
3367 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "======> %s ch=%d\n" , |
3368 | __func__, channel); |
3369 | |
3370 | if (enable) |
3371 | return; |
3372 | |
3373 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3374 | fmt: "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x\n" , |
3375 | __func__, |
3376 | rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT)); |
3377 | |
3378 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, data: 0xc0); |
3379 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x0); |
3380 | rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, data: 0x1); |
3381 | |
3382 | _tssi_alimentk_done(rtwdev, phy, path: RF_PATH_A); |
3383 | |
3384 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3385 | fmt: "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n" , |
3386 | __func__, |
3387 | rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT)); |
3388 | |
3389 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3390 | fmt: "======> %s SCAN_END\n" , __func__); |
3391 | } |
3392 | |
3393 | void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, |
3394 | enum rtw89_phy_idx phy_idx) |
3395 | { |
3396 | if (scan_start) |
3397 | rtw8851b_tssi_default_txagc(rtwdev, phy: phy_idx, enable: true); |
3398 | else |
3399 | rtw8851b_tssi_default_txagc(rtwdev, phy: phy_idx, enable: false); |
3400 | } |
3401 | |
3402 | static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, |
3403 | enum rtw89_bandwidth bw, bool dav) |
3404 | { |
3405 | u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; |
3406 | u32 rf_reg18; |
3407 | |
3408 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK]===> %s\n" , __func__); |
3409 | |
3410 | rf_reg18 = rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK); |
3411 | if (rf_reg18 == INV_RF_DATA) { |
3412 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3413 | fmt: "[RFK]Invalid RF_0x18 for Path-%d\n" , path); |
3414 | return; |
3415 | } |
3416 | rf_reg18 &= ~RR_CFGCH_BW; |
3417 | |
3418 | switch (bw) { |
3419 | case RTW89_CHANNEL_WIDTH_5: |
3420 | case RTW89_CHANNEL_WIDTH_10: |
3421 | case RTW89_CHANNEL_WIDTH_20: |
3422 | rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M); |
3423 | break; |
3424 | case RTW89_CHANNEL_WIDTH_40: |
3425 | rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M); |
3426 | break; |
3427 | case RTW89_CHANNEL_WIDTH_80: |
3428 | rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M); |
3429 | break; |
3430 | default: |
3431 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK]Fail to set CH\n" ); |
3432 | } |
3433 | |
3434 | rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | |
3435 | RR_CFGCH_BW2) & RFREG_MASK; |
3436 | rf_reg18 |= RR_CFGCH_BW2; |
3437 | rtw89_write_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK, data: rf_reg18); |
3438 | |
3439 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK] set %x at path%d, %x =0x%x\n" , |
3440 | bw, path, reg18_addr, |
3441 | rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK)); |
3442 | } |
3443 | |
3444 | static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
3445 | enum rtw89_bandwidth bw) |
3446 | { |
3447 | _bw_setting(rtwdev, path: RF_PATH_A, bw, dav: true); |
3448 | _bw_setting(rtwdev, path: RF_PATH_A, bw, dav: false); |
3449 | } |
3450 | |
3451 | static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val) |
3452 | { |
3453 | u32 bak; |
3454 | u32 tmp; |
3455 | int ret; |
3456 | |
3457 | bak = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_LDO, RFREG_MASK); |
3458 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LDO, RR_LDO_SEL, data: 0x1); |
3459 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_CFGCH, RFREG_MASK, data: val); |
3460 | |
3461 | ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000, |
3462 | false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY); |
3463 | if (ret) |
3464 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]LCK timeout\n" ); |
3465 | |
3466 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LDO, RFREG_MASK, data: bak); |
3467 | |
3468 | return !!ret; |
3469 | } |
3470 | |
3471 | static void _lck_check(struct rtw89_dev *rtwdev) |
3472 | { |
3473 | u32 tmp; |
3474 | |
3475 | if (rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { |
3476 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]SYN MMD reset\n" ); |
3477 | |
3478 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_EN, data: 0x1); |
3479 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, data: 0x0); |
3480 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, data: 0x1); |
3481 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_MMD, RR_MMD_RST_EN, data: 0x0); |
3482 | } |
3483 | |
3484 | udelay(10); |
3485 | |
3486 | if (rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { |
3487 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]re-set RF 0x18\n" ); |
3488 | |
3489 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x1); |
3490 | tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_CFGCH, RFREG_MASK); |
3491 | _set_s0_arfc18(rtwdev, val: tmp); |
3492 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x0); |
3493 | } |
3494 | |
3495 | if (rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) { |
3496 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]SYN off/on\n" ); |
3497 | |
3498 | tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RFREG_MASK); |
3499 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RFREG_MASK, data: tmp); |
3500 | tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SX, RFREG_MASK); |
3501 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_SX, RFREG_MASK, data: tmp); |
3502 | |
3503 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, data: 0x1); |
3504 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RR_POW_SYN, data: 0x0); |
3505 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_POW, RR_POW_SYN, data: 0x3); |
3506 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, data: 0x0); |
3507 | |
3508 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x1); |
3509 | tmp = rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_CFGCH, RFREG_MASK); |
3510 | _set_s0_arfc18(rtwdev, val: tmp); |
3511 | rtw89_write_rf(rtwdev, rf_path: RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, data: 0x0); |
3512 | |
3513 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[LCK]0xb2=%x, 0xc5=%x\n" , |
3514 | rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_VCO, RFREG_MASK), |
3515 | rtw89_read_rf(rtwdev, rf_path: RF_PATH_A, RR_SYNFB, RFREG_MASK)); |
3516 | } |
3517 | } |
3518 | |
3519 | static void _set_ch(struct rtw89_dev *rtwdev, u32 val) |
3520 | { |
3521 | bool timeout; |
3522 | |
3523 | timeout = _set_s0_arfc18(rtwdev, val); |
3524 | if (!timeout) |
3525 | _lck_check(rtwdev); |
3526 | } |
3527 | |
3528 | static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, |
3529 | u8 central_ch, bool dav) |
3530 | { |
3531 | u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1; |
3532 | bool is_2g_ch = central_ch <= 14; |
3533 | u32 rf_reg18; |
3534 | |
3535 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK]===> %s\n" , __func__); |
3536 | |
3537 | rf_reg18 = rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK); |
3538 | rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | |
3539 | RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH); |
3540 | rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch); |
3541 | |
3542 | if (!is_2g_ch) |
3543 | rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) | |
3544 | FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G); |
3545 | |
3546 | rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN | |
3547 | RR_CFGCH_BW2) & RFREG_MASK; |
3548 | rf_reg18 |= RR_CFGCH_BW2; |
3549 | |
3550 | if (path == RF_PATH_A && dav) |
3551 | _set_ch(rtwdev, val: rf_reg18); |
3552 | else |
3553 | rtw89_write_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK, data: rf_reg18); |
3554 | |
3555 | rtw89_write_rf(rtwdev, rf_path: path, RR_LCKST, RR_LCKST_BIN, data: 0); |
3556 | rtw89_write_rf(rtwdev, rf_path: path, RR_LCKST, RR_LCKST_BIN, data: 1); |
3557 | |
3558 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, |
3559 | fmt: "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n" , |
3560 | central_ch, path, reg18_addr, |
3561 | rtw89_read_rf(rtwdev, rf_path: path, addr: reg18_addr, RFREG_MASK)); |
3562 | } |
3563 | |
3564 | static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch) |
3565 | { |
3566 | _ch_setting(rtwdev, path: RF_PATH_A, central_ch, dav: true); |
3567 | _ch_setting(rtwdev, path: RF_PATH_A, central_ch, dav: false); |
3568 | } |
3569 | |
3570 | static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw, |
3571 | enum rtw89_rf_path path) |
3572 | { |
3573 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE2, RR_LUTWE2_RTXBW, data: 0x1); |
3574 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWA, RR_LUTWA_M2, data: 0x12); |
3575 | |
3576 | if (bw == RTW89_CHANNEL_WIDTH_20) |
3577 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0x1b); |
3578 | else if (bw == RTW89_CHANNEL_WIDTH_40) |
3579 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0x13); |
3580 | else if (bw == RTW89_CHANNEL_WIDTH_80) |
3581 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0xb); |
3582 | else |
3583 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB, data: 0x3); |
3584 | |
3585 | rtw89_debug(rtwdev, mask: RTW89_DBG_RFK, fmt: "[RFK] set S%d RXBB BW 0x3F = 0x%x\n" , path, |
3586 | rtw89_read_rf(rtwdev, rf_path: path, RR_LUTWD0, RR_LUTWD0_LB)); |
3587 | |
3588 | rtw89_write_rf(rtwdev, rf_path: path, RR_LUTWE2, RR_LUTWE2_RTXBW, data: 0x0); |
3589 | } |
3590 | |
3591 | static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, |
3592 | enum rtw89_bandwidth bw) |
3593 | { |
3594 | u8 kpath, path; |
3595 | |
3596 | kpath = _kpath(rtwdev, phy_idx: phy); |
3597 | |
3598 | for (path = 0; path < RF_PATH_NUM_8851B; path++) { |
3599 | if (!(kpath & BIT(path))) |
3600 | continue; |
3601 | |
3602 | _set_rxbb_bw(rtwdev, bw, path); |
3603 | } |
3604 | } |
3605 | |
/* Apply channel, RF bandwidth, and RX baseband bandwidth settings.
 * NOTE(review): @band is currently unused — the 2G/5G decision is derived
 * from the channel number inside _ch_setting().
 */
static void rtw8851b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy, u8 central_ch,
				enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}
3614 | |
3615 | void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev, |
3616 | const struct rtw89_chan *chan, |
3617 | enum rtw89_phy_idx phy_idx) |
3618 | { |
3619 | rtw8851b_ctrl_bw_ch(rtwdev, phy: phy_idx, central_ch: chan->channel, band: chan->band_type, |
3620 | bw: chan->band_width); |
3621 | } |
3622 | |