// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000

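/* A reader's note (summarizing the code below): SER, the system error
 * recovery handler, is an event-driven state machine. Errors reported
 * through rtw89_ser_notify() are translated into ser_evt events, queued
 * by ser_send_msg(), and dispatched to the current state's handler from
 * ser_hdl_work. Three recovery levels exist:
 *
 *  - L0: handled by firmware alone; the driver only sees SER_EV_L0_RESET.
 *  - L1: TRX reset handshaked with firmware via the M1/M3/M5 checkpoints
 *        (SER_RESET_TRX_ST -> SER_DO_HCI_ST -> back to SER_IDLE_ST);
 *        any M-step timeout escalates to L2.
 *  - L2: full reset: dump firmware state via devcoredump, tear down all
 *        stations/vifs, and ask mac80211 to restart the hardware.
 */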
enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET_PREPARE, /* pre-M0 */
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M1_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_L1_RESET_PRE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}
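
/* For reference, the first invocation below,
 * RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple, RTW89_SER_CD_FW_RSVD_PLE,
 * RTW89_FW_RSVD_PLE_SIZE), expands to roughly:
 *
 *	struct ser_cd_fw_rsvd_ple {
 *		u32 type;	// = RTW89_SER_CD_FW_RSVD_PLE
 *		u32 type_size;	// = sizeof(data)
 *		u64 padding;	// = 0x0123456789abcdef, a fixed marker
 *		u8 data[RTW89_FW_RSVD_PLE_SIZE];
 *	} __packed;
 *	static void ser_cd_fw_rsvd_ple_init(struct ser_cd_fw_rsvd_ple *p);
 *
 * so each core-dump section is a self-describing record that a parser
 * can walk by its type/type_size header.
 */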

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE = 0,
	RTW89_SER_CD_FW_BACKTRACE = 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;
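
/* Both sections travel as one blob through dev_coredumpv(); userspace
 * can read it back from the devcoredump class device (typically
 * /sys/class/devcoredump/devcd<N>/data) and split it using the
 * type/type_size headers above.
 */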

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredump, buf's lifetime is supposed to be
	 * handled by the device coredump framework. Note that a new dump
	 * will be discarded if a previous one hasn't been released by
	 * framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* When some problems happen during filling data of core dump,
	 * we won't send it to device coredump framework. Instead, we
	 * free buf by ourselves.
	 */
	vfree(buf);
}

static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_lps(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}
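
/* Note on the plumbing above: ser_send_msg() only takes msg_q_lock and
 * allocates with GFP_ATOMIC, so events may be posted from atomic
 * context; actual handling always happens later in ser_hdl_work.
 * Timeouts reuse the same path: ser_set_alarm() arms a delayed work
 * that posts ser->alarm_event, and states that arm an alarm call
 * ser_del_alarm() on SER_EV_STATE_OUT so a stale timeout cannot leak
 * into the next state.
 */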

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
	rtwvif->tdls_peer = 0;
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);

	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);

	rtwdev->total_sta_assoc = 0;
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
	else
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "lv1 rcvy fail to start dma: %d\n", ret);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
	else
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "lv1 rcvy fail to stop dma: %d\n", ret);

	return ret;
}

static void hal_send_post_m0_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RESET_START_DMAC);
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}
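
/* The hal_send_*_event helpers above drive the firmware half of the L1
 * handshake: writing MAC_AX_ERR_L1_RESET_START_DMAC (post-M0),
 * MAC_AX_ERR_L1_DISABLE_EN (M2) or MAC_AX_ERR_L1_RCVY_EN (M4) tells the
 * firmware which step to perform; the firmware's answers (M1/M3/M5)
 * come back through rtw89_ser_notify() as events for the handlers
 * below.
 */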

/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		clear_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
		break;
	case SER_EV_L1_RESET_PREPARE:
		ser_state_goto(ser, SER_L1_RESET_PRE_ST);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		set_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		ser->prehandle_l1 = true;
		hal_send_post_m0_event(ser);
		ser_set_alarm(ser, 1000, SER_EV_M1_TIMEOUT);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_M1_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		cancel_delayed_work_sync(&rtwdev->track_work);
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* set alarm to prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
					     RTW89_TRACK_WORK_PERIOD);
		break;

	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = mac->mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, filter_model_addr, base_addr);

		for (i = indir_access_addr + residue;
		     i < indir_access_addr + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}
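
/* ser_mac_mem_dump() uses the MAC's indirect access scheme: writing a
 * page base address to filter_model_addr maps one MAC_MEM_DUMP_PAGE_SIZE
 * page into the register window at indir_access_addr, which is then
 * read out 4 bytes at a time. 'residue' offsets into the first page
 * when start_addr is not page-aligned.
 */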

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}

struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));

static u32 convert_addr_from_wcpu(u32 wcpu_addr)
{
	if (wcpu_addr < 0x30000000)
		return wcpu_addr;

	return wcpu_addr & GENMASK(28, 0);
}
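
/* Addresses reported by the WCPU at or above 0x30000000 are masked down
 * to their low 29 bits (GENMASK(28, 0)) so they can be used with the
 * indirect access window (see rtw89_ser_fw_backtrace_dump() below).
 */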

static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 fwbt_addr = convert_addr_from_wcpu(ent->wcpu_addr);
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, filter_model_addr, fwbt_addr);

	for (i = indir_access_addr;
	     i < indir_access_addr + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}
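
/* The backtrace area is only trusted if the entry passes three sanity
 * checks (non-zero address, magic key RTW89_FW_BACKTRACE_KEY, bounded
 * and properly sized length); each record read back is a {ra, sp} pair,
 * i.e. one saved return-address/stack-pointer frame of the firmware
 * call chain.
 */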

static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	rtw89_entity_init(rtwdev);
	rtw89_fw_release_general_pkt_list(rtwdev, false);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET_PREPARE, "SER_EV_L1_RESET_PREPARE pre-m0"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET m1"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M1_TIMEOUT, "SER_EV_M1_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_L1_RESET_PRE_ST, "SER_L1_RESET_PRE_ST", ser_l1_reset_pre_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};
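
/* Both tables are indexed directly by enum value (see ser_st_name(),
 * ser_ev_name() and ser_state_run()), so their entries must stay in the
 * same order as enum ser_state and enum ser_evt.
 */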

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}

int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_PREERR_DMAC: /* pre-M0 */
		event = SER_EV_L1_RESET_PREPARE;
		break;
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);
