// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define FDMA_XTR_CHANNEL		6
#define FDMA_INJ_CHANNEL		0

#define FDMA_DCB_INFO_DATAL(x)		((x) & GENMASK(15, 0))
#define FDMA_DCB_INFO_TOKEN		BIT(17)
#define FDMA_DCB_INFO_INTR		BIT(18)
#define FDMA_DCB_INFO_SW(x)		(((x) << 24) & GENMASK(31, 24))

#define FDMA_DCB_STATUS_BLOCKL(x)	((x) & GENMASK(15, 0))
#define FDMA_DCB_STATUS_SOF		BIT(16)
#define FDMA_DCB_STATUS_EOF		BIT(17)
#define FDMA_DCB_STATUS_INTR		BIT(18)
#define FDMA_DCB_STATUS_DONE		BIT(19)
#define FDMA_DCB_STATUS_BLOCKO(x)	(((x) << 20) & GENMASK(31, 20))
#define FDMA_DCB_INVALID_DATA		0x1

#define FDMA_XTR_BUFFER_SIZE		2048
#define FDMA_WEIGHT			4

/* Frame DMA DCB format
 *
 * +---------------------------+
 * |         Next Ptr          |
 * +---------------------------+
 * |   Reserved  |    Info     |
 * +---------------------------+
 * |         Data0 Ptr         |
 * +---------------------------+
 * |   Reserved  |   Status0   |
 * +---------------------------+
 * |         Data1 Ptr         |
 * +---------------------------+
 * |   Reserved  |   Status1   |
 * +---------------------------+
 * |         Data2 Ptr         |
 * +---------------------------+
 * |   Reserved  |   Status2   |
 * +---------------------------+
 * |             :             |
 * |             :             |
 * |             :             |
 * +---------------------------+
 * |         Data14 Ptr        |
 * +---------------------------+
 * |   Reserved  |   Status14  |
 * +---------------------------+
 */
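
/* In C terms, a DCB maps onto the sparx5_rx_dcb_hw/sparx5_tx_dcb_hw
 * types used below (see sparx5_main.h): a nextptr word, an info word,
 * then an array of FDMA_RX_DCB_MAX_DBS resp. FDMA_TX_DCB_MAX_DBS
 * struct sparx5_db_hw entries, each a {dataptr, status} pair matching
 * the rows above.
 */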

/* For each hardware DB there is an entry in this list; when the HW DB
 * is used, its SW DB entry is moved to the back of the list
 */
struct sparx5_db {
	struct list_head list;
	void *cpu_addr;
};
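
/* Since used entries rotate to the back, the head of tx->db_list is
 * always the least recently used buffer; sparx5_fdma_xmit() below takes
 * the head entry and moves it to the tail again.
 */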

static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx,
				   struct sparx5_rx_dcb_hw *dcb,
				   u64 nextptr)
{
	int idx = 0;

	/* Reset the status of the DB */
	for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) {
		struct sparx5_db_hw *db = &dcb->db[idx];

		db->status = FDMA_DCB_STATUS_INTR;
	}
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx,
				   struct sparx5_tx_dcb_hw *dcb,
				   u64 nextptr)
{
	int idx = 0;

	/* Reset the status of the DB */
	for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) {
		struct sparx5_db_hw *db = &dcb->db[idx];

		db->status = FDMA_DCB_STATUS_DONE;
	}
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE);
}

static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(rx->channel_id));
	spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id));

	/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
		sparx5, FDMA_CH_CFG(rx->channel_id));

	/* Set the RX Watermark to max */
	spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
		 sparx5,
		 FDMA_XTR_CFG);

	/* Start RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Enable RX channel DB interrupt */
	spx5_rmw(BIT(rx->channel_id),
		 BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Activate the RX channel */
	spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Deactivate the RX channel */
	spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);

	/* Disable RX channel DB interrupt */
	spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Stop RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));
}

static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(tx->channel_id));
	spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id));

	/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
		sparx5, FDMA_CH_CFG(tx->channel_id));

	/* Start TX fdma */
	spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Activate the channel */
	spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Disable the channel */
	spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	/* Reload the RX channel */
	spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD);
}

static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Reload the TX channel */
	spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD);
}

static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx)
{
	return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE,
				  GFP_ATOMIC);
}

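/* Process one received frame: check that the current DB has been
 * completed by the hardware, hand the filled skb up the stack and
 * install a freshly allocated skb in the DB so it can be reused.
 */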
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct sparx5_db_hw *db_hw;
	unsigned int packet_size;
	struct sparx5_port *port;
	struct sk_buff *new_skb;
	struct frame_info fi;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	/* Check if the DCB is done */
	db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE)))
		return false;
	skb = rx->skb[rx->dcb_index][rx->db_index];
	/* Replace the DB entry with a new SKB */
	new_skb = sparx5_fdma_rx_alloc_skb(rx);
	if (unlikely(!new_skb))
		return false;
	/* Map the new skb data and set the new skb */
	dma_addr = virt_to_phys(new_skb->data);
	rx->skb[rx->dcb_index][rx->db_index] = new_skb;
	db_hw->dataptr = dma_addr;
	packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status);
	skb_put(skb, packet_size);
	/* Now do the normal processing of the skb */
	sparx5_ifh_parse((u32 *)skb->data, &fi);
	/* Map to port netdev */
	port = fi.src_port < SPX5_PORTS ? sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, XTR_QUEUE);
		return false;
	}
	skb->dev = port->ndev;
	skb_pull(skb, IFH_LEN * sizeof(u32));
	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);
	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;
	rx->packets++;
	netif_receive_skb(skb);
	return true;
}

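/* NAPI poll: receive up to @weight frames. When all DBs of a DCB have
 * been consumed, the DCB is recycled by linking it back in at the tail.
 * Once the queue is drained, interrupts are re-enabled; any progress
 * triggers a channel reload so the hardware sees the new tail.
 */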
static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	int counter = 0;

	while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
		struct sparx5_rx_dcb_hw *old_dcb;

		rx->db_index++;
		counter++;
		/* Check if the DCB can be reused */
		if (rx->db_index != FDMA_RX_DCB_MAX_DBS)
			continue;
		/* As the DCB can be reused, just advance the dcb_index
		 * pointer and set the nextptr in the DCB
		 */
		rx->db_index = 0;
		old_dcb = &rx->dcb_entries[rx->dcb_index];
		rx->dcb_index++;
		rx->dcb_index &= FDMA_DCB_MAX - 1;
		sparx5_fdma_rx_add_dcb(rx, old_dcb,
				       rx->dma +
				       ((unsigned long)old_dcb -
					(unsigned long)rx->dcb_entries));
	}
	if (counter < weight) {
		napi_complete_done(&rx->napi, counter);
		spx5_rmw(BIT(rx->channel_id),
			 BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
			 sparx5, FDMA_INTR_DB_ENA);
	}
	if (counter)
		sparx5_fdma_rx_reload(sparx5, rx);
	return counter;
}

static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx,
						     struct sparx5_tx_dcb_hw *dcb)
{
	struct sparx5_tx_dcb_hw *next_dcb;

	next_dcb = dcb;
	next_dcb++;
	/* Handle wrap-around */
	if ((unsigned long)next_dcb >=
	    ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb)))
		next_dcb = tx->first_entry;
	return next_dcb;
}

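/* Inject a frame: claim the next DCB in the ring, copy the IFH and the
 * frame data into its bounce buffer and mark the block with SOF/EOF and
 * its total length. The channel is activated on first use and reloaded
 * on every subsequent transmit.
 */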
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
{
	struct sparx5_tx_dcb_hw *next_dcb_hw;
	struct sparx5_tx *tx = &sparx5->tx;
	static bool first_time = true;
	struct sparx5_db_hw *db_hw;
	struct sparx5_db *db;

	next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
	db_hw = &next_dcb_hw->db[0];
	if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
		return -EINVAL;
	db = list_first_entry(&tx->db_list, struct sparx5_db, list);
	list_move_tail(&db->list, &tx->db_list);
	next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
	tx->curr_entry->nextptr = tx->dma +
				  ((unsigned long)next_dcb_hw -
				   (unsigned long)tx->first_entry);
	tx->curr_entry = next_dcb_hw;
	memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
	memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
	memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
	db_hw->status = FDMA_DCB_STATUS_SOF |
			FDMA_DCB_STATUS_EOF |
			FDMA_DCB_STATUS_BLOCKO(0) |
			FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4);
	if (first_time) {
		sparx5_fdma_tx_activate(sparx5, tx);
		first_time = false;
	} else {
		sparx5_fdma_tx_reload(sparx5, tx);
	}
	return NETDEV_TX_OK;
}

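/* Allocate the RX DCB ring and one skb per DB. Each skb data pointer is
 * programmed directly as the DB dataptr, so a received frame lands in
 * skb->data without any copy; finally NAPI is registered and the
 * channel activated.
 */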
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_rx_dcb_hw *dcb;
	int idx, jdx;
	int size;

	size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
	if (!rx->dcb_entries)
		return -ENOMEM;
	rx->dma = virt_to_phys(rx->dcb_entries);
	rx->last_entry = rx->dcb_entries;
	rx->db_index = 0;
	rx->dcb_index = 0;
	/* Now for each dcb allocate the db */
	for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
		dcb = &rx->dcb_entries[idx];
		dcb->info = 0;
		/* For each db allocate an skb and map skb data pointer to the DB
		 * dataptr. This way, when the frame is received, skb->data
		 * already contains the frame and no memcpy is needed
		 */
		for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) {
			struct sparx5_db_hw *db_hw = &dcb->db[jdx];
			dma_addr_t dma_addr;
			struct sk_buff *skb;

			skb = sparx5_fdma_rx_alloc_skb(rx);
			if (!skb)
				return -ENOMEM;

			dma_addr = virt_to_phys(skb->data);
			db_hw->dataptr = dma_addr;
			db_hw->status = 0;
			rx->skb[idx][jdx] = skb;
		}
		sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx);
	}
	netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
			      FDMA_WEIGHT);
	napi_enable(&rx->napi);
	sparx5_fdma_rx_activate(sparx5, rx);
	return 0;
}

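/* Allocate the TX DCB ring plus one bounce buffer and SW DB entry per
 * hardware DB. curr_entry is left pointing at the last DCB so that the
 * first transmit wraps around and starts at the beginning of the ring.
 */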
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct sparx5_tx_dcb_hw *dcb;
	int idx, jdx;
	int size;

	size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL);
	if (!tx->curr_entry)
		return -ENOMEM;
	tx->dma = virt_to_phys(tx->curr_entry);
	tx->first_entry = tx->curr_entry;
	INIT_LIST_HEAD(&tx->db_list);
	/* Now for each dcb allocate the db */
	for (idx = 0; idx < FDMA_DCB_MAX; ++idx) {
		dcb = &tx->curr_entry[idx];
		dcb->info = 0;
		/* TX data buffers must be 16-byte aligned */
		for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) {
			struct sparx5_db_hw *db_hw = &dcb->db[jdx];
			struct sparx5_db *db;
			dma_addr_t phys;
			void *cpu_addr;

			cpu_addr = devm_kzalloc(sparx5->dev,
						FDMA_XTR_BUFFER_SIZE,
						GFP_KERNEL);
			if (!cpu_addr)
				return -ENOMEM;
			phys = virt_to_phys(cpu_addr);
			db_hw->dataptr = phys;
			db_hw->status = 0;
			db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
			if (!db)
				return -ENOMEM;
			db->cpu_addr = cpu_addr;
			list_add_tail(&db->list, &tx->db_list);
		}
		sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx);
		/* Let curr_entry point to the last allocated entry */
		if (idx == FDMA_DCB_MAX - 1)
			tx->curr_entry = dcb;
	}
	return 0;
}

static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
				struct sparx5_rx *rx, int channel)
{
	int idx;

	rx->channel_id = channel;
	/* Fetch a netdev for SKB and NAPI use; any will do */
	for (idx = 0; idx < SPX5_PORTS; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			rx->ndev = port->ndev;
			break;
		}
	}
}

static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
				struct sparx5_tx *tx, int channel)
{
	tx->channel_id = channel;
}

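/* FDMA interrupt top-half: DB interrupts are masked and acknowledged
 * and the RX processing deferred to NAPI; error interrupts are logged
 * (rate limited) and acknowledged.
 */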
irqreturn_t sparx5_fdma_handler(int irq, void *args)
{
	struct sparx5 *sparx5 = args;
	u32 db = 0, err = 0;

	db = spx5_rd(sparx5, FDMA_INTR_DB);
	err = spx5_rd(sparx5, FDMA_INTR_ERR);
	/* Clear interrupt */
	if (db) {
		spx5_wr(0, sparx5, FDMA_INTR_DB_ENA);
		spx5_wr(db, sparx5, FDMA_INTR_DB);
		napi_schedule(&sparx5->rx.napi);
	}
	if (err) {
		u32 err_type = spx5_rd(sparx5, FDMA_ERRORS);

		dev_err_ratelimited(sparx5->dev,
				    "ERR: int: %#x, type: %#x\n",
				    err, err_type);
		spx5_wr(err, sparx5, FDMA_INTR_ERR);
		spx5_wr(err_type, sparx5, FDMA_ERRORS);
	}
	return IRQ_HANDLED;
}

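/* Switch the extraction and injection queue groups over to FDMA mode
 * and prepare both internal CPU ports for IFH-prefixed frames without
 * preamble.
 */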
static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;
	int urgency;

	/* Change mode to fdma extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable port in queue system */
		urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500);
		spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
			 QFWD_SWITCH_PORT_MODE_PORT_ENA |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
			 sparx5,
			 QFWD_SWITCH_PORT_MODE(portno));

		/* Disable Disassembler buffer underrun watchdog
		 * to avoid truncated packets in XTR
		 */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));

		/* Disable frame aging */
		spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1),
			 HSCH_PORT_MODE_AGE_DIS,
			 sparx5,
			 HSCH_PORT_MODE(portno));
	}
}

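/* Bring up the FDMA: reset the block, force ACP caching, switch the
 * queue groups to FDMA mode and allocate the RX and TX rings.
 */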
int sparx5_fdma_start(struct sparx5 *sparx5)
{
	int err;

	/* Reset FDMA state */
	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);

	/* Force ACP caching but disable read/write allocation */
	spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) |
		 CPU_PROC_CTRL_ACP_AWCACHE_SET(0) |
		 CPU_PROC_CTRL_ACP_ARCACHE_SET(0),
		 CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA |
		 CPU_PROC_CTRL_ACP_AWCACHE |
		 CPU_PROC_CTRL_ACP_ARCACHE,
		 sparx5, CPU_PROC_CTRL);

	sparx5_fdma_injection_mode(sparx5);
	sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL);
	sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL);
	err = sparx5_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err);
		return err;
	}
	err = sparx5_fdma_tx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err);
		return err;
	}
	return err;
}

static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}

int sparx5_fdma_stop(struct sparx5 *sparx5)
{
	u32 val;

	napi_disable(&sparx5->rx.napi);
	/* Stop the fdma and channel interrupts */
	sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
	sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
	/* Wait for the RX channel to stop */
	read_poll_timeout(sparx5_fdma_port_ctrl, val,
			  FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
			  500, 10000, 0, sparx5);
	return 0;
}