// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING		0
#define RX_IRQ_NO_COALESC		0
#define RX_IRQ_NO_LLI_TIMER		0
#define RX_IRQ_NO_CREDIT		0
#define RX_IRQ_NO_RESEND_TIMER		0
#define HINIC_RX_BUFFER_WRITE		16

#define HINIC_RX_IPV6_PKT		7
#define LRO_PKT_HDR_LEN_IPV4		66
#define LRO_PKT_HDR_LEN_IPV6		86
#define LRO_REPLENISH_THLD		256

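/* per-segment header length added back when accounting the bytes of an
 * LRO-coalesced frame
 */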
#define LRO_PKT_HDR_LEN(cqe)		\
	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
	 HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts = 0;
	rxq_stats->bytes = 0;
	rxq_stats->errors = 0;
	rxq_stats->csum_errors = 0;
	rxq_stats->other_errors = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
		stats->errors = rxq_stats->csum_errors +
				rxq_stats->other_errors;
		stats->csum_errors = rxq_stats->csum_errors;
		stats->other_errors = rxq_stats->other_errors;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
}

/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}

static void rx_csum(struct hinic_rxq *rxq, u32 status,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	u32 csum_err;

	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
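		/* count only genuine checksum failures; "not checked by HW"
		 * and "other" statuses leave verification to the stack
		 */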
		if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
				  HINIC_RX_CSUM_IPSU_OTHER_ERR)))
			rxq->rxq_stats.csum_errors++;
		skb->ip_summed = CHECKSUM_NONE;
	}
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb)
		return NULL;

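	/* map the new buffer so the device can DMA the received frame
	 * into it
	 */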
	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb)
			goto skb_out;

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();	/* write all the wqes before updating PI */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

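	/* release every receive buffer still posted to the hardware queue */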
	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining length of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the remainder of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

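		/* the first extra fragment hangs off frag_list; later
		 * fragments are chained through skb->next
		 */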
		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}

static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
			       struct sk_buff *skb)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_buf = nic_dev->lb_test_rx_buf;
	int lb_len = nic_dev->lb_pkt_len;
	int pkt_offset, frag_len, i;
	void *frag_data = NULL;

	if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
		nic_dev->lb_test_rx_idx = 0;
		netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test packets\n");
	}

	if (skb->len != nic_dev->lb_pkt_len) {
		netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
		nic_dev->lb_test_rx_idx++;
		return;
	}

	pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
	frag_len = (int)skb_headlen(skb);
	memcpy(lb_buf + pkt_offset, skb->data, frag_len);
	pkt_offset += frag_len;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
		frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
		memcpy(lb_buf + pkt_offset, frag_data, frag_len);
		pkt_offset += frag_len;
	}
	nic_dev->lb_test_rx_idx++;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	struct net_device *netdev = rxq->netdev;
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_rq_wqe *rq_wqe;
	struct hinic_dev *nic_dev;
	unsigned int free_wqebbs;
	struct hinic_rq_cqe *cqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	unsigned int status;
	struct sk_buff *skb;
	u32 offload_type;
	u16 ci, num_lro;
	u16 num_wqe = 0;
	u32 vlan_len;
	u16 vid;

	nic_dev = netdev_priv(netdev);

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		/* make sure we read rx_done before packet length */
		dma_rmb();

		cqe = rq->cqe[ci];
		status = be32_to_cpu(cqe->status);
		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, status, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

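		/* strip the VLAN tag reported in the CQE when RX VLAN
		 * offload is enabled
		 */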
		offload_type = be32_to_cpu(cqe->offload_type);
		vlan_len = be32_to_cpu(cqe->len);
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
			vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		if (unlikely(nic_dev->flags & HINIC_LP_TEST))
			hinic_copy_lp_data(nic_dev, skb);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;

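		/* for an LRO-coalesced frame, add back the headers of the
		 * merged segments and track consumed buffers so the queue is
		 * replenished before the NAPI budget is exhausted
		 */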
		num_lro = HINIC_GET_RX_NUM_LRO(status);
		if (num_lro) {
			rx_bytes += ((num_lro - 1) *
				     LRO_PKT_HDR_LEN(cqe));

			num_wqe +=
				(u16)(pkt_len >> rxq->rx_buff_shift) +
				((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
		}

		cqe->status = 0;

		if (num_wqe >= LRO_REPLENISH_THLD)
			break;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}

static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);

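	/* polling is complete; re-arm the queue's MSI-X vector (PF only) */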
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_ENABLE);

	return pkts;
}

static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,
			      nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}

static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}

static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until napi is complete */
	nic_dev = netdev_priv(rxq->netdev);
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}

static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(rq, struct hinic_qp, rq);

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = rq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, rxq->netdev,
			  "Failed to set RX interrupt coalescing attribute\n");
		goto err_req_irq;
	}

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err)
		goto err_req_irq;

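	/* spread the queue IRQs across the online CPUs */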
	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
	err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
	if (err)
		goto err_irq_affinity;

	return 0;

err_irq_affinity:
	free_irq(rq->irq, rxq);
err_req_irq:
	rx_del_napi(rxq);
	return err;
}

static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_update_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts;

	rxq->netdev = netdev;
	rxq->rq = rq;
	rxq->buf_len = HINIC_RX_BUF_SZ;
	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

	rxq_stats_init(rxq);

	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
				       "%s_rxq%d", netdev->name, qp->q_id);
	if (!rxq->irq_name)
		return -ENOMEM;

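	/* fill the RQ with receive buffers before enabling the IRQ */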
	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}