// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */

#include "aq_vec.h"

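/* Typical lifecycle of a vector as driven by aq_nic (a sketch inferred from
 * the entry points below; the real call sites live in aq_nic, and error
 * handling is elided here):
 *
 *	vec = aq_vec_alloc(nic, idx, cfg);	   allocate + NAPI registration
 *	aq_vec_ring_alloc(vec, nic, idx, cfg);	   per-TC Tx/Rx ring pairs
 *	aq_vec_init(vec, hw_ops, hw);		   software + hardware ring init
 *	aq_vec_start(vec);			   start rings, enable NAPI
 *	...
 *	aq_vec_stop(vec);
 *	aq_vec_deinit(vec);
 *	aq_vec_ring_free(vec);
 *	aq_vec_free(vec);
 */
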
struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

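/* NAPI poll callback: for each traffic class serviced by this vector, reap
 * completed Tx descriptors, pass received Rx packets up the stack and refill
 * the Rx ring, then re-enable the interrupt once the budget is not exhausted.
 */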
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U; self->tx_rings > i; ++i) {
			ring = self->ring[i];
			u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			ring[AQ_VEC_RX_ID].stats.rx.polls++;
			u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
						&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

err_exit:
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}

	return work_done;
}

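/* Allocate a vector object, bind it to a CPU derived from the RSS base CPU
 * number, and register its NAPI instance with the net device.
 */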
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		goto err_exit;

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);

err_exit:
	return self;
}

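/* Allocate one Tx/Rx ring pair per traffic class for this vector and register
 * the Rx rings with the XDP core. On any failure everything allocated so far
 * is released via aq_vec_ring_free().
 */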
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
								    i, idx);

		ring = &self->ring[i][AQ_VEC_TX_ID];
		err = aq_ring_tx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
		if (err)
			goto err_exit;

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = &self->ring[i][AQ_VEC_RX_ID];
		if (xdp_rxq_info_reg(&ring->xdp_rxq,
				     aq_nic->ndev, idx,
				     self->napi.napi_id) < 0) {
			err = -ENOMEM;
			goto err_exit;
		}
		if (xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
					       MEM_TYPE_PAGE_SHARED, NULL) < 0) {
			xdp_rxq_info_unreg(&ring->xdp_rxq);
			err = -ENOMEM;
			goto err_exit;
		}

		err = aq_ring_rx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
		if (err) {
			xdp_rxq_info_unreg(&ring->xdp_rxq);
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_ring_free(self);
		self = NULL;
	}

	return err;
}

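/* Bind the vector to the hardware ops and initialize every Tx/Rx ring pair in
 * both software and hardware, pre-filling the Rx rings with buffers.
 */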
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

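/* Start the hardware Tx and Rx rings of every traffic class and enable NAPI
 * polling for this vector.
 */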
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

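/* Stop the hardware Tx and Rx rings of every traffic class and disable NAPI
 * polling for this vector.
 */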
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

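/* Release in-flight buffers: clean any remaining Tx descriptors and tear down
 * the Rx ring contents for every traffic class.
 */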
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

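/* Unregister the NAPI instance and free the vector object itself. */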
void aq_vec_free(struct aq_vec_s *self)
{
	if (!self)
		goto err_exit;

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

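/* Free all Tx/Rx rings owned by this vector, unregistering the Rx rings from
 * the XDP core; handles the case where Rx allocation stopped short of Tx.
 */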
void aq_vec_ring_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; self->tx_rings > i; ++i) {
		ring = self->ring[i];
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		if (i < self->rx_rings) {
			xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
			aq_ring_free(&ring[AQ_VEC_RX_ID]);
		}
	}

	self->tx_rings = 0;
	self->rx_rings = 0;
err_exit:;
}

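/* MSI-X interrupt handler: schedule NAPI polling for the vector that raised
 * the interrupt.
 */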
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

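/* Legacy (INTx) interrupt handler: read the interrupt status from hardware;
 * if a cause is pending, mask this vector's interrupt and schedule NAPI,
 * otherwise re-enable interrupts and report the interrupt as not handled.
 */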
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
						1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
{
	return tc < self->rx_rings && tc < self->tx_rings;
}

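/* Fill @data with the software counters of the given traffic class: Rx ring
 * stats first, then Tx ring stats appended. Returns the number of u64 values
 * written, or 0 for an invalid TC.
 */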
unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
{
	unsigned int count;

	if (!aq_vec_is_valid_tc(self, tc))
		return 0;

	count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
	count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);

	return count;
}