// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"

#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
#define IDPF_VC_XN_RING_LEN U8_MAX
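
/* The 16-bit sw_cookie carried with each mailbox message packs the
 * transaction index into bits 7:0 and the salt into bits 15:8. As an
 * illustrative example (values hypothetical): idx = 0x2a and salt = 0x7f
 * yield a cookie of 0x7f2a via
 *
 *	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, 0x7f) |
 *		 FIELD_PREP(IDPF_VC_XN_IDX_M, 0x2a);
 *
 * and the receive side recovers both halves with FIELD_GET() before
 * matching a reply to its transaction.
 */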

/**
 * enum idpf_vc_xn_state - Virtchnl transaction status
 * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
 * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
 * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
 *				  buffer updated
 * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
 *				 was an error, buffer not updated
 * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
 * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
 *		      return context; a callback may be provided to handle
 *		      return
 */
enum idpf_vc_xn_state {
	IDPF_VC_XN_IDLE = 1,
	IDPF_VC_XN_WAITING,
	IDPF_VC_XN_COMPLETED_SUCCESS,
	IDPF_VC_XN_COMPLETED_FAILED,
	IDPF_VC_XN_SHUTDOWN,
	IDPF_VC_XN_ASYNC,
};

struct idpf_vc_xn;
/* Callback for asynchronous messages */
typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
			    const struct idpf_ctlq_msg *);

/**
 * struct idpf_vc_xn - Data structure representing virtchnl transactions
 * @completed: virtchnl event loop uses that to signal when a reply is
 *	       available, uses kernel completion API
 * @state: virtchnl event loop stores the data below, protected by the
 *	   completion's lock.
 * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
 *	      truncated on its way to the receiver thread according to
 *	      reply_buf.iov_len.
 * @reply: Reference to the buffer(s) where the reply data should be written
 *	   to. May be 0-length (then NULL address permitted) if the reply data
 *	   should be ignored.
 * @async_handler: if sent asynchronously, a callback can be provided to handle
 *		   the reply when it's received
 * @vc_op: corresponding opcode sent with this transaction
 * @idx: index used as retrieval on reply receive, used for cookie
 * @salt: changed every message to make unique, used for cookie
 */
struct idpf_vc_xn {
	struct completion completed;
	enum idpf_vc_xn_state state;
	size_t reply_sz;
	struct kvec reply;
	async_vc_cb async_handler;
	u32 vc_op;
	u8 idx;
	u8 salt;
};

/**
 * struct idpf_vc_xn_params - Parameters for executing transaction
 * @send_buf: kvec for send buffer
 * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
 * @timeout_ms: timeout to wait for reply
 * @async: send message asynchronously, will not wait on completion
 * @async_handler: If sent asynchronously, optional callback handler. The user
 *		   must be careful when using async handlers as the memory for
 *		   the recv_buf _cannot_ be on stack if this is async.
 * @vc_op: virtchnl op to send
 */
struct idpf_vc_xn_params {
	struct kvec send_buf;
	struct kvec recv_buf;
	int timeout_ms;
	bool async;
	async_vc_cb async_handler;
	u32 vc_op;
};
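
/* A minimal synchronous usage sketch for the transaction API (mirroring
 * idpf_send_ver_msg() later in this file); the buffer and opcode here are
 * illustrative only:
 *
 *	struct idpf_vc_xn_params xn_params = {};
 *	struct virtchnl2_version_info vvi = {};
 *	ssize_t reply_sz;
 *
 *	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
 *	xn_params.send_buf.iov_base = &vvi;
 *	xn_params.send_buf.iov_len = sizeof(vvi);
 *	xn_params.recv_buf = xn_params.send_buf;
 *	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
 *	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
 *
 * A negative reply_sz is an error; otherwise it is the size of the reply
 * received, which the caller should validate against the expected
 * structure size before trusting the contents.
 */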

/**
 * struct idpf_vc_xn_manager - Manager for tracking transactions
 * @ring: backing and lookup for transactions
 * @free_xn_bm: bitmap for free transactions
 * @xn_bm_lock: make bitmap access synchronous where necessary
 * @salt: used to make cookie unique every message
 */
struct idpf_vc_xn_manager {
	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
	spinlock_t xn_bm_lock;
	u8 salt;
};
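
/* Note: IDPF_VC_XN_RING_LEN is U8_MAX (255), so every ring index fits the
 * 8-bit IDPF_VC_XN_IDX_M cookie field. Combined with the 8-bit rolling
 * salt, a stale reply is only mistaken for a current one if it arrives
 * after the same slot has been reused exactly 256 transactions later,
 * which this design accepts as vanishingly unlikely.
 */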

/**
 * idpf_vid_to_vport - Translate vport id to vport pointer
 * @adapter: private data struct
 * @v_id: vport id to translate
 *
 * Returns vport matching v_id, NULL if not found.
 */
static
struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
{
	u16 num_max_vports = idpf_get_max_vports(adapter);
	int i;

	for (i = 0; i < num_max_vports; i++)
		if (adapter->vport_ids[i] == v_id)
			return adapter->vports[i];

	return NULL;
}

/**
 * idpf_handle_event_link - Handle link event message
 * @adapter: private data struct
 * @v2e: virtchnl event message
 */
static void idpf_handle_event_link(struct idpf_adapter *adapter,
				   const struct virtchnl2_event *v2e)
{
	struct idpf_netdev_priv *np;
	struct idpf_vport *vport;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
	if (!vport) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
				    v2e->vport_id);
		return;
	}
	np = netdev_priv(vport->netdev);

	vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);

	if (vport->link_up == v2e->link_status)
		return;

	vport->link_up = v2e->link_status;

	if (np->state != __IDPF_VPORT_UP)
		return;

	if (vport->link_up) {
		netif_tx_start_all_queues(vport->netdev);
		netif_carrier_on(vport->netdev);
	} else {
		netif_tx_stop_all_queues(vport->netdev);
		netif_carrier_off(vport->netdev);
	}
}

/**
 * idpf_recv_event_msg - Receive virtchnl event message
 * @adapter: Driver specific private structure
 * @ctlq_msg: message to copy from
 *
 * Receive virtchnl event message
 */
static void idpf_recv_event_msg(struct idpf_adapter *adapter,
				struct idpf_ctlq_msg *ctlq_msg)
{
	int payload_size = ctlq_msg->ctx.indirect.payload->size;
	struct virtchnl2_event *v2e;
	u32 event;

	if (payload_size < sizeof(*v2e)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    payload_size);
		return;
	}

	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
	event = le32_to_cpu(v2e->event);

	switch (event) {
	case VIRTCHNL2_EVENT_LINK_CHANGE:
		idpf_handle_event_link(adapter, v2e);
		return;
	default:
		dev_err(&adapter->pdev->dev,
			"Unknown event %d from PF\n", event);
		break;
	}
}

/**
 * idpf_mb_clean - Reclaim the send mailbox queue entries
 * @adapter: Driver specific private structure
 *
 * Reclaim the send mailbox queue entries to be used to send further messages
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_mb_clean(struct idpf_adapter *adapter)
{
	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
	struct idpf_ctlq_msg **q_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
	if (!q_msg)
		return -ENOMEM;

	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
	if (err)
		goto err_kfree;

	for (i = 0; i < num_q_msg; i++) {
		if (!q_msg[i])
			continue;
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem)
			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
					  dma_mem->va, dma_mem->pa);
		kfree(q_msg[i]);
		kfree(dma_mem);
	}

err_kfree:
	kfree(q_msg);

	return err;
}

/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 * @cookie: unique SW generated cookie per message
 *
 * Will prepare the control queue message and initiate the send API
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg, u16 cookie)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected nothing much can be
	 * done. This thread should silently abort and is expected to
	 * be corrected with a new run either by user or driver
	 * flows after reset
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	err = idpf_mb_clean(adapter);
	if (err)
		return err;

	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;
	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	/* It's possible we're just sending an opcode but no buffer */
	if (msg && msg_size)
		memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;
	ctlq_msg->ctx.sw_cookie.data = cookie;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}

/* API for virtchnl "transaction" support ("xn" for short).
 *
 * We are reusing the completion lock to serialize the accesses to the
 * transaction state for simplicity, but it could be its own separate
 * synchronization primitive as well. For now, this API is only used from
 * within a workqueue context; raw_spin_lock() is enough.
 */
/**
 * idpf_vc_xn_lock - Request exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_lock(xn) \
	raw_spin_lock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
 * @xn: struct idpf_vc_xn* to access
 */
#define idpf_vc_xn_unlock(xn) \
	raw_spin_unlock(&(xn)->completed.wait.lock)

/**
 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
 * reset the transaction state.
 * @xn: struct idpf_vc_xn to update
 */
static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
{
	xn->reply.iov_base = NULL;
	xn->reply.iov_len = 0;

	if (xn->state != IDPF_VC_XN_SHUTDOWN)
		xn->state = IDPF_VC_XN_IDLE;
}

/**
 * idpf_vc_xn_init - Initialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 */
static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_init(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		xn->state = IDPF_VC_XN_IDLE;
		xn->idx = i;
		idpf_vc_xn_release_bufs(xn);
		init_completion(&xn->completed);
	}

	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}

/**
 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
 * @vcxn_mngr: pointer to vc transaction manager struct
 *
 * All waiting threads will be woken up and their transactions aborted. Further
 * operations on that object will fail.
 */
static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
	int i;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];

		idpf_vc_xn_lock(xn);
		xn->state = IDPF_VC_XN_SHUTDOWN;
		idpf_vc_xn_release_bufs(xn);
		idpf_vc_xn_unlock(xn);
		complete_all(&xn->completed);
	}
}

/**
 * idpf_vc_xn_pop_free - Pop a free transaction from free list
 * @vcxn_mngr: transaction manager to pop from
 *
 * Returns NULL if no free transactions
 */
static
struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
	struct idpf_vc_xn *xn = NULL;
	unsigned long free_idx;

	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
	if (free_idx == IDPF_VC_XN_RING_LEN)
		goto do_unlock;

	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
	xn = &vcxn_mngr->ring[free_idx];
	xn->salt = vcxn_mngr->salt++;

do_unlock:
	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);

	return xn;
}

/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
				 struct idpf_vc_xn *xn)
{
	idpf_vc_xn_release_bufs(xn);
	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}

/**
 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
 * @adapter: driver specific private structure with vcxn_mngr
 * @params: parameters for this particular transaction including
 *	    -vc_op: virtchnl operation to send
 *	    -send_buf: kvec iov for send buf and len
 *	    -recv_buf: kvec iov for recv buf and len (ignored if NULL)
 *	    -timeout_ms: timeout waiting for a reply (milliseconds)
 *	    -async: don't wait for message reply, will lose caller context
 *	    -async_handler: callback to handle async replies
 *
 * @returns >= 0 for success, the size of the initial reply (may or may not be
 * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
 * error.
 */
static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
			       const struct idpf_vc_xn_params *params)
{
	const struct kvec *send_buf = &params->send_buf;
	struct idpf_vc_xn *xn;
	ssize_t retval;
	u16 cookie;

	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
	/* no free transactions available */
	if (!xn)
		return -ENOSPC;

	idpf_vc_xn_lock(xn);
	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
		retval = -ENXIO;
		goto only_unlock;
	} else if (xn->state != IDPF_VC_XN_IDLE) {
		/* We're just going to clobber this transaction even though
		 * it's not IDLE. If we don't reuse it we could theoretically
		 * eventually leak all the free transactions and not be able to
		 * send any messages. At least this way we make an attempt to
		 * remain functional even though something really bad is
		 * happening that's corrupting what was supposed to be free
		 * transactions.
		 */
		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
			  xn->idx, xn->vc_op);
	}

	xn->reply = params->recv_buf;
	xn->reply_sz = 0;
	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
	xn->vc_op = params->vc_op;
	xn->async_handler = params->async_handler;
	idpf_vc_xn_unlock(xn);

	if (!params->async)
		reinit_completion(&xn->completed);
	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	retval = idpf_send_mb_msg(adapter, params->vc_op,
				  send_buf->iov_len, send_buf->iov_base,
				  cookie);
	if (retval) {
		idpf_vc_xn_lock(xn);
		goto release_and_unlock;
	}

	if (params->async)
		return 0;

	wait_for_completion_timeout(&xn->completed,
				    msecs_to_jiffies(params->timeout_ms));

	/* No need to check the return value; we check the final state of the
	 * transaction below. It's possible the transaction actually gets more
	 * timeout than specified if we get preempted here but after
	 * wait_for_completion_timeout returns. This should be a non-issue,
	 * however.
	 */
	idpf_vc_xn_lock(xn);
	switch (xn->state) {
	case IDPF_VC_XN_SHUTDOWN:
		retval = -ENXIO;
		goto only_unlock;
	case IDPF_VC_XN_WAITING:
		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
				       params->vc_op, params->timeout_ms);
		retval = -ETIME;
		break;
	case IDPF_VC_XN_COMPLETED_SUCCESS:
		retval = xn->reply_sz;
		break;
	case IDPF_VC_XN_COMPLETED_FAILED:
		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
				       params->vc_op);
		retval = -EIO;
		break;
	default:
		/* Invalid state. */
		WARN_ON_ONCE(1);
		retval = -EIO;
		break;
	}

release_and_unlock:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
	/* If we receive a VC reply after here, it will be dropped. */
only_unlock:
	idpf_vc_xn_unlock(xn);

	return retval;
}

/**
 * idpf_vc_xn_forward_async - Handle async reply receives
 * @adapter: private data struct
 * @xn: transaction to handle
 * @ctlq_msg: corresponding ctlq_msg
 *
 * For async sends we're going to lose the caller's context so, if an
 * async_handler was provided, it can deal with the reply, otherwise we'll just
 * check and report if there is an error.
 */
static int
idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	int err = 0;

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		err = -EINVAL;
		goto release_bufs;
	}

	if (xn->async_handler) {
		err = xn->async_handler(adapter, xn, ctlq_msg);
		goto release_bufs;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
	}

release_bufs:
	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);

	return err;
}

/**
 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
 * @adapter: driver specific private structure with vcxn_mngr
 * @ctlq_msg: controlq message to send back to receiving thread
 */
static int
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
			 const struct idpf_ctlq_msg *ctlq_msg)
{
	const void *payload = NULL;
	size_t payload_size = 0;
	struct idpf_vc_xn *xn;
	u16 msg_info;
	int err = 0;
	u16 xn_idx;
	u16 salt;

	msg_info = ctlq_msg->ctx.sw_cookie.data;
	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
				    xn_idx);
		return -EINVAL;
	}
	xn = &adapter->vcxn_mngr->ring[xn_idx];
	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
	if (xn->salt != salt) {
		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
				    xn->salt, salt);
		return -EINVAL;
	}

	idpf_vc_xn_lock(xn);
	switch (xn->state) {
	case IDPF_VC_XN_WAITING:
		/* success */
		break;
	case IDPF_VC_XN_IDLE:
		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EINVAL;
		goto out_unlock;
	case IDPF_VC_XN_SHUTDOWN:
		/* ENXIO is a bit special here as the recv msg loop uses that
		 * to know if it should stop trying to clean the ring if we
		 * lost the virtchnl. We need to stop playing with registers
		 * and yield.
		 */
		err = -ENXIO;
		goto out_unlock;
	case IDPF_VC_XN_ASYNC:
		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
		idpf_vc_xn_unlock(xn);
		return err;
	default:
		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode);
		err = -EBUSY;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->cookie.mbx.chnl_retval) {
		xn->reply_sz = 0;
		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
		err = -EINVAL;
		goto out_unlock;
	}

	if (ctlq_msg->data_len) {
		payload = ctlq_msg->ctx.indirect.payload->va;
		payload_size = ctlq_msg->ctx.indirect.payload->size;
	}

	xn->reply_sz = payload_size;
	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;

	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
		memcpy(xn->reply.iov_base, payload,
		       min_t(size_t, xn->reply.iov_len, payload_size));

out_unlock:
	idpf_vc_xn_unlock(xn);
	/* we _cannot_ hold lock while calling complete */
	complete(&xn->completed);

	return err;
}

/**
 * idpf_recv_mb_msg - Receive message over mailbox
 * @adapter: Driver specific private structure
 *
 * Will receive control queue message and posts the receive buffer. Returns 0
 * on success and negative on failure.
 */
int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int post_err, err;
	u16 num_recv;

	while (1) {
		/* This will receive at most num_recv messages and report in
		 * num_recv how many were actually received.
		 */
		num_recv = 1;
		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
		if (err || !num_recv)
			break;

		if (ctlq_msg.data_len) {
			dma_mem = ctlq_msg.ctx.indirect.payload;
		} else {
			dma_mem = NULL;
			num_recv = 0;
		}

		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
			idpf_recv_event_msg(adapter, &ctlq_msg);
		else
			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);

		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
						   adapter->hw.arq,
						   &num_recv, &dma_mem);

		/* If post failed clear the only buffer we supplied */
		if (post_err) {
			if (dma_mem)
				dmam_free_coherent(&adapter->pdev->dev,
						   dma_mem->size, dma_mem->va,
						   dma_mem->pa);
			break;
		}

		/* virtchnl trying to shutdown, stop cleaning */
		if (err == -ENXIO)
			break;
	}

	return err;
}

/**
 * idpf_wait_for_marker_event - wait for software marker response
 * @vport: virtual port data structure
 *
 * Returns 0 on success, negative on failure.
 **/
static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
	int event;
	int i;

	for (i = 0; i < vport->num_txq; i++)
		set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);

	event = wait_event_timeout(vport->sw_marker_wq,
				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
						      vport->flags),
				   msecs_to_jiffies(500));

	for (i = 0; i < vport->num_txq; i++)
		clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);

	if (event)
		return 0;

	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");

	return -ETIMEDOUT;
}

/**
 * idpf_send_ver_msg - send virtchnl version message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl version message. Returns 0 on success, negative on failure.
 */
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi;
	ssize_t reply_sz;
	u32 major, minor;
	int err = 0;

	if (adapter->virt_ver_maj) {
		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
	} else {
		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
	}

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
	xn_params.send_buf.iov_base = &vvi;
	xn_params.send_buf.iov_len = sizeof(vvi);
	xn_params.recv_buf = xn_params.send_buf;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(vvi))
		return -EIO;

	major = le32_to_cpu(vvi.major);
	minor = le32_to_cpu(vvi.minor);

	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
		return -EINVAL;
	}

	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");

	/* If we have a mismatch, resend version to update receiver on what
	 * version we will use.
	 */
	if (!adapter->virt_ver_maj &&
	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
		err = -EAGAIN;

	adapter->virt_ver_maj = major;
	adapter->virt_ver_min = minor;

	return err;
}

/**
 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
 * @adapter: Driver specific private structure
 *
 * Send virtchnl get capabilities message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
	struct virtchnl2_get_capabilities caps = {};
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	caps.csum_caps =
		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);

	caps.seg_caps =
		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP |
			    VIRTCHNL2_CAP_SEG_IPV4_UDP |
			    VIRTCHNL2_CAP_SEG_IPV4_SCTP |
			    VIRTCHNL2_CAP_SEG_IPV6_TCP |
			    VIRTCHNL2_CAP_SEG_IPV6_UDP |
			    VIRTCHNL2_CAP_SEG_IPV6_SCTP |
			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);

	caps.rss_caps =
		cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
			    VIRTCHNL2_CAP_RSS_IPV4_UDP |
			    VIRTCHNL2_CAP_RSS_IPV4_SCTP |
			    VIRTCHNL2_CAP_RSS_IPV4_OTHER |
			    VIRTCHNL2_CAP_RSS_IPV6_TCP |
			    VIRTCHNL2_CAP_RSS_IPV6_UDP |
			    VIRTCHNL2_CAP_RSS_IPV6_SCTP |
			    VIRTCHNL2_CAP_RSS_IPV6_OTHER);

	caps.hsplit_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);

	caps.rsc_caps =
		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP |
			    VIRTCHNL2_CAP_RSC_IPV6_TCP);

	caps.other_caps =
		cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
			    VIRTCHNL2_CAP_MACFILTER |
			    VIRTCHNL2_CAP_SPLITQ_QSCHED |
			    VIRTCHNL2_CAP_PROMISC |
			    VIRTCHNL2_CAP_LOOPBACK);

	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
	xn_params.send_buf.iov_base = &caps;
	xn_params.send_buf.iov_len = sizeof(caps);
	xn_params.recv_buf.iov_base = &adapter->caps;
	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;
	if (reply_sz < sizeof(adapter->caps))
		return -EIO;

	return 0;
}

/**
 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
			    struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;
	u16 default_vports = idpf_get_default_vports(adapter);
	int max_rx_q, max_tx_q;

	mutex_lock(&adapter->queue_lock);

	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
	if (adapter->num_alloc_vports < default_vports) {
		max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
		max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
	} else {
		max_q->max_rxq = IDPF_MIN_Q;
		max_q->max_txq = IDPF_MIN_Q;
	}
	max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
	max_q->max_complq = max_q->max_txq;

	if (avail_queues->avail_rxq < max_q->max_rxq ||
	    avail_queues->avail_txq < max_q->max_txq ||
	    avail_queues->avail_bufq < max_q->max_bufq ||
	    avail_queues->avail_complq < max_q->max_complq) {
		mutex_unlock(&adapter->queue_lock);

		return -EINVAL;
	}

	avail_queues->avail_rxq -= max_q->max_rxq;
	avail_queues->avail_txq -= max_q->max_txq;
	avail_queues->avail_bufq -= max_q->max_bufq;
	avail_queues->avail_complq -= max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);

	return 0;
}

/**
 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
 * @adapter: Driver specific private structure
 * @max_q: vport max queue structure
 */
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct idpf_avail_queue_info *avail_queues;

	mutex_lock(&adapter->queue_lock);
	avail_queues = &adapter->avail_queues;

	avail_queues->avail_rxq += max_q->max_rxq;
	avail_queues->avail_txq += max_q->max_txq;
	avail_queues->avail_bufq += max_q->max_bufq;
	avail_queues->avail_complq += max_q->max_complq;

	mutex_unlock(&adapter->queue_lock);
}

/**
 * idpf_init_avail_queues - Initialize available queues on the device
 * @adapter: Driver specific private structure
 */
static void idpf_init_avail_queues(struct idpf_adapter *adapter)
{
	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
	struct virtchnl2_get_capabilities *caps = &adapter->caps;

	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
}

/**
 * idpf_get_reg_intr_vecs - Get vector queue register offset
 * @vport: virtual port structure
 * @reg_vals: Register offsets to store in
 *
 * Returns number of registers that got populated
 */
int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
			   struct idpf_vec_regs *reg_vals)
{
	struct virtchnl2_vector_chunks *chunks;
	struct idpf_vec_regs reg_val;
	u16 num_vchunks, num_vec;
	int num_regs = 0, i, j;

	chunks = &vport->adapter->req_vec_chunks->vchunks;
	num_vchunks = le16_to_cpu(chunks->num_vchunks);

	for (j = 0; j < num_vchunks; j++) {
		struct virtchnl2_vector_chunk *chunk;
		u32 dynctl_reg_spacing;
		u32 itrn_reg_spacing;

		chunk = &chunks->vchunks[j];
		num_vec = le16_to_cpu(chunk->num_vectors);
		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);

		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
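
		/* Register offsets advance linearly from each chunk's start;
		 * e.g. (hypothetical values) dynctl_reg_start = 0x1000 with
		 * dynctl_reg_spacing = 0x4 places successive vectors at
		 * 0x1000, 0x1004, 0x1008, ...
		 */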

		for (i = 0; i < num_vec; i++) {
			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
			reg_vals[num_regs].itrn_index_spacing =
				reg_val.itrn_index_spacing;

			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
			reg_val.itrn_reg += itrn_reg_spacing;
			num_regs++;
		}
	}

	return num_regs;
}

/**
 * idpf_vport_get_q_reg - Get the queue registers for the vport
 * @reg_vals: register values needing to be set
 * @num_regs: amount we expect to fill
 * @q_type: queue model
 * @chunks: queue regs received over mailbox
 *
 * This function parses the queue register offsets from the queue register
 * chunk information, with a specific queue type and stores it into the array
 * passed as an argument. It returns the actual number of queue registers that
 * are filled.
 */
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
				struct virtchnl2_queue_reg_chunks *chunks)
{
	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
	int reg_filled = 0, i;
	u32 reg_val;

	while (num_chunks--) {
		struct virtchnl2_queue_reg_chunk *chunk;
		u16 num_q;

		chunk = &chunks->chunks[num_chunks];
		if (le32_to_cpu(chunk->type) != q_type)
			continue;

		num_q = le32_to_cpu(chunk->num_queues);
		reg_val = le64_to_cpu(chunk->qtail_reg_start);
		for (i = 0; i < num_q && reg_filled < num_regs; i++) {
			reg_vals[reg_filled++] = reg_val;
			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
		}
	}

	return reg_filled;
}

/**
 * __idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @reg_vals: registers we are initializing
 * @num_regs: how many registers there are in total
 * @q_type: queue model
 *
 * Return number of queues that are initialized
 */
static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
				 int num_regs, u32 q_type)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_queue *q;
	int i, j, k = 0;

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < vport->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
				tx_qgrp->txqs[j]->tail =
					idpf_get_reg_addr(adapter, reg_vals[k]);
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u16 num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
				q = rx_qgrp->singleq.rxqs[j];
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u8 num_bufqs = vport->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	default:
		break;
	}

	return k;
}

/**
 * idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 *
 * Return 0 on success, negative on failure
 */
int idpf_queue_reg_init(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
	int num_regs, ret = 0;
	u32 *reg_vals;

	/* We may never deal with more than 256 queues of the same type */
	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
	if (!reg_vals)
		return -ENOMEM;

	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
		struct virtchnl2_add_queues *vc_aq =
			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
		chunks = &vc_aq->chunks;
	} else {
		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

	/* Initialize Tx queue tail register address */
	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
					VIRTCHNL2_QUEUE_TYPE_TX,
					chunks);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
					 VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_regs < vport->num_txq) {
		ret = -EINVAL;
		goto free_reg_vals;
	}

	/* Initialize Rx/buffer queue tail register address based on Rx queue
	 * model
	 */
	if (idpf_is_queue_model_split(vport->rxq_model)) {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
						chunks);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
		if (num_regs < vport->num_bufq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	} else {
		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
						VIRTCHNL2_QUEUE_TYPE_RX,
						chunks);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}

		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
						 VIRTCHNL2_QUEUE_TYPE_RX);
		if (num_regs < vport->num_rxq) {
			ret = -EINVAL;
			goto free_reg_vals;
		}
	}

free_reg_vals:
	kfree(reg_vals);

	return ret;
}

/**
 * idpf_send_create_vport_msg - Send virtchnl create vport message
 * @adapter: Driver specific private structure
 * @max_q: vport max queue info
 *
 * Send virtchnl create vport message
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
			       struct idpf_vport_max_q *max_q)
{
	struct virtchnl2_create_vport *vport_msg;
	struct idpf_vc_xn_params xn_params = {};
	u16 idx = adapter->next_vport;
	int err, buf_size;
	ssize_t reply_sz;

	buf_size = sizeof(struct virtchnl2_create_vport);
	if (!adapter->vport_params_reqd[idx]) {
		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
							  GFP_KERNEL);
		if (!adapter->vport_params_reqd[idx])
			return -ENOMEM;
	}

	vport_msg = adapter->vport_params_reqd[idx];
	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	vport_msg->vport_index = cpu_to_le16(idx);

	if (adapter->req_tx_splitq)
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	if (adapter->req_rx_splitq)
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	else
		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);

	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
	if (err) {
		dev_err(&adapter->pdev->dev, "Enough queues are not available");

		return err;
	}

	if (!adapter->vport_params_recvd[idx]) {
		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
							   GFP_KERNEL);
		if (!adapter->vport_params_recvd[idx]) {
			err = -ENOMEM;
			goto free_vport_params;
		}
	}

	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
	xn_params.send_buf.iov_base = vport_msg;
	xn_params.send_buf.iov_len = buf_size;
	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0) {
		err = reply_sz;
		goto free_vport_params;
	}
	if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
		err = -EIO;
		goto free_vport_params;
	}

	return 0;

free_vport_params:
	kfree(adapter->vport_params_recvd[idx]);
	adapter->vport_params_recvd[idx] = NULL;
	kfree(adapter->vport_params_reqd[idx]);
	adapter->vport_params_reqd[idx] = NULL;

	return err;
}

/**
 * idpf_check_supported_desc_ids - Verify we have required descriptor support
 * @vport: virtual port structure
 *
 * Return 0 on success, error on failure
 */
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_create_vport *vport_msg;
	u64 rx_desc_ids, tx_desc_ids;

	vport_msg = adapter->vport_params_recvd[vport->idx];

	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);

	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
		}
	} else {
		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
			vport->base_rxd = true;
	}

	if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
		return 0;

	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
	}

	return 0;
}

/**
 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
 * @vport: virtual port data structure
 *
 * Send virtchnl destroy vport message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
 * @vport: virtual port data structure
 *
 * Send enable vport virtchnl message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
 * @vport: virtual port data structure
 *
 * Send disable vport virtchnl message. Returns 0 on success, negative on
 * failure.
 */
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id;
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

/**
 * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
 * @vport: virtual port data structure
 *
 * Send config tx queues virtchnl message. Returns 0 on success, negative on
 * failure.
 */
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	u32 config_sz, chunk_sz, buf_sz;
	int totqs, num_msgs, num_chunks;
	ssize_t reply_sz;
	int i, k = 0;

	totqs = vport->num_txq + vport->num_complq;
	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
	if (!qi)
		return -ENOMEM;

	/* Populate the queue info buffer with all queue context info */
	for (i = 0; i < vport->num_txq_grp; i++) {
		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
		int j, sched_mode;

		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
			qi[k].queue_id =
				cpu_to_le32(tx_qgrp->txqs[j]->q_id);
			qi[k].model =
				cpu_to_le16(vport->txq_model);
			qi[k].type =
				cpu_to_le32(tx_qgrp->txqs[j]->q_type);
			qi[k].ring_len =
				cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
			qi[k].dma_ring_addr =
				cpu_to_le64(tx_qgrp->txqs[j]->dma);
			if (idpf_is_queue_model_split(vport->txq_model)) {
				struct idpf_queue *q = tx_qgrp->txqs[j];

				qi[k].tx_compl_queue_id =
					cpu_to_le16(tx_qgrp->complq->q_id);
				qi[k].relative_queue_id = cpu_to_le16(j);

				if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
					qi[k].sched_mode =
						cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
				else
					qi[k].sched_mode =
						cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
			} else {
				qi[k].sched_mode =
					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
			}
		}

		if (!idpf_is_queue_model_split(vport->txq_model))
			continue;

		qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
		qi[k].model = cpu_to_le16(vport->txq_model);
		qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);

		if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
		else
			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
		qi[k].sched_mode = cpu_to_le16(sched_mode);

		k++;
	}

	/* Make sure accounting agrees */
	if (k != totqs)
		return -EINVAL;

	/* Chunk up the queue contexts into multiple messages to avoid
	 * sending a control queue message buffer that is too large
	 */
	config_sz = sizeof(struct virtchnl2_config_tx_queues);
	chunk_sz = sizeof(struct virtchnl2_txq_info);

	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
			   totqs);
	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
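
	/* Worked example (sizes hypothetical): if one mailbox buffer fits
	 * 8 txq_info chunks and totqs = 20, then num_chunks = 8 and
	 * num_msgs = DIV_ROUND_UP(20, 8) = 3, sending 8 + 8 + 4 chunks as
	 * num_chunks is clamped to the remaining totqs each iteration.
	 */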

	buf_sz = struct_size(ctq, qinfo, num_chunks);
	ctq = kzalloc(buf_sz, GFP_KERNEL);
	if (!ctq)
		return -ENOMEM;

	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	for (i = 0, k = 0; i < num_msgs; i++) {
		memset(ctq, 0, buf_sz);
		ctq->vport_id = cpu_to_le32(vport->vport_id);
		ctq->num_qinfo = cpu_to_le16(num_chunks);
		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);

		xn_params.send_buf.iov_base = ctq;
		xn_params.send_buf.iov_len = buf_sz;
		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_chunks;
		totqs -= num_chunks;
		num_chunks = min(num_chunks, totqs);
		/* Recalculate buffer size */
		buf_sz = struct_size(ctq, qinfo, num_chunks);
	}

	return 0;
}
1537 | |
1538 | /** |
1539 | * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message |
1540 | * @vport: virtual port data structure |
1541 | * |
1542 | * Send config rx queues virtchnl message. Returns 0 on success, negative on |
1543 | * failure. |
1544 | */ |
1545 | static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) |
1546 | { |
1547 | struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL; |
1548 | struct virtchnl2_rxq_info *qi __free(kfree) = NULL; |
1549 | struct idpf_vc_xn_params xn_params = {}; |
1550 | u32 config_sz, chunk_sz, buf_sz; |
1551 | int totqs, num_msgs, num_chunks; |
1552 | ssize_t reply_sz; |
1553 | int i, k = 0; |
1554 | |
1555 | totqs = vport->num_rxq + vport->num_bufq; |
1556 | qi = kcalloc(n: totqs, size: sizeof(struct virtchnl2_rxq_info), GFP_KERNEL); |
1557 | if (!qi) |
1558 | return -ENOMEM; |
1559 | |
1560 | /* Populate the queue info buffer with all queue context info */ |
1561 | for (i = 0; i < vport->num_rxq_grp; i++) { |
1562 | struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; |
1563 | u16 num_rxq; |
1564 | int j; |
1565 | |
1566 | if (!idpf_is_queue_model_split(q_model: vport->rxq_model)) |
1567 | goto setup_rxqs; |
1568 | |
1569 | for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { |
1570 | struct idpf_queue *bufq = |
1571 | &rx_qgrp->splitq.bufq_sets[j].bufq; |
1572 | |
1573 | qi[k].queue_id = cpu_to_le32(bufq->q_id); |
1574 | qi[k].model = cpu_to_le16(vport->rxq_model); |
1575 | qi[k].type = cpu_to_le32(bufq->q_type); |
1576 | qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); |
1577 | qi[k].ring_len = cpu_to_le16(bufq->desc_count); |
1578 | qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); |
1579 | qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); |
1580 | qi[k].buffer_notif_stride = bufq->rx_buf_stride; |
1581 | qi[k].rx_buffer_low_watermark = |
1582 | cpu_to_le16(bufq->rx_buffer_low_watermark); |
1583 | if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) |
1584 | qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); |
1585 | } |
1586 | |
1587 | setup_rxqs: |
1588 | if (idpf_is_queue_model_split(q_model: vport->rxq_model)) |
1589 | num_rxq = rx_qgrp->splitq.num_rxq_sets; |
1590 | else |
1591 | num_rxq = rx_qgrp->singleq.num_rxq; |
1592 | |
1593 | for (j = 0; j < num_rxq; j++, k++) { |
1594 | struct idpf_queue *rxq; |
1595 | |
			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1597 | rxq = rx_qgrp->singleq.rxqs[j]; |
1598 | goto common_qi_fields; |
1599 | } |
1600 | rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; |
1601 | qi[k].rx_bufq1_id = |
1602 | cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id); |
1603 | if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { |
1604 | qi[k].bufq2_ena = IDPF_BUFQ2_ENA; |
1605 | qi[k].rx_bufq2_id = |
1606 | cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); |
1607 | } |
1608 | qi[k].rx_buffer_low_watermark = |
1609 | cpu_to_le16(rxq->rx_buffer_low_watermark); |
1610 | if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) |
1611 | qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); |
1612 | |
1613 | common_qi_fields: |
1614 | if (rxq->rx_hsplit_en) { |
1615 | qi[k].qflags |= |
1616 | cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); |
1617 | qi[k].hdr_buffer_size = |
1618 | cpu_to_le16(rxq->rx_hbuf_size); |
1619 | } |
1620 | qi[k].queue_id = cpu_to_le32(rxq->q_id); |
1621 | qi[k].model = cpu_to_le16(vport->rxq_model); |
1622 | qi[k].type = cpu_to_le32(rxq->q_type); |
1623 | qi[k].ring_len = cpu_to_le16(rxq->desc_count); |
1624 | qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); |
1625 | qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); |
1626 | qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size); |
1627 | qi[k].qflags |= |
1628 | cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); |
1629 | qi[k].desc_ids = cpu_to_le64(rxq->rxdids); |
1630 | } |
1631 | } |
1632 | |
1633 | /* Make sure accounting agrees */ |
1634 | if (k != totqs) |
1635 | return -EINVAL; |
1636 | |
1637 | /* Chunk up the queue contexts into multiple messages to avoid |
1638 | * sending a control queue message buffer that is too large |
1639 | */ |
1640 | config_sz = sizeof(struct virtchnl2_config_rx_queues); |
1641 | chunk_sz = sizeof(struct virtchnl2_rxq_info); |
1642 | |
1643 | num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), |
1644 | totqs); |
1645 | num_msgs = DIV_ROUND_UP(totqs, num_chunks); |
1646 | |
1647 | buf_sz = struct_size(crq, qinfo, num_chunks); |
	crq = kzalloc(buf_sz, GFP_KERNEL);
1649 | if (!crq) |
1650 | return -ENOMEM; |
1651 | |
1652 | xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES; |
1653 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
1654 | |
1655 | for (i = 0, k = 0; i < num_msgs; i++) { |
1656 | memset(crq, 0, buf_sz); |
1657 | crq->vport_id = cpu_to_le32(vport->vport_id); |
1658 | crq->num_qinfo = cpu_to_le16(num_chunks); |
1659 | memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); |
1660 | |
1661 | xn_params.send_buf.iov_base = crq; |
1662 | xn_params.send_buf.iov_len = buf_sz; |
		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1664 | if (reply_sz < 0) |
1665 | return reply_sz; |
1666 | |
1667 | k += num_chunks; |
1668 | totqs -= num_chunks; |
1669 | num_chunks = min(num_chunks, totqs); |
1670 | /* Recalculate buffer size */ |
1671 | buf_sz = struct_size(crq, qinfo, num_chunks); |
1672 | } |
1673 | |
1674 | return 0; |
1675 | } |
1676 | |
1677 | /** |
1678 | * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable |
1679 | * queues message |
1680 | * @vport: virtual port data structure |
1681 | * @ena: if true enable, false disable |
1682 | * |
1683 | * Send enable or disable queues virtchnl message. Returns 0 on success, |
1684 | * negative on failure. |
1685 | */ |
1686 | static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) |
1687 | { |
1688 | struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; |
1689 | struct virtchnl2_queue_chunk *qc __free(kfree) = NULL; |
1690 | u32 num_msgs, num_chunks, num_txq, num_rxq, num_q; |
1691 | struct idpf_vc_xn_params xn_params = {}; |
1692 | struct virtchnl2_queue_chunks *qcs; |
1693 | u32 config_sz, chunk_sz, buf_sz; |
1694 | ssize_t reply_sz; |
1695 | int i, j, k = 0; |
1696 | |
1697 | num_txq = vport->num_txq + vport->num_complq; |
1698 | num_rxq = vport->num_rxq + vport->num_bufq; |
1699 | num_q = num_txq + num_rxq; |
1700 | buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q; |
	qc = kzalloc(buf_sz, GFP_KERNEL);
1702 | if (!qc) |
1703 | return -ENOMEM; |
1704 | |
1705 | for (i = 0; i < vport->num_txq_grp; i++) { |
1706 | struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; |
1707 | |
1708 | for (j = 0; j < tx_qgrp->num_txq; j++, k++) { |
1709 | qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); |
1710 | qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); |
1711 | qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); |
1712 | } |
1713 | } |
1714 | if (vport->num_txq != k) |
1715 | return -EINVAL; |
1716 | |
	if (!idpf_is_queue_model_split(vport->txq_model))
1718 | goto setup_rx; |
1719 | |
1720 | for (i = 0; i < vport->num_txq_grp; i++, k++) { |
1721 | struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; |
1722 | |
1723 | qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); |
1724 | qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); |
1725 | qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); |
1726 | } |
1727 | if (vport->num_complq != (k - vport->num_txq)) |
1728 | return -EINVAL; |
1729 | |
1730 | setup_rx: |
1731 | for (i = 0; i < vport->num_rxq_grp; i++) { |
1732 | struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; |
1733 | |
		if (idpf_is_queue_model_split(vport->rxq_model))
1735 | num_rxq = rx_qgrp->splitq.num_rxq_sets; |
1736 | else |
1737 | num_rxq = rx_qgrp->singleq.num_rxq; |
1738 | |
1739 | for (j = 0; j < num_rxq; j++, k++) { |
			if (idpf_is_queue_model_split(vport->rxq_model)) {
1741 | qc[k].start_queue_id = |
1742 | cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); |
1743 | qc[k].type = |
1744 | cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); |
1745 | } else { |
1746 | qc[k].start_queue_id = |
1747 | cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); |
1748 | qc[k].type = |
1749 | cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); |
1750 | } |
1751 | qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); |
1752 | } |
1753 | } |
1754 | if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) |
1755 | return -EINVAL; |
1756 | |
	if (!idpf_is_queue_model_split(vport->rxq_model))
1758 | goto send_msg; |
1759 | |
1760 | for (i = 0; i < vport->num_rxq_grp; i++) { |
1761 | struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; |
1762 | |
1763 | for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { |
1764 | struct idpf_queue *q; |
1765 | |
1766 | q = &rx_qgrp->splitq.bufq_sets[j].bufq; |
1767 | qc[k].type = cpu_to_le32(q->q_type); |
1768 | qc[k].start_queue_id = cpu_to_le32(q->q_id); |
1769 | qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); |
1770 | } |
1771 | } |
1772 | if (vport->num_bufq != k - (vport->num_txq + |
1773 | vport->num_complq + |
1774 | vport->num_rxq)) |
1775 | return -EINVAL; |
1776 | |
1777 | send_msg: |
1778 | /* Chunk up the queue info into multiple messages */ |
1779 | config_sz = sizeof(struct virtchnl2_del_ena_dis_queues); |
1780 | chunk_sz = sizeof(struct virtchnl2_queue_chunk); |
1781 | |
1782 | num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), |
1783 | num_q); |
1784 | num_msgs = DIV_ROUND_UP(num_q, num_chunks); |
1785 | |
1786 | buf_sz = struct_size(eq, chunks.chunks, num_chunks); |
	eq = kzalloc(buf_sz, GFP_KERNEL);
1788 | if (!eq) |
1789 | return -ENOMEM; |
1790 | |
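	/* Note: disable gets the shorter minimum timeout, presumably because
	 * tearing queues down is expected to complete much faster than
	 * bringing them up.
	 */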
1791 | if (ena) { |
1792 | xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES; |
1793 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
1794 | } else { |
1795 | xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES; |
1796 | xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; |
1797 | } |
1798 | |
1799 | for (i = 0, k = 0; i < num_msgs; i++) { |
1800 | memset(eq, 0, buf_sz); |
1801 | eq->vport_id = cpu_to_le32(vport->vport_id); |
1802 | eq->chunks.num_chunks = cpu_to_le16(num_chunks); |
1803 | qcs = &eq->chunks; |
1804 | memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); |
1805 | |
1806 | xn_params.send_buf.iov_base = eq; |
1807 | xn_params.send_buf.iov_len = buf_sz; |
		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1809 | if (reply_sz < 0) |
1810 | return reply_sz; |
1811 | |
1812 | k += num_chunks; |
1813 | num_q -= num_chunks; |
1814 | num_chunks = min(num_chunks, num_q); |
1815 | /* Recalculate buffer size */ |
1816 | buf_sz = struct_size(eq, chunks.chunks, num_chunks); |
1817 | } |
1818 | |
1819 | return 0; |
1820 | } |
1821 | |
1822 | /** |
1823 | * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue |
1824 | * vector message |
1825 | * @vport: virtual port data structure |
1826 | * @map: true for map and false for unmap |
1827 | * |
1828 | * Send map or unmap queue vector virtchnl message. Returns 0 on success, |
1829 | * negative on failure. |
1830 | */ |
1831 | int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) |
1832 | { |
1833 | struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL; |
1834 | struct virtchnl2_queue_vector *vqv __free(kfree) = NULL; |
1835 | struct idpf_vc_xn_params xn_params = {}; |
1836 | u32 config_sz, chunk_sz, buf_sz; |
1837 | u32 num_msgs, num_chunks, num_q; |
1838 | ssize_t reply_sz; |
1839 | int i, j, k = 0; |
1840 | |
1841 | num_q = vport->num_txq + vport->num_rxq; |
1842 | |
1843 | buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q; |
	vqv = kzalloc(buf_sz, GFP_KERNEL);
1845 | if (!vqv) |
1846 | return -ENOMEM; |
1847 | |
1848 | for (i = 0; i < vport->num_txq_grp; i++) { |
1849 | struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; |
1850 | |
1851 | for (j = 0; j < tx_qgrp->num_txq; j++, k++) { |
1852 | vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); |
1853 | vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); |
1854 | |
			if (idpf_is_queue_model_split(vport->txq_model)) {
1856 | vqv[k].vector_id = |
1857 | cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); |
1858 | vqv[k].itr_idx = |
1859 | cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); |
1860 | } else { |
1861 | vqv[k].vector_id = |
1862 | cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx); |
1863 | vqv[k].itr_idx = |
1864 | cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx); |
1865 | } |
1866 | } |
1867 | } |
1868 | |
1869 | if (vport->num_txq != k) |
1870 | return -EINVAL; |
1871 | |
1872 | for (i = 0; i < vport->num_rxq_grp; i++) { |
1873 | struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; |
1874 | u16 num_rxq; |
1875 | |
		if (idpf_is_queue_model_split(vport->rxq_model))
1877 | num_rxq = rx_qgrp->splitq.num_rxq_sets; |
1878 | else |
1879 | num_rxq = rx_qgrp->singleq.num_rxq; |
1880 | |
1881 | for (j = 0; j < num_rxq; j++, k++) { |
1882 | struct idpf_queue *rxq; |
1883 | |
			if (idpf_is_queue_model_split(vport->rxq_model))
1885 | rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; |
1886 | else |
1887 | rxq = rx_qgrp->singleq.rxqs[j]; |
1888 | |
1889 | vqv[k].queue_type = cpu_to_le32(rxq->q_type); |
1890 | vqv[k].queue_id = cpu_to_le32(rxq->q_id); |
1891 | vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); |
1892 | vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); |
1893 | } |
1894 | } |
1895 | |
	if (idpf_is_queue_model_split(vport->txq_model)) {
1897 | if (vport->num_rxq != k - vport->num_complq) |
1898 | return -EINVAL; |
1899 | } else { |
1900 | if (vport->num_rxq != k - vport->num_txq) |
1901 | return -EINVAL; |
1902 | } |
1903 | |
1904 | /* Chunk up the vector info into multiple messages */ |
1905 | config_sz = sizeof(struct virtchnl2_queue_vector_maps); |
1906 | chunk_sz = sizeof(struct virtchnl2_queue_vector); |
1907 | |
1908 | num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), |
1909 | num_q); |
1910 | num_msgs = DIV_ROUND_UP(num_q, num_chunks); |
1911 | |
1912 | buf_sz = struct_size(vqvm, qv_maps, num_chunks); |
	vqvm = kzalloc(buf_sz, GFP_KERNEL);
1914 | if (!vqvm) |
1915 | return -ENOMEM; |
1916 | |
1917 | if (map) { |
1918 | xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR; |
1919 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
1920 | } else { |
1921 | xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR; |
1922 | xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; |
1923 | } |
1924 | |
1925 | for (i = 0, k = 0; i < num_msgs; i++) { |
1926 | memset(vqvm, 0, buf_sz); |
1927 | xn_params.send_buf.iov_base = vqvm; |
1928 | xn_params.send_buf.iov_len = buf_sz; |
1929 | vqvm->vport_id = cpu_to_le32(vport->vport_id); |
1930 | vqvm->num_qv_maps = cpu_to_le16(num_chunks); |
1931 | memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); |
1932 | |
		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1934 | if (reply_sz < 0) |
1935 | return reply_sz; |
1936 | |
1937 | k += num_chunks; |
1938 | num_q -= num_chunks; |
1939 | num_chunks = min(num_chunks, num_q); |
1940 | /* Recalculate buffer size */ |
1941 | buf_sz = struct_size(vqvm, qv_maps, num_chunks); |
1942 | } |
1943 | |
1944 | return 0; |
1945 | } |
1946 | |
1947 | /** |
1948 | * idpf_send_enable_queues_msg - send enable queues virtchnl message |
1949 | * @vport: Virtual port private data structure |
1950 | * |
1951 | * Will send enable queues virtchnl message. Returns 0 on success, negative on |
1952 | * failure. |
1953 | */ |
1954 | int idpf_send_enable_queues_msg(struct idpf_vport *vport) |
1955 | { |
	return idpf_send_ena_dis_queues_msg(vport, true);
1957 | } |
1958 | |
1959 | /** |
1960 | * idpf_send_disable_queues_msg - send disable queues virtchnl message |
1961 | * @vport: Virtual port private data structure |
1962 | * |
1963 | * Will send disable queues virtchnl message. Returns 0 on success, negative |
1964 | * on failure. |
1965 | */ |
1966 | int idpf_send_disable_queues_msg(struct idpf_vport *vport) |
1967 | { |
1968 | int err, i; |
1969 | |
	err = idpf_send_ena_dis_queues_msg(vport, false);
1971 | if (err) |
1972 | return err; |
1973 | |
1974 | /* switch to poll mode as interrupts will be disabled after disable |
1975 | * queues virtchnl message is sent |
1976 | */ |
1977 | for (i = 0; i < vport->num_txq; i++) |
		set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
1979 | |
1980 | /* schedule the napi to receive all the marker packets */ |
1981 | local_bh_disable(); |
1982 | for (i = 0; i < vport->num_q_vectors; i++) |
		napi_schedule(&vport->q_vectors[i].napi);
1984 | local_bh_enable(); |
1985 | |
1986 | return idpf_wait_for_marker_event(vport); |
1987 | } |
1988 | |
1989 | /** |
1990 | * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right |
1991 | * structure |
1992 | * @dchunks: Destination chunks to store data to |
1993 | * @schunks: Source chunks to copy data from |
1994 | * @num_chunks: number of chunks to copy |
1995 | */ |
1996 | static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, |
1997 | struct virtchnl2_queue_reg_chunk *schunks, |
1998 | u16 num_chunks) |
1999 | { |
2000 | u16 i; |
2001 | |
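	/* Both chunk layouts keep these fields in little-endian wire format,
	 * so the values can be copied straight across without byte swapping.
	 */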
2002 | for (i = 0; i < num_chunks; i++) { |
2003 | dchunks[i].type = schunks[i].type; |
2004 | dchunks[i].start_queue_id = schunks[i].start_queue_id; |
2005 | dchunks[i].num_queues = schunks[i].num_queues; |
2006 | } |
2007 | } |
2008 | |
2009 | /** |
2010 | * idpf_send_delete_queues_msg - send delete queues virtchnl message |
2011 | * @vport: Virtual port private data structure |
2012 | * |
 * Will send delete queues virtchnl message. Returns 0 on success, negative on
2014 | * failure. |
2015 | */ |
2016 | int idpf_send_delete_queues_msg(struct idpf_vport *vport) |
2017 | { |
2018 | struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; |
2019 | struct virtchnl2_create_vport *vport_params; |
2020 | struct virtchnl2_queue_reg_chunks *chunks; |
2021 | struct idpf_vc_xn_params xn_params = {}; |
2022 | struct idpf_vport_config *vport_config; |
2023 | u16 vport_idx = vport->idx; |
2024 | ssize_t reply_sz; |
2025 | u16 num_chunks; |
2026 | int buf_size; |
2027 | |
2028 | vport_config = vport->adapter->vport_config[vport_idx]; |
2029 | if (vport_config->req_qs_chunks) { |
2030 | chunks = &vport_config->req_qs_chunks->chunks; |
2031 | } else { |
2032 | vport_params = vport->adapter->vport_params_recvd[vport_idx]; |
2033 | chunks = &vport_params->chunks; |
2034 | } |
2035 | |
2036 | num_chunks = le16_to_cpu(chunks->num_chunks); |
2037 | buf_size = struct_size(eq, chunks.chunks, num_chunks); |
2038 | |
	eq = kzalloc(buf_size, GFP_KERNEL);
2040 | if (!eq) |
2041 | return -ENOMEM; |
2042 | |
2043 | eq->vport_id = cpu_to_le32(vport->vport_id); |
2044 | eq->chunks.num_chunks = cpu_to_le16(num_chunks); |
2045 | |
	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
2047 | num_chunks); |
2048 | |
2049 | xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES; |
2050 | xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; |
2051 | xn_params.send_buf.iov_base = eq; |
2052 | xn_params.send_buf.iov_len = buf_size; |
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2054 | |
2055 | return reply_sz < 0 ? reply_sz : 0; |
2056 | } |
2057 | |
2058 | /** |
2059 | * idpf_send_config_queues_msg - Send config queues virtchnl message |
2060 | * @vport: Virtual port private data structure |
2061 | * |
2062 | * Will send config queues virtchnl message. Returns 0 on success, negative on |
2063 | * failure. |
2064 | */ |
2065 | int idpf_send_config_queues_msg(struct idpf_vport *vport) |
2066 | { |
2067 | int err; |
2068 | |
2069 | err = idpf_send_config_tx_queues_msg(vport); |
2070 | if (err) |
2071 | return err; |
2072 | |
2073 | return idpf_send_config_rx_queues_msg(vport); |
2074 | } |
2075 | |
2076 | /** |
2077 | * idpf_send_add_queues_msg - Send virtchnl add queues message |
2078 | * @vport: Virtual port private data structure |
2079 | * @num_tx_q: number of transmit queues |
2080 | * @num_complq: number of transmit completion queues |
2081 | * @num_rx_q: number of receive queues |
2082 | * @num_rx_bufq: number of receive buffer queues |
2083 | * |
2084 | * Returns 0 on success, negative on failure. vport _MUST_ be const here as |
2085 | * we should not change any fields within vport itself in this function. |
2086 | */ |
2087 | int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, |
2088 | u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) |
2089 | { |
2090 | struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL; |
2091 | struct idpf_vc_xn_params xn_params = {}; |
2092 | struct idpf_vport_config *vport_config; |
2093 | struct virtchnl2_add_queues aq = {}; |
2094 | u16 vport_idx = vport->idx; |
2095 | ssize_t reply_sz; |
2096 | int size; |
2097 | |
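	/* The reply carries a variable number of queue chunks, so allocate
	 * a max-size control queue buffer to receive it.
	 */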
2098 | vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); |
2099 | if (!vc_msg) |
2100 | return -ENOMEM; |
2101 | |
2102 | vport_config = vport->adapter->vport_config[vport_idx]; |
	kfree(vport_config->req_qs_chunks);
2104 | vport_config->req_qs_chunks = NULL; |
2105 | |
2106 | aq.vport_id = cpu_to_le32(vport->vport_id); |
2107 | aq.num_tx_q = cpu_to_le16(num_tx_q); |
2108 | aq.num_tx_complq = cpu_to_le16(num_complq); |
2109 | aq.num_rx_q = cpu_to_le16(num_rx_q); |
2110 | aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); |
2111 | |
2112 | xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES; |
2113 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
2114 | xn_params.send_buf.iov_base = &aq; |
2115 | xn_params.send_buf.iov_len = sizeof(aq); |
2116 | xn_params.recv_buf.iov_base = vc_msg; |
2117 | xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; |
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2119 | if (reply_sz < 0) |
2120 | return reply_sz; |
2121 | |
2122 | /* compare vc_msg num queues with vport num queues */ |
2123 | if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || |
2124 | le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || |
2125 | le16_to_cpu(vc_msg->num_tx_complq) != num_complq || |
2126 | le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) |
2127 | return -EINVAL; |
2128 | |
2129 | size = struct_size(vc_msg, chunks.chunks, |
2130 | le16_to_cpu(vc_msg->chunks.num_chunks)); |
2131 | if (reply_sz < size) |
2132 | return -EIO; |
2133 | |
	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
2135 | if (!vport_config->req_qs_chunks) |
2136 | return -ENOMEM; |
2137 | |
2138 | return 0; |
2139 | } |
2140 | |
2141 | /** |
2142 | * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message |
2143 | * @adapter: Driver specific private structure |
2144 | * @num_vectors: number of vectors to be allocated |
2145 | * |
2146 | * Returns 0 on success, negative on failure. |
2147 | */ |
2148 | int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) |
2149 | { |
2150 | struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL; |
2151 | struct idpf_vc_xn_params xn_params = {}; |
2152 | struct virtchnl2_alloc_vectors ac = {}; |
2153 | ssize_t reply_sz; |
2154 | u16 num_vchunks; |
2155 | int size; |
2156 | |
2157 | ac.num_vectors = cpu_to_le16(num_vectors); |
2158 | |
2159 | rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); |
2160 | if (!rcvd_vec) |
2161 | return -ENOMEM; |
2162 | |
2163 | xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS; |
	xn_params.send_buf.iov_base = &ac;
2165 | xn_params.send_buf.iov_len = sizeof(ac); |
2166 | xn_params.recv_buf.iov_base = rcvd_vec; |
2167 | xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; |
2168 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2170 | if (reply_sz < 0) |
2171 | return reply_sz; |
2172 | |
2173 | num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); |
2174 | size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); |
2175 | if (reply_sz < size) |
2176 | return -EIO; |
2177 | |
2178 | if (size > IDPF_CTLQ_MAX_BUF_LEN) |
2179 | return -EINVAL; |
2180 | |
	kfree(adapter->req_vec_chunks);
	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
2183 | if (!adapter->req_vec_chunks) |
2184 | return -ENOMEM; |
2185 | |
2186 | if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { |
		kfree(adapter->req_vec_chunks);
2188 | adapter->req_vec_chunks = NULL; |
2189 | return -EINVAL; |
2190 | } |
2191 | |
2192 | return 0; |
2193 | } |
2194 | |
2195 | /** |
 * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2197 | * @adapter: Driver specific private structure |
2198 | * |
2199 | * Returns 0 on success, negative on failure. |
2200 | */ |
2201 | int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) |
2202 | { |
2203 | struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; |
2204 | struct virtchnl2_vector_chunks *vcs = &ac->vchunks; |
2205 | struct idpf_vc_xn_params xn_params = {}; |
2206 | ssize_t reply_sz; |
2207 | int buf_size; |
2208 | |
2209 | buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); |
2210 | |
2211 | xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS; |
2212 | xn_params.send_buf.iov_base = vcs; |
2213 | xn_params.send_buf.iov_len = buf_size; |
2214 | xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; |
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2216 | if (reply_sz < 0) |
2217 | return reply_sz; |
2218 | |
	kfree(adapter->req_vec_chunks);
2220 | adapter->req_vec_chunks = NULL; |
2221 | |
2222 | return 0; |
2223 | } |
2224 | |
2225 | /** |
 * idpf_get_max_vfs - Get max number of VFs supported
2227 | * @adapter: Driver specific private structure |
2228 | * |
2229 | * Returns max number of VFs |
2230 | */ |
2231 | static int idpf_get_max_vfs(struct idpf_adapter *adapter) |
2232 | { |
2233 | return le16_to_cpu(adapter->caps.max_sriov_vfs); |
2234 | } |
2235 | |
2236 | /** |
2237 | * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message |
2238 | * @adapter: Driver specific private structure |
2239 | * @num_vfs: number of virtual functions to be created |
2240 | * |
2241 | * Returns 0 on success, negative on failure. |
2242 | */ |
2243 | int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) |
2244 | { |
2245 | struct virtchnl2_sriov_vfs_info svi = {}; |
2246 | struct idpf_vc_xn_params xn_params = {}; |
2247 | ssize_t reply_sz; |
2248 | |
2249 | svi.num_vfs = cpu_to_le16(num_vfs); |
2250 | xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS; |
2251 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
2252 | xn_params.send_buf.iov_base = &svi; |
2253 | xn_params.send_buf.iov_len = sizeof(svi); |
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2255 | |
2256 | return reply_sz < 0 ? reply_sz : 0; |
2257 | } |
2258 | |
2259 | /** |
2260 | * idpf_send_get_stats_msg - Send virtchnl get statistics message |
2261 | * @vport: vport to get stats for |
2262 | * |
2263 | * Returns 0 on success, negative on failure. |
2264 | */ |
2265 | int idpf_send_get_stats_msg(struct idpf_vport *vport) |
2266 | { |
	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
2268 | struct rtnl_link_stats64 *netstats = &np->netstats; |
2269 | struct virtchnl2_vport_stats stats_msg = {}; |
2270 | struct idpf_vc_xn_params xn_params = {}; |
	ssize_t reply_sz;

2274 | /* Don't send get_stats message if the link is down */ |
2275 | if (np->state <= __IDPF_VPORT_DOWN) |
2276 | return 0; |
2277 | |
2278 | stats_msg.vport_id = cpu_to_le32(vport->vport_id); |
2279 | |
2280 | xn_params.vc_op = VIRTCHNL2_OP_GET_STATS; |
2281 | xn_params.send_buf.iov_base = &stats_msg; |
2282 | xn_params.send_buf.iov_len = sizeof(stats_msg); |
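	/* The reply is the same structure echoed back with the counters
	 * filled in, so the send buffer doubles as the receive buffer.
	 */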
2283 | xn_params.recv_buf = xn_params.send_buf; |
2284 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
2285 | |
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2287 | if (reply_sz < 0) |
2288 | return reply_sz; |
2289 | if (reply_sz < sizeof(stats_msg)) |
2290 | return -EIO; |
2291 | |
	spin_lock_bh(&np->stats_lock);
2293 | |
2294 | netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + |
2295 | le64_to_cpu(stats_msg.rx_multicast) + |
2296 | le64_to_cpu(stats_msg.rx_broadcast); |
2297 | netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + |
2298 | le64_to_cpu(stats_msg.tx_multicast) + |
2299 | le64_to_cpu(stats_msg.tx_broadcast); |
2300 | netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); |
2301 | netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); |
2302 | netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); |
2303 | netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); |
2304 | netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); |
2305 | netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); |
2306 | |
2307 | vport->port_stats.vport_stats = stats_msg; |
2308 | |
	spin_unlock_bh(&np->stats_lock);
2310 | |
2311 | return 0; |
2312 | } |
2313 | |
2314 | /** |
2315 | * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message |
2316 | * @vport: virtual port data structure |
 * @get: true to get the RSS lookup table, false to set it
2318 | * |
2319 | * Returns 0 on success, negative on failure. |
2320 | */ |
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
2322 | { |
2323 | struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL; |
2324 | struct virtchnl2_rss_lut *rl __free(kfree) = NULL; |
2325 | struct idpf_vc_xn_params xn_params = {}; |
	struct idpf_rss_data *rss_data;
2327 | int buf_size, lut_buf_size; |
2328 | ssize_t reply_sz; |
2329 | int i; |
2330 | |
2331 | rss_data = |
2332 | &vport->adapter->vport_config[vport->idx]->user_config.rss_data; |
2333 | buf_size = struct_size(rl, lut, rss_data->rss_lut_size); |
	rl = kzalloc(buf_size, GFP_KERNEL);
2335 | if (!rl) |
2336 | return -ENOMEM; |
2337 | |
2338 | rl->vport_id = cpu_to_le32(vport->vport_id); |
2339 | |
2340 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
2341 | xn_params.send_buf.iov_base = rl; |
2342 | xn_params.send_buf.iov_len = buf_size; |
2343 | |
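	/* A get needs a max-size receive buffer since the LUT length isn't
	 * known up front; a set copies the user's LUT into the request.
	 */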
2344 | if (get) { |
2345 | recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); |
2346 | if (!recv_rl) |
2347 | return -ENOMEM; |
2348 | xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT; |
2349 | xn_params.recv_buf.iov_base = recv_rl; |
2350 | xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; |
2351 | } else { |
2352 | rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); |
2353 | for (i = 0; i < rss_data->rss_lut_size; i++) |
2354 | rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); |
2355 | |
2356 | xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT; |
2357 | } |
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2359 | if (reply_sz < 0) |
2360 | return reply_sz; |
2361 | if (!get) |
2362 | return 0; |
2363 | if (reply_sz < sizeof(struct virtchnl2_rss_lut)) |
2364 | return -EIO; |
2365 | |
2366 | lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); |
2367 | if (reply_sz < lut_buf_size) |
2368 | return -EIO; |
2369 | |
2370 | /* size didn't change, we can reuse existing lut buf */ |
2371 | if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) |
2372 | goto do_memcpy; |
2373 | |
2374 | rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); |
	kfree(rss_data->rss_lut);
2376 | |
	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
2378 | if (!rss_data->rss_lut) { |
2379 | rss_data->rss_lut_size = 0; |
2380 | return -ENOMEM; |
2381 | } |
2382 | |
2383 | do_memcpy: |
	memcpy(rss_data->rss_lut, recv_rl->lut, lut_buf_size);
2385 | |
2386 | return 0; |
2387 | } |
2388 | |
2389 | /** |
2390 | * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message |
2391 | * @vport: virtual port data structure |
 * @get: true to get the RSS key, false to set it
 *
 * Returns 0 on success, negative on failure.
2395 | */ |
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
2397 | { |
2398 | struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL; |
2399 | struct virtchnl2_rss_key *rk __free(kfree) = NULL; |
2400 | struct idpf_vc_xn_params xn_params = {}; |
	struct idpf_rss_data *rss_data;
2402 | ssize_t reply_sz; |
2403 | int i, buf_size; |
2404 | u16 key_size; |
2405 | |
2406 | rss_data = |
2407 | &vport->adapter->vport_config[vport->idx]->user_config.rss_data; |
2408 | buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); |
	rk = kzalloc(buf_size, GFP_KERNEL);
2410 | if (!rk) |
2411 | return -ENOMEM; |
2412 | |
2413 | rk->vport_id = cpu_to_le32(vport->vport_id); |
2414 | xn_params.send_buf.iov_base = rk; |
2415 | xn_params.send_buf.iov_len = buf_size; |
2416 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
2417 | if (get) { |
2418 | recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); |
2419 | if (!recv_rk) |
2420 | return -ENOMEM; |
2421 | |
2422 | xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY; |
2423 | xn_params.recv_buf.iov_base = recv_rk; |
2424 | xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; |
2425 | } else { |
2426 | rk->key_len = cpu_to_le16(rss_data->rss_key_size); |
2427 | for (i = 0; i < rss_data->rss_key_size; i++) |
2428 | rk->key_flex[i] = rss_data->rss_key[i]; |
2429 | |
2430 | xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY; |
2431 | } |
2432 | |
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2434 | if (reply_sz < 0) |
2435 | return reply_sz; |
2436 | if (!get) |
2437 | return 0; |
2438 | if (reply_sz < sizeof(struct virtchnl2_rss_key)) |
2439 | return -EIO; |
2440 | |
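	/* Clamp the key length to what the netdev stack can consume. */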
2441 | key_size = min_t(u16, NETDEV_RSS_KEY_LEN, |
2442 | le16_to_cpu(recv_rk->key_len)); |
2443 | if (reply_sz < key_size) |
2444 | return -EIO; |
2445 | |
2446 | /* key len didn't change, reuse existing buf */ |
2447 | if (rss_data->rss_key_size == key_size) |
2448 | goto do_memcpy; |
2449 | |
2450 | rss_data->rss_key_size = key_size; |
	kfree(rss_data->rss_key);
	rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
2453 | if (!rss_data->rss_key) { |
2454 | rss_data->rss_key_size = 0; |
2455 | return -ENOMEM; |
2456 | } |
2457 | |
2458 | do_memcpy: |
2459 | memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); |
2460 | |
2461 | return 0; |
2462 | } |
2463 | |
2464 | /** |
2465 | * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table |
2466 | * @ptype: ptype lookup table |
2467 | * @pstate: state machine for ptype lookup table |
2468 | * @ipv4: ipv4 or ipv6 |
2469 | * @frag: fragmentation allowed |
2470 | * |
2471 | */ |
2472 | static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, |
2473 | struct idpf_ptype_state *pstate, |
2474 | bool ipv4, bool frag) |
2475 | { |
2476 | if (!pstate->outer_ip || !pstate->outer_frag) { |
2477 | ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP; |
2478 | pstate->outer_ip = true; |
2479 | |
2480 | if (ipv4) |
2481 | ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4; |
2482 | else |
2483 | ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6; |
2484 | |
2485 | if (frag) { |
2486 | ptype->outer_frag = IDPF_RX_PTYPE_FRAG; |
2487 | pstate->outer_frag = true; |
2488 | } |
2489 | } else { |
2490 | ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP; |
2491 | pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; |
2492 | |
2493 | if (ipv4) |
2494 | ptype->tunnel_end_prot = |
2495 | IDPF_RX_PTYPE_TUNNEL_END_IPV4; |
2496 | else |
2497 | ptype->tunnel_end_prot = |
2498 | IDPF_RX_PTYPE_TUNNEL_END_IPV6; |
2499 | |
2500 | if (frag) |
2501 | ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG; |
2502 | } |
2503 | } |
2504 | |
2505 | /** |
 * idpf_send_get_rx_ptype_msg - Send virtchnl get ptype info message
2507 | * @vport: virtual port data structure |
2508 | * |
2509 | * Returns 0 on success, negative on failure. |
2510 | */ |
2511 | int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) |
2512 | { |
2513 | struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; |
2514 | struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; |
2515 | struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; |
2516 | int max_ptype, ptypes_recvd = 0, ptype_offset; |
2517 | struct idpf_adapter *adapter = vport->adapter; |
2518 | struct idpf_vc_xn_params xn_params = {}; |
2519 | u16 next_ptype_id = 0; |
2520 | ssize_t reply_sz; |
2521 | int i, j, k; |
2522 | |
	if (idpf_is_queue_model_split(vport->rxq_model))
2524 | max_ptype = IDPF_RX_MAX_PTYPE; |
2525 | else |
2526 | max_ptype = IDPF_RX_MAX_BASE_PTYPE; |
2527 | |
2528 | memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); |
2529 | |
	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
2531 | if (!get_ptype_info) |
2532 | return -ENOMEM; |
2533 | |
2534 | ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); |
2535 | if (!ptype_info) |
2536 | return -ENOMEM; |
2537 | |
2538 | xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO; |
2539 | xn_params.send_buf.iov_base = get_ptype_info; |
2540 | xn_params.send_buf.iov_len = sizeof(*get_ptype_info); |
2541 | xn_params.recv_buf.iov_base = ptype_info; |
2542 | xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; |
2543 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
2544 | |
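	/* Walk the ptype table in batches of up to IDPF_RX_MAX_PTYPES_PER_BUF
	 * entries per message until the whole table has been requested.
	 */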
2545 | while (next_ptype_id < max_ptype) { |
2546 | get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); |
2547 | |
2548 | if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) |
2549 | get_ptype_info->num_ptypes = |
2550 | cpu_to_le16(max_ptype - next_ptype_id); |
2551 | else |
2552 | get_ptype_info->num_ptypes = |
2553 | cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); |
2554 | |
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2556 | if (reply_sz < 0) |
2557 | return reply_sz; |
2558 | |
2559 | if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) |
2560 | return -EIO; |
2561 | |
2562 | ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); |
2563 | if (ptypes_recvd > max_ptype) |
2564 | return -EINVAL; |
2565 | |
2566 | next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + |
2567 | le16_to_cpu(get_ptype_info->num_ptypes); |
2568 | |
2569 | ptype_offset = IDPF_RX_PTYPE_HDR_SZ; |
2570 | |
2571 | for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { |
2572 | struct idpf_ptype_state pstate = { }; |
2573 | struct virtchnl2_ptype *ptype; |
2574 | u16 id; |
2575 | |
2576 | ptype = (struct virtchnl2_ptype *) |
2577 | ((u8 *)ptype_info + ptype_offset); |
2578 | |
2579 | ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); |
2580 | if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) |
2581 | return -EINVAL; |
2582 | |
2583 | /* 0xFFFF indicates end of ptypes */ |
2584 | if (le16_to_cpu(ptype->ptype_id_10) == |
2585 | IDPF_INVALID_PTYPE_ID) |
2586 | return 0; |
2587 | |
			if (idpf_is_queue_model_split(vport->rxq_model))
2589 | k = le16_to_cpu(ptype->ptype_id_10); |
2590 | else |
2591 | k = ptype->ptype_id_8; |
2592 | |
2593 | if (ptype->proto_id_count) |
2594 | ptype_lkup[k].known = 1; |
2595 | |
2596 | for (j = 0; j < ptype->proto_id_count; j++) { |
2597 | id = le16_to_cpu(ptype->proto_id[j]); |
2598 | switch (id) { |
2599 | case VIRTCHNL2_PROTO_HDR_GRE: |
2600 | if (pstate.tunnel_state == |
2601 | IDPF_PTYPE_TUNNEL_IP) { |
2602 | ptype_lkup[k].tunnel_type = |
2603 | IDPF_RX_PTYPE_TUNNEL_IP_GRENAT; |
2604 | pstate.tunnel_state |= |
2605 | IDPF_PTYPE_TUNNEL_IP_GRENAT; |
2606 | } |
2607 | break; |
2608 | case VIRTCHNL2_PROTO_HDR_MAC: |
2609 | ptype_lkup[k].outer_ip = |
2610 | IDPF_RX_PTYPE_OUTER_L2; |
2611 | if (pstate.tunnel_state == |
2612 | IDPF_TUN_IP_GRE) { |
2613 | ptype_lkup[k].tunnel_type = |
2614 | IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC; |
2615 | pstate.tunnel_state |= |
2616 | IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; |
2617 | } |
2618 | break; |
2619 | case VIRTCHNL2_PROTO_HDR_IPV4: |
				idpf_fill_ptype_lookup(&ptype_lkup[k],
						       &pstate, true,
						       false);
2623 | break; |
2624 | case VIRTCHNL2_PROTO_HDR_IPV6: |
				idpf_fill_ptype_lookup(&ptype_lkup[k],
						       &pstate, false,
						       false);
2628 | break; |
2629 | case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: |
				idpf_fill_ptype_lookup(&ptype_lkup[k],
						       &pstate, true,
						       true);
2633 | break; |
2634 | case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: |
				idpf_fill_ptype_lookup(&ptype_lkup[k],
						       &pstate, false,
						       true);
2638 | break; |
2639 | case VIRTCHNL2_PROTO_HDR_UDP: |
2640 | ptype_lkup[k].inner_prot = |
2641 | IDPF_RX_PTYPE_INNER_PROT_UDP; |
2642 | break; |
2643 | case VIRTCHNL2_PROTO_HDR_TCP: |
2644 | ptype_lkup[k].inner_prot = |
2645 | IDPF_RX_PTYPE_INNER_PROT_TCP; |
2646 | break; |
2647 | case VIRTCHNL2_PROTO_HDR_SCTP: |
2648 | ptype_lkup[k].inner_prot = |
2649 | IDPF_RX_PTYPE_INNER_PROT_SCTP; |
2650 | break; |
2651 | case VIRTCHNL2_PROTO_HDR_ICMP: |
2652 | ptype_lkup[k].inner_prot = |
2653 | IDPF_RX_PTYPE_INNER_PROT_ICMP; |
2654 | break; |
2655 | case VIRTCHNL2_PROTO_HDR_PAY: |
2656 | ptype_lkup[k].payload_layer = |
2657 | IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2; |
2658 | break; |
2659 | case VIRTCHNL2_PROTO_HDR_ICMPV6: |
2660 | case VIRTCHNL2_PROTO_HDR_IPV6_EH: |
2661 | case VIRTCHNL2_PROTO_HDR_PRE_MAC: |
2662 | case VIRTCHNL2_PROTO_HDR_POST_MAC: |
2663 | case VIRTCHNL2_PROTO_HDR_ETHERTYPE: |
2664 | case VIRTCHNL2_PROTO_HDR_SVLAN: |
2665 | case VIRTCHNL2_PROTO_HDR_CVLAN: |
2666 | case VIRTCHNL2_PROTO_HDR_MPLS: |
2667 | case VIRTCHNL2_PROTO_HDR_MMPLS: |
2668 | case VIRTCHNL2_PROTO_HDR_PTP: |
2669 | case VIRTCHNL2_PROTO_HDR_CTRL: |
2670 | case VIRTCHNL2_PROTO_HDR_LLDP: |
2671 | case VIRTCHNL2_PROTO_HDR_ARP: |
2672 | case VIRTCHNL2_PROTO_HDR_ECP: |
2673 | case VIRTCHNL2_PROTO_HDR_EAPOL: |
2674 | case VIRTCHNL2_PROTO_HDR_PPPOD: |
2675 | case VIRTCHNL2_PROTO_HDR_PPPOE: |
2676 | case VIRTCHNL2_PROTO_HDR_IGMP: |
2677 | case VIRTCHNL2_PROTO_HDR_AH: |
2678 | case VIRTCHNL2_PROTO_HDR_ESP: |
2679 | case VIRTCHNL2_PROTO_HDR_IKE: |
2680 | case VIRTCHNL2_PROTO_HDR_NATT_KEEP: |
2681 | case VIRTCHNL2_PROTO_HDR_L2TPV2: |
2682 | case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: |
2683 | case VIRTCHNL2_PROTO_HDR_L2TPV3: |
2684 | case VIRTCHNL2_PROTO_HDR_GTP: |
2685 | case VIRTCHNL2_PROTO_HDR_GTP_EH: |
2686 | case VIRTCHNL2_PROTO_HDR_GTPCV2: |
2687 | case VIRTCHNL2_PROTO_HDR_GTPC_TEID: |
2688 | case VIRTCHNL2_PROTO_HDR_GTPU: |
2689 | case VIRTCHNL2_PROTO_HDR_GTPU_UL: |
2690 | case VIRTCHNL2_PROTO_HDR_GTPU_DL: |
2691 | case VIRTCHNL2_PROTO_HDR_ECPRI: |
2692 | case VIRTCHNL2_PROTO_HDR_VRRP: |
2693 | case VIRTCHNL2_PROTO_HDR_OSPF: |
2694 | case VIRTCHNL2_PROTO_HDR_TUN: |
2695 | case VIRTCHNL2_PROTO_HDR_NVGRE: |
2696 | case VIRTCHNL2_PROTO_HDR_VXLAN: |
2697 | case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: |
2698 | case VIRTCHNL2_PROTO_HDR_GENEVE: |
2699 | case VIRTCHNL2_PROTO_HDR_NSH: |
2700 | case VIRTCHNL2_PROTO_HDR_QUIC: |
2701 | case VIRTCHNL2_PROTO_HDR_PFCP: |
2702 | case VIRTCHNL2_PROTO_HDR_PFCP_NODE: |
2703 | case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: |
2704 | case VIRTCHNL2_PROTO_HDR_RTP: |
2705 | case VIRTCHNL2_PROTO_HDR_NO_PROTO: |
2706 | break; |
2707 | default: |
2708 | break; |
2709 | } |
2710 | } |
2711 | } |
2712 | } |
2713 | |
2714 | return 0; |
2715 | } |
2716 | |
2717 | /** |
2718 | * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback |
2719 | * message |
2720 | * @vport: virtual port data structure |
2721 | * |
2722 | * Returns 0 on success, negative on failure. |
2723 | */ |
2724 | int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) |
2725 | { |
2726 | struct idpf_vc_xn_params xn_params = {}; |
2727 | struct virtchnl2_loopback loopback; |
2728 | ssize_t reply_sz; |
2729 | |
2730 | loopback.vport_id = cpu_to_le32(vport->vport_id); |
2731 | loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); |
2732 | |
2733 | xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK; |
2734 | xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; |
2735 | xn_params.send_buf.iov_base = &loopback; |
2736 | xn_params.send_buf.iov_len = sizeof(loopback); |
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2738 | |
2739 | return reply_sz < 0 ? reply_sz : 0; |
2740 | } |
2741 | |
2742 | /** |
2743 | * idpf_find_ctlq - Given a type and id, find ctlq info |
2744 | * @hw: hardware struct |
2745 | * @type: type of ctrlq to find |
2746 | * @id: ctlq id to find |
2747 | * |
2748 | * Returns pointer to found ctlq info struct, NULL otherwise. |
2749 | */ |
2750 | static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, |
2751 | enum idpf_ctlq_type type, int id) |
2752 | { |
2753 | struct idpf_ctlq_info *cq, *tmp; |
2754 | |
2755 | list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) |
2756 | if (cq->q_id == id && cq->cq_type == type) |
2757 | return cq; |
2758 | |
2759 | return NULL; |
2760 | } |
2761 | |
2762 | /** |
2763 | * idpf_init_dflt_mbx - Setup default mailbox parameters and make request |
2764 | * @adapter: adapter info struct |
2765 | * |
2766 | * Returns 0 on success, negative otherwise |
2767 | */ |
2768 | int idpf_init_dflt_mbx(struct idpf_adapter *adapter) |
2769 | { |
2770 | struct idpf_ctlq_create_info ctlq_info[] = { |
2771 | { |
2772 | .type = IDPF_CTLQ_TYPE_MAILBOX_TX, |
2773 | .id = IDPF_DFLT_MBX_ID, |
2774 | .len = IDPF_DFLT_MBX_Q_LEN, |
2775 | .buf_size = IDPF_CTLQ_MAX_BUF_LEN |
2776 | }, |
2777 | { |
2778 | .type = IDPF_CTLQ_TYPE_MAILBOX_RX, |
2779 | .id = IDPF_DFLT_MBX_ID, |
2780 | .len = IDPF_DFLT_MBX_Q_LEN, |
2781 | .buf_size = IDPF_CTLQ_MAX_BUF_LEN |
2782 | } |
2783 | }; |
2784 | struct idpf_hw *hw = &adapter->hw; |
2785 | int err; |
2786 | |
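	/* Fill in device-specific mailbox registers, then create the TX and
	 * RX mailbox queues; both share the default queue id and are told
	 * apart by their type.
	 */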
2787 | adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); |
2788 | |
	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
2790 | if (err) |
2791 | return err; |
2792 | |
	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
2794 | IDPF_DFLT_MBX_ID); |
	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
2796 | IDPF_DFLT_MBX_ID); |
2797 | |
2798 | if (!hw->asq || !hw->arq) { |
2799 | idpf_ctlq_deinit(hw); |
2800 | |
2801 | return -ENOENT; |
2802 | } |
2803 | |
2804 | adapter->state = __IDPF_VER_CHECK; |
2805 | |
2806 | return 0; |
2807 | } |
2808 | |
2809 | /** |
2810 | * idpf_deinit_dflt_mbx - Free up ctlqs setup |
2811 | * @adapter: Driver specific private data structure |
2812 | */ |
2813 | void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) |
2814 | { |
2815 | if (adapter->hw.arq && adapter->hw.asq) { |
2816 | idpf_mb_clean(adapter); |
		idpf_ctlq_deinit(&adapter->hw);
2818 | } |
2819 | adapter->hw.arq = NULL; |
2820 | adapter->hw.asq = NULL; |
2821 | } |
2822 | |
2823 | /** |
2824 | * idpf_vport_params_buf_rel - Release memory for MailBox resources |
2825 | * @adapter: Driver specific private data structure |
2826 | * |
2827 | * Will release memory to hold the vport parameters received on MailBox |
2828 | */ |
2829 | static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) |
2830 | { |
	kfree(adapter->vport_params_recvd);
	adapter->vport_params_recvd = NULL;
	kfree(adapter->vport_params_reqd);
	adapter->vport_params_reqd = NULL;
	kfree(adapter->vport_ids);
2836 | adapter->vport_ids = NULL; |
2837 | } |
2838 | |
2839 | /** |
2840 | * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources |
2841 | * @adapter: Driver specific private data structure |
2842 | * |
2843 | * Will alloc memory to hold the vport parameters received on MailBox |
2844 | */ |
2845 | static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) |
2846 | { |
2847 | u16 num_max_vports = idpf_get_max_vports(adapter); |
2848 | |
	adapter->vport_params_reqd = kcalloc(num_max_vports,
					     sizeof(*adapter->vport_params_reqd),
2851 | GFP_KERNEL); |
2852 | if (!adapter->vport_params_reqd) |
2853 | return -ENOMEM; |
2854 | |
	adapter->vport_params_recvd = kcalloc(num_max_vports,
					      sizeof(*adapter->vport_params_recvd),
2857 | GFP_KERNEL); |
2858 | if (!adapter->vport_params_recvd) |
2859 | goto err_mem; |
2860 | |
	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
2862 | if (!adapter->vport_ids) |
2863 | goto err_mem; |
2864 | |
2865 | if (adapter->vport_config) |
2866 | return 0; |
2867 | |
	adapter->vport_config = kcalloc(num_max_vports,
					sizeof(*adapter->vport_config),
2870 | GFP_KERNEL); |
2871 | if (!adapter->vport_config) |
2872 | goto err_mem; |
2873 | |
2874 | return 0; |
2875 | |
2876 | err_mem: |
2877 | idpf_vport_params_buf_rel(adapter); |
2878 | |
2879 | return -ENOMEM; |
2880 | } |
2881 | |
2882 | /** |
2883 | * idpf_vc_core_init - Initialize state machine and get driver specific |
2884 | * resources |
2885 | * @adapter: Driver specific private structure |
2886 | * |
2887 | * This function will initialize the state machine and request all necessary |
2888 | * resources required by the device driver. Once the state machine is |
 * initialized, memory is allocated to store vport specific information and
 * the required interrupts are requested.
2891 | * |
 * Returns 0 on success or -EAGAIN if the function should be called again;
 * otherwise negative on failure.
2894 | */ |
2895 | int idpf_vc_core_init(struct idpf_adapter *adapter) |
2896 | { |
2897 | int task_delay = 30; |
2898 | u16 num_max_vports; |
2899 | int err = 0; |
2900 | |
2901 | if (!adapter->vcxn_mngr) { |
		adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
2903 | if (!adapter->vcxn_mngr) { |
2904 | err = -ENOMEM; |
2905 | goto init_failed; |
2906 | } |
2907 | } |
	idpf_vc_xn_init(adapter->vcxn_mngr);
2909 | |
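	/* Step the init state machine forward: negotiate the virtchnl
	 * version first, then exchange capabilities; either step may ask
	 * for a retry.
	 */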
2910 | while (adapter->state != __IDPF_INIT_SW) { |
2911 | switch (adapter->state) { |
2912 | case __IDPF_VER_CHECK: |
2913 | err = idpf_send_ver_msg(adapter); |
2914 | switch (err) { |
2915 | case 0: |
2916 | /* success, move state machine forward */ |
2917 | adapter->state = __IDPF_GET_CAPS; |
2918 | fallthrough; |
2919 | case -EAGAIN: |
2920 | goto restart; |
2921 | default: |
2922 | /* Something bad happened, try again but only a |
2923 | * few times. |
2924 | */ |
2925 | goto init_failed; |
2926 | } |
2927 | case __IDPF_GET_CAPS: |
2928 | err = idpf_send_get_caps_msg(adapter); |
2929 | if (err) |
2930 | goto init_failed; |
2931 | adapter->state = __IDPF_INIT_SW; |
2932 | break; |
2933 | default: |
			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
2935 | adapter->state); |
2936 | err = -EINVAL; |
2937 | goto init_failed; |
2938 | } |
2939 | break; |
2940 | restart: |
2941 | /* Give enough time before proceeding further with |
2942 | * state machine |
2943 | */ |
		msleep(task_delay);
2945 | } |
2946 | |
	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
2948 | num_max_vports = idpf_get_max_vports(adapter); |
2949 | adapter->max_vports = num_max_vports; |
	adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
2951 | GFP_KERNEL); |
2952 | if (!adapter->vports) |
2953 | return -ENOMEM; |
2954 | |
2955 | if (!adapter->netdevs) { |
		adapter->netdevs = kcalloc(num_max_vports,
					   sizeof(struct net_device *),
2958 | GFP_KERNEL); |
2959 | if (!adapter->netdevs) { |
2960 | err = -ENOMEM; |
2961 | goto err_netdev_alloc; |
2962 | } |
2963 | } |
2964 | |
2965 | err = idpf_vport_params_buf_alloc(adapter); |
2966 | if (err) { |
		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
2968 | err); |
2969 | goto err_netdev_alloc; |
2970 | } |
2971 | |
2972 | /* Start the mailbox task before requesting vectors. This will ensure |
2973 | * vector information response from mailbox is handled |
2974 | */ |
	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
2976 | |
	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
2979 | |
2980 | err = idpf_intr_req(adapter); |
2981 | if (err) { |
		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
2983 | err); |
2984 | goto err_intr_req; |
2985 | } |
2986 | |
2987 | idpf_init_avail_queues(adapter); |
2988 | |
2989 | /* Skew the delay for init tasks for each function based on fn number |
2990 | * to prevent every function from making the same call simultaneously. |
2991 | */ |
	queue_delayed_work(adapter->init_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
2994 | |
	set_bit(IDPF_VC_CORE_INIT, adapter->flags);
2996 | |
2997 | return 0; |
2998 | |
2999 | err_intr_req: |
	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->mbx_task);
3002 | idpf_vport_params_buf_rel(adapter); |
3003 | err_netdev_alloc: |
	kfree(adapter->vports);
3005 | adapter->vports = NULL; |
3006 | return err; |
3007 | |
3008 | init_failed: |
3009 | /* Don't retry if we're trying to go down, just bail. */ |
3010 | if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) |
3011 | return err; |
3012 | |
3013 | if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { |
		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3015 | |
3016 | return -EFAULT; |
3017 | } |
3018 | /* If it reached here, it is possible that mailbox queue initialization |
3019 | * register writes might not have taken effect. Retry to initialize |
3020 | * the mailbox again |
3021 | */ |
3022 | adapter->state = __IDPF_VER_CHECK; |
3023 | if (adapter->vcxn_mngr) |
		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3025 | idpf_deinit_dflt_mbx(adapter); |
	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
			   msecs_to_jiffies(task_delay));
3029 | |
3030 | return -EAGAIN; |
3031 | } |
3032 | |
3033 | /** |
3034 | * idpf_vc_core_deinit - Device deinit routine |
3035 | * @adapter: Driver specific private structure |
3036 | * |
3037 | */ |
3038 | void idpf_vc_core_deinit(struct idpf_adapter *adapter) |
3039 | { |
3040 | if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) |
3041 | return; |
3042 | |
	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3044 | idpf_deinit_task(adapter); |
3045 | idpf_intr_rel(adapter); |
3046 | |
	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->mbx_task);
3049 | |
3050 | idpf_vport_params_buf_rel(adapter); |
3051 | |
	kfree(adapter->vports);
3053 | adapter->vports = NULL; |
3054 | |
	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
3056 | } |
3057 | |
3058 | /** |
3059 | * idpf_vport_alloc_vec_indexes - Get relative vector indexes |
3060 | * @vport: virtual port data struct |
3061 | * |
3062 | * This function requests the vector information required for the vport and |
3063 | * stores the vector indexes received from the 'global vector distribution' |
3064 | * in the vport's queue vectors array. |
3065 | * |
3066 | * Return 0 on success, error on failure |
3067 | */ |
3068 | int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) |
3069 | { |
3070 | struct idpf_vector_info vec_info; |
3071 | int num_alloc_vecs; |
3072 | |
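	/* Request enough vectors to cover the larger of the TX and RX queue
	 * counts, on the assumption that a single vector can service a
	 * TX/RX queue pair.
	 */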
3073 | vec_info.num_curr_vecs = vport->num_q_vectors; |
3074 | vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); |
3075 | vec_info.default_vport = vport->default_vport; |
3076 | vec_info.index = vport->idx; |
3077 | |
	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
						     vport->q_vector_idxs,
						     &vec_info);
3081 | if (num_alloc_vecs <= 0) { |
		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3083 | num_alloc_vecs); |
3084 | return -EINVAL; |
3085 | } |
3086 | |
3087 | vport->num_q_vectors = num_alloc_vecs; |
3088 | |
3089 | return 0; |
3090 | } |
3091 | |
3092 | /** |
3093 | * idpf_vport_init - Initialize virtual port |
3094 | * @vport: virtual port to be initialized |
3095 | * @max_q: vport max queue info |
3096 | * |
3097 | * Will initialize vport with the info received through MB earlier |
3098 | */ |
3099 | void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) |
3100 | { |
3101 | struct idpf_adapter *adapter = vport->adapter; |
3102 | struct virtchnl2_create_vport *vport_msg; |
3103 | struct idpf_vport_config *vport_config; |
3104 | u16 tx_itr[] = {2, 8, 64, 128, 256}; |
3105 | u16 rx_itr[] = {2, 8, 32, 96, 128}; |
	struct idpf_rss_data *rss_data;
3107 | u16 idx = vport->idx; |
3108 | |
3109 | vport_config = adapter->vport_config[idx]; |
3110 | rss_data = &vport_config->user_config.rss_data; |
3111 | vport_msg = adapter->vport_params_recvd[idx]; |
3112 | |
3113 | vport_config->max_q.max_txq = max_q->max_txq; |
3114 | vport_config->max_q.max_rxq = max_q->max_rxq; |
3115 | vport_config->max_q.max_complq = max_q->max_complq; |
3116 | vport_config->max_q.max_bufq = max_q->max_bufq; |
3117 | |
3118 | vport->txq_model = le16_to_cpu(vport_msg->txq_model); |
3119 | vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); |
3120 | vport->vport_type = le16_to_cpu(vport_msg->vport_type); |
3121 | vport->vport_id = le32_to_cpu(vport_msg->vport_id); |
3122 | |
3123 | rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, |
3124 | le16_to_cpu(vport_msg->rss_key_size)); |
3125 | rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); |
3126 | |
	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3128 | vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD; |
3129 | |
3130 | /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ |
3131 | memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); |
3132 | memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); |
3133 | |
	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3135 | |
3136 | idpf_vport_init_num_qs(vport, vport_msg); |
3137 | idpf_vport_calc_num_q_desc(vport); |
3138 | idpf_vport_calc_num_q_groups(vport); |
3139 | idpf_vport_alloc_vec_indexes(vport); |
3140 | |
3141 | vport->crc_enable = adapter->crc_enable; |
3142 | } |
3143 | |
/**
 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
 * @adapter: adapter structure to get the mailbox vector id
 * @vecids: Array of vector ids
 * @num_vecids: number of vector ids
 * @chunks: vector ids received over mailbox
 *
 * Will initialize the mailbox vector id, which is received from the get
 * capabilities exchange, and the data queue vector ids with the ids
 * received as mailbox parameters.
 * Returns number of ids filled
 */
int idpf_get_vec_ids(struct idpf_adapter *adapter,
		     u16 *vecids, int num_vecids,
		     struct virtchnl2_vector_chunks *chunks)
{
	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
	int num_vecid_filled = 0;
	int i, j;

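	/* The mailbox vector always occupies the first slot, ahead of any
	 * data queue vectors extracted from the chunks below.
	 */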
	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
	num_vecid_filled++;

	for (j = 0; j < num_chunks; j++) {
		struct virtchnl2_vector_chunk *chunk;
		u16 start_vecid, num_vec;

		chunk = &chunks->vchunks[j];
		num_vec = le16_to_cpu(chunk->num_vectors);
		start_vecid = le16_to_cpu(chunk->start_vector_id);

		for (i = 0; i < num_vec; i++) {
			if ((num_vecid_filled + i) < num_vecids) {
				vecids[num_vecid_filled + i] = start_vecid;
				start_vecid++;
			} else {
				break;
			}
		}
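		/* i is the count consumed from this chunk, whether or not
		 * the caller's array limit was hit above.
		 */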
		num_vecid_filled = num_vecid_filled + i;
	}

	return num_vecid_filled;
}

/**
 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
 * @qids: Array of queue ids
 * @num_qids: number of queue ids
 * @q_type: type of queue to extract ids for
 * @chunks: queue ids received over mailbox
 *
 * Will initialize all queue ids with ids received as mailbox parameters
 * Returns number of ids filled
 */
static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
				    struct virtchnl2_queue_reg_chunks *chunks)
{
	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
	u32 num_q_id_filled = 0, i;
	u32 start_q_id, num_q;

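	/* Each chunk independently describes a contiguous
	 * [start_queue_id, start_queue_id + num_queues) range, so walking
	 * the chunks back to front only affects the order in which ids are
	 * appended to @qids, not which ids are extracted.
	 */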
	while (num_chunks--) {
		struct virtchnl2_queue_reg_chunk *chunk;

		chunk = &chunks->chunks[num_chunks];
		if (le32_to_cpu(chunk->type) != q_type)
			continue;

		num_q = le32_to_cpu(chunk->num_queues);
		start_q_id = le32_to_cpu(chunk->start_queue_id);

		for (i = 0; i < num_q; i++) {
			if ((num_q_id_filled + i) < num_qids) {
				qids[num_q_id_filled + i] = start_q_id;
				start_q_id++;
			} else {
				break;
			}
		}
		num_q_id_filled = num_q_id_filled + i;
	}

	return num_q_id_filled;
}

/**
 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
 * @qids: queue ids
 * @num_qids: number of queue ids
 * @q_type: type of queue
 *
 * Will initialize all queue ids with ids received as mailbox
 * parameters. Returns number of queue ids initialized.
 */
static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
				       const u32 *qids,
				       int num_qids,
				       u32 q_type)
{
	struct idpf_queue *q;
	int i, j, k = 0;

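	/* k counts the ids consumed from @qids across all queue groups; it
	 * is returned so the caller can verify that every expected queue
	 * was assigned an id.
	 */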
	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < vport->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
				tx_qgrp->txqs[j]->q_id = qids[k];
				tx_qgrp->txqs[j]->q_type =
					VIRTCHNL2_QUEUE_TYPE_TX;
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u16 num_rxq;

			if (idpf_is_queue_model_split(vport->rxq_model))
				num_rxq = rx_qgrp->splitq.num_rxq_sets;
			else
				num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
				if (idpf_is_queue_model_split(vport->rxq_model))
					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
				else
					q = rx_qgrp->singleq.rxqs[j];
				q->q_id = qids[k];
				q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			tx_qgrp->complq->q_id = qids[k];
			tx_qgrp->complq->q_type =
				VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u8 num_bufqs = vport->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->q_id = qids[k];
				q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
			}
		}
		break;
	default:
		break;
	}

	return k;
}

/**
 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
 *
 * Will initialize all queue ids with ids received as mailbox parameters.
 * Returns 0 on success, negative if not all of the queues could be
 * initialized.
 */
int idpf_vport_queue_ids_init(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_params;
	struct virtchnl2_queue_reg_chunks *chunks;
	struct idpf_vport_config *vport_config;
	u16 vport_idx = vport->idx;
	int num_ids, err = 0;
	u16 q_type;
	u32 *qids;

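	/* Prefer the chunks from the most recent add-queues request, if one
	 * was made; otherwise fall back to the chunks returned when the
	 * vport was created.
	 */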
	vport_config = vport->adapter->vport_config[vport_idx];
	if (vport_config->req_qs_chunks) {
		struct virtchnl2_add_queues *vc_aq =
			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
		chunks = &vc_aq->chunks;
	} else {
		vport_params = vport->adapter->vport_params_recvd[vport_idx];
		chunks = &vport_params->chunks;
	}

	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
	if (!qids)
		return -ENOMEM;

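	/* For each queue type: first gather the ids advertised in the
	 * chunks, then distribute them to the queues, failing if fewer ids
	 * than queues were found at either step.
	 */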
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_TX,
					   chunks);
	if (num_ids < vport->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_TX);
	if (num_ids < vport->num_txq) {
		err = -EINVAL;
		goto mem_rel;
	}

	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
					   VIRTCHNL2_QUEUE_TYPE_RX,
					   chunks);
	if (num_ids < vport->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
					      VIRTCHNL2_QUEUE_TYPE_RX);
	if (num_ids < vport->num_rxq) {
		err = -EINVAL;
		goto mem_rel;
	}

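	/* Completion queues and buffer queues exist only in the split queue
	 * model; skip them otherwise.
	 */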
	if (!idpf_is_queue_model_split(vport->txq_model))
		goto check_rxq;

	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < vport->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
	if (num_ids < vport->num_complq) {
		err = -EINVAL;
		goto mem_rel;
	}

check_rxq:
	if (!idpf_is_queue_model_split(vport->rxq_model))
		goto mem_rel;

	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
	if (num_ids < vport->num_bufq) {
		err = -EINVAL;
		goto mem_rel;
	}
	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
	if (num_ids < vport->num_bufq)
		err = -EINVAL;

mem_rel:
	kfree(qids);

	return err;
}

/**
 * idpf_vport_adjust_qs - Adjust to new requested queues
 * @vport: virtual port data struct
 *
 * Renegotiate queues. Returns 0 on success, negative on failure.
 */
int idpf_vport_adjust_qs(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport vport_msg;
	int err;

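	/* Build a minimal create_vport message so the queue-count
	 * calculation used at vport creation can be reused for the
	 * renegotiation.
	 */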
	vport_msg.txq_model = cpu_to_le16(vport->txq_model);
	vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
				       NULL);
	if (err)
		return err;

	idpf_vport_init_num_qs(vport, &vport_msg);
	idpf_vport_calc_num_q_groups(vport);

	return 0;
}

/**
 * idpf_is_capability_ena - Default implementation of capability checking
 * @adapter: Private data struct
 * @all: require all given flags to be set, or any one of them
 * @field: caps field to check for flags
 * @flag: flag(s) to check
 *
 * Returns true if the requested flag(s) are set in the capability field:
 * all of them when @all is true, at least one otherwise.
 */
bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag)
{
	u8 *caps = (u8 *)&adapter->caps;
	u32 *cap_field;

	if (!caps)
		return false;

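	/* IDPF_BASE_CAPS does not name a flags field inside the caps
	 * structure, so it cannot be checked through the offset arithmetic
	 * below.
	 */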
	if (field == IDPF_BASE_CAPS)
		return false;

	cap_field = (u32 *)(caps + field);

	if (all)
		return (*cap_field & flag) == flag;
	else
		return !!(*cap_field & flag);
}

/**
 * idpf_get_vport_id - Get vport id
 * @vport: virtual port structure
 *
 * Return vport id from the adapter persistent data
 */
u32 idpf_get_vport_id(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return le32_to_cpu(vport_msg->vport_id);
}

/**
 * idpf_mac_filter_async_handler - Async callback for mac filters
 * @adapter: private data struct
 * @xn: transaction for message
 * @ctlq_msg: received message
 *
 * In some scenarios the driver can't sleep and wait for a reply (e.g.: stack
 * is holding rtnl_lock) when adding a new mac filter, which makes it hard to
 * deal with errors returned on the reply. The best we can ultimately do is
 * remove the filter from our list of mac filters and report the error.
 */
static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
					 struct idpf_vc_xn *xn,
					 const struct idpf_ctlq_msg *ctlq_msg)
{
	struct virtchnl2_mac_addr_list *ma_list;
	struct idpf_vport_config *vport_config;
	struct virtchnl2_mac_addr *mac_addr;
	struct idpf_mac_filter *f, *tmp;
	struct list_head *ma_list_head;
	struct idpf_vport *vport;
	u16 num_entries;
	int i;

	/* if success we're done, we're only here if something bad happened */
	if (!ctlq_msg->cookie.mbx.chnl_retval)
		return 0;

	/* make sure at least struct is there */
	if (xn->reply_sz < sizeof(*ma_list))
		goto invalid_payload;

	ma_list = ctlq_msg->ctx.indirect.payload->va;
	mac_addr = ma_list->mac_addr_list;
	num_entries = le16_to_cpu(ma_list->num_mac_addr);
	/* we should have received a buffer at least this big */
	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
		goto invalid_payload;

	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
	if (!vport)
		goto invalid_payload;

	/* vport_config is indexed by vport index, not by the device's
	 * vport id
	 */
	vport_config = adapter->vport_config[vport->idx];
	ma_list_head = &vport_config->user_config.mac_filter_list;

	/* We can't do much to reconcile bad filters at this point, however we
	 * should at least remove them from our list one way or the other so we
	 * have some idea what good filters we have.
	 */
	spin_lock_bh(&vport_config->mac_filter_list_lock);
	list_for_each_entry_safe(f, tmp, ma_list_head, list)
		for (i = 0; i < num_entries; i++)
			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
				list_del(&f->list);
	spin_unlock_bh(&vport_config->mac_filter_list_lock);
	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
			    xn->vc_op);

	return 0;

invalid_payload:
	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
			    xn->vc_op, xn->reply_sz);

	return -EINVAL;
}

/**
 * idpf_add_del_mac_filters - Add/del mac filters
 * @vport: Virtual port data structure
 * @np: Netdev private structure
 * @add: Add or delete flag
 * @async: Don't wait for return message
 *
 * Returns 0 on success, error on failure.
 */
int idpf_add_del_mac_filters(struct idpf_vport *vport,
			     struct idpf_netdev_priv *np,
			     bool add, bool async)
{
	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
	struct idpf_adapter *adapter = np->adapter;
	struct idpf_vc_xn_params xn_params = {};
	struct idpf_vport_config *vport_config;
	u32 num_msgs, total_filters = 0;
	struct idpf_mac_filter *f;
	ssize_t reply_sz;
	int i = 0, k;

	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
				VIRTCHNL2_OP_DEL_MAC_ADDR;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = async;
	xn_params.async_handler = idpf_mac_filter_async_handler;

	vport_config = adapter->vport_config[np->vport_idx];
	spin_lock_bh(&vport_config->mac_filter_list_lock);

	/* Find the number of newly added filters */
	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add)
			total_filters++;
		else if (!add && f->remove)
			total_filters++;
	}

	if (!total_filters) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return 0;
	}

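	/* GFP_ATOMIC: the allocation is made with the filter list spinlock
	 * held, so sleeping is not allowed here.
	 */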
	/* Fill all the new filters into virtchannel message */
	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
			   GFP_ATOMIC);
	if (!mac_addr) {
		spin_unlock_bh(&vport_config->mac_filter_list_lock);

		return -ENOMEM;
	}

	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
			    list) {
		if (add && f->add) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == total_filters)
				break;
		}
		if (!add && f->remove) {
			ether_addr_copy(mac_addr[i].addr, f->macaddr);
			i++;
			f->remove = false;
			if (i == total_filters)
				break;
		}
	}

	spin_unlock_bh(&vport_config->mac_filter_list_lock);


	/* Chunk up the filters into multiple messages to avoid
	 * sending a control queue message buffer that is too large
	 */
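	/* Illustration (the per-message cap is IDPF_NUM_FILTERS_PER_MSG):
	 * with a cap of, say, 20 filters per message, 53 filters would go
	 * out as three messages of 20, 20 and 13 entries.
	 */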
	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);

	for (i = 0, k = 0; i < num_msgs; i++) {
		u32 entries_size, buf_size, num_entries;

		num_entries = min_t(u32, total_filters,
				    IDPF_NUM_FILTERS_PER_MSG);
		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
		buf_size = struct_size(ma_list, mac_addr_list, num_entries);

		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
			kfree(ma_list);
			ma_list = kzalloc(buf_size, GFP_ATOMIC);
			if (!ma_list)
				return -ENOMEM;
		} else {
			memset(ma_list, 0, buf_size);
		}

		ma_list->vport_id = cpu_to_le32(np->vport_id);
		ma_list->num_mac_addr = cpu_to_le16(num_entries);
		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);

		xn_params.send_buf.iov_base = ma_list;
		xn_params.send_buf.iov_len = buf_size;
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_entries;
		total_filters -= num_entries;
	}

	return 0;
}

/**
 * idpf_set_promiscuous - set promiscuous and send message to mailbox
 * @adapter: Driver specific private structure
 * @config_data: Vport specific config data
 * @vport_id: Vport identifier
 *
 * Request to enable promiscuous mode for the vport. Message is sent
 * asynchronously and won't wait for the response. Returns 0 on success,
 * negative on failure.
 */
int idpf_set_promiscuous(struct idpf_adapter *adapter,
			 struct idpf_vport_user_config_data *config_data,
			 u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_promisc_info vpi;
	ssize_t reply_sz;
	u16 flags = 0;

	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
		flags |= VIRTCHNL2_UNICAST_PROMISC;
	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
		flags |= VIRTCHNL2_MULTICAST_PROMISC;

	vpi.vport_id = cpu_to_le32(vport_id);
	vpi.flags = cpu_to_le16(flags);

	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &vpi;
	xn_params.send_buf.iov_len = sizeof(vpi);
	/* setting promiscuous is only ever done asynchronously */
	xn_params.async = true;
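	/* No async_handler is supplied, so an error reply, if one arrives,
	 * is not acted upon beyond the transaction's own bookkeeping.
	 */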
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}
