1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. |
4 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. |
5 | */ |
6 | |
7 | #include <linux/kernel.h> |
8 | #include <linux/errno.h> |
9 | #include <linux/types.h> |
10 | #include <linux/pci.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/if_ether.h> |
13 | |
14 | #include "vnic_resource.h" |
15 | #include "vnic_devcmd.h" |
16 | #include "vnic_dev.h" |
17 | #include "vnic_wq.h" |
18 | #include "vnic_stats.h" |
19 | #include "enic.h" |
20 | |
21 | #define VNIC_MAX_RES_HDR_SIZE \ |
22 | (sizeof(struct vnic_resource_header) + \ |
23 | sizeof(struct vnic_resource) * RES_TYPE_MAX) |
24 | #define VNIC_RES_STRIDE 128 |
25 | |
26 | void *vnic_dev_priv(struct vnic_dev *vdev) |
27 | { |
28 | return vdev->priv; |
29 | } |
30 | |
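/* Walk the resource table in BAR0 (or the mgmt vNIC header) and record, for
 * each resource type, its count and its mapped/bus address so that later
 * lookups via vnic_dev_get_res() are simple table reads.
 */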
31 | static int vnic_dev_discover_res(struct vnic_dev *vdev, |
32 | struct vnic_dev_bar *bar, unsigned int num_bars) |
33 | { |
34 | struct vnic_resource_header __iomem *rh; |
35 | struct mgmt_barmap_hdr __iomem *mrh; |
36 | struct vnic_resource __iomem *r; |
37 | u8 type; |
38 | |
39 | if (num_bars == 0) |
40 | return -EINVAL; |
41 | |
42 | if (bar->len < VNIC_MAX_RES_HDR_SIZE) { |
		vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
44 | return -EINVAL; |
45 | } |
46 | |
47 | rh = bar->vaddr; |
48 | mrh = bar->vaddr; |
49 | if (!rh) { |
		vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
51 | return -EINVAL; |
52 | } |
53 | |
54 | /* Check for mgmt vnic in addition to normal vnic */ |
55 | if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) || |
56 | (ioread32(&rh->version) != VNIC_RES_VERSION)) { |
57 | if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || |
58 | (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { |
			vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
60 | VNIC_RES_MAGIC, VNIC_RES_VERSION, |
61 | MGMTVNIC_MAGIC, MGMTVNIC_VERSION, |
62 | ioread32(&rh->magic), ioread32(&rh->version)); |
63 | return -EINVAL; |
64 | } |
65 | } |
66 | |
67 | if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC) |
68 | r = (struct vnic_resource __iomem *)(mrh + 1); |
69 | else |
70 | r = (struct vnic_resource __iomem *)(rh + 1); |
71 | |
73 | while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { |
74 | |
75 | u8 bar_num = ioread8(&r->bar); |
76 | u32 bar_offset = ioread32(&r->bar_offset); |
77 | u32 count = ioread32(&r->count); |
78 | u32 len; |
79 | |
80 | r++; |
81 | |
82 | if (bar_num >= num_bars) |
83 | continue; |
84 | |
85 | if (!bar[bar_num].len || !bar[bar_num].vaddr) |
86 | continue; |
87 | |
88 | switch (type) { |
89 | case RES_TYPE_WQ: |
90 | case RES_TYPE_RQ: |
91 | case RES_TYPE_CQ: |
92 | case RES_TYPE_INTR_CTRL: |
93 | /* each count is stride bytes long */ |
94 | len = count * VNIC_RES_STRIDE; |
95 | if (len + bar_offset > bar[bar_num].len) { |
				vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
97 | type, bar_offset, len, |
98 | bar[bar_num].len); |
99 | return -EINVAL; |
100 | } |
101 | break; |
102 | case RES_TYPE_INTR_PBA_LEGACY: |
103 | case RES_TYPE_DEVCMD: |
104 | case RES_TYPE_DEVCMD2: |
105 | len = count; |
106 | break; |
107 | default: |
108 | continue; |
109 | } |
110 | |
111 | vdev->res[type].count = count; |
112 | vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr + |
113 | bar_offset; |
114 | vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset; |
115 | } |
116 | |
117 | return 0; |
118 | } |
119 | |
120 | unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, |
121 | enum vnic_res_type type) |
122 | { |
123 | return vdev->res[type].count; |
124 | } |
125 | EXPORT_SYMBOL(vnic_dev_get_res_count); |
126 | |
127 | void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, |
128 | unsigned int index) |
129 | { |
130 | if (!vdev->res[type].vaddr) |
131 | return NULL; |
132 | |
133 | switch (type) { |
134 | case RES_TYPE_WQ: |
135 | case RES_TYPE_RQ: |
136 | case RES_TYPE_CQ: |
137 | case RES_TYPE_INTR_CTRL: |
138 | return (char __iomem *)vdev->res[type].vaddr + |
139 | index * VNIC_RES_STRIDE; |
140 | default: |
141 | return (char __iomem *)vdev->res[type].vaddr; |
142 | } |
143 | } |
144 | EXPORT_SYMBOL(vnic_dev_get_res); |
145 | |
146 | static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, |
147 | unsigned int desc_count, unsigned int desc_size) |
148 | { |
149 | /* The base address of the desc rings must be 512 byte aligned. |
150 | * Descriptor count is aligned to groups of 32 descriptors. A |
151 | * count of 0 means the maximum 4096 descriptors. Descriptor |
152 | * size is aligned to 16 bytes. |
153 | */ |
154 | |
155 | unsigned int count_align = 32; |
156 | unsigned int desc_align = 16; |
157 | |
158 | ring->base_align = 512; |
159 | |
160 | if (desc_count == 0) |
161 | desc_count = 4096; |
162 | |
163 | ring->desc_count = ALIGN(desc_count, count_align); |
164 | |
165 | ring->desc_size = ALIGN(desc_size, desc_align); |
166 | |
167 | ring->size = ring->desc_count * ring->desc_size; |
168 | ring->size_unaligned = ring->size + ring->base_align; |
169 | |
170 | return ring->size_unaligned; |
171 | } |
172 | |
173 | void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) |
174 | { |
175 | memset(ring->descs, 0, ring->size); |
176 | } |
177 | |
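/* Allocate a coherent descriptor ring.  The ring is over-allocated by
 * base_align bytes so the returned bus address can be rounded up to the
 * required 512-byte boundary; the CPU pointer is offset by the same amount.
 */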
178 | int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, |
179 | unsigned int desc_count, unsigned int desc_size) |
180 | { |
181 | vnic_dev_desc_ring_size(ring, desc_count, desc_size); |
182 | |
	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
						   ring->size_unaligned,
						   &ring->base_addr_unaligned,
						   GFP_KERNEL);
187 | |
188 | if (!ring->descs_unaligned) { |
		vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
190 | (int)ring->size); |
191 | return -ENOMEM; |
192 | } |
193 | |
194 | ring->base_addr = ALIGN(ring->base_addr_unaligned, |
195 | ring->base_align); |
196 | ring->descs = (u8 *)ring->descs_unaligned + |
197 | (ring->base_addr - ring->base_addr_unaligned); |
198 | |
199 | vnic_dev_clear_desc_ring(ring); |
200 | |
201 | ring->desc_avail = ring->desc_count - 1; |
202 | |
203 | return 0; |
204 | } |
205 | |
206 | void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) |
207 | { |
208 | if (ring->descs) { |
		dma_free_coherent(&vdev->pdev->dev, ring->size_unaligned,
				  ring->descs_unaligned,
				  ring->base_addr_unaligned);
212 | ring->descs = NULL; |
213 | } |
214 | } |
215 | |
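/* devcmd (v1) path: write the arguments and the command to the devcmd
 * register area, then poll the status register in 100 usec steps for up to
 * "wait" iterations unless the command is flagged no-wait.
 */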
216 | static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, |
217 | int wait) |
218 | { |
219 | struct vnic_devcmd __iomem *devcmd = vdev->devcmd; |
220 | unsigned int i; |
221 | int delay; |
222 | u32 status; |
223 | int err; |
224 | |
225 | status = ioread32(&devcmd->status); |
226 | if (status == 0xFFFFFFFF) { |
227 | /* PCI-e target device is gone */ |
228 | return -ENODEV; |
229 | } |
230 | if (status & STAT_BUSY) { |
		vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
232 | return -EBUSY; |
233 | } |
234 | |
235 | if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { |
236 | for (i = 0; i < VNIC_DEVCMD_NARGS; i++) |
			writeq(vdev->args[i], &devcmd->args[i]);
238 | wmb(); |
239 | } |
240 | |
241 | iowrite32(cmd, &devcmd->cmd); |
242 | |
243 | if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) |
244 | return 0; |
245 | |
246 | for (delay = 0; delay < wait; delay++) { |
247 | |
248 | udelay(100); |
249 | |
250 | status = ioread32(&devcmd->status); |
251 | if (status == 0xFFFFFFFF) { |
252 | /* PCI-e target device is gone */ |
253 | return -ENODEV; |
254 | } |
255 | |
256 | if (!(status & STAT_BUSY)) { |
257 | |
258 | if (status & STAT_ERROR) { |
				err = (int)readq(&devcmd->args[0]);
260 | if (err == ERR_EINVAL && |
261 | cmd == CMD_CAPABILITY) |
262 | return -err; |
263 | if (err != ERR_ECMDUNKNOWN || |
264 | cmd != CMD_CAPABILITY) |
					vdev_neterr(vdev, "Error %d devcmd %d\n",
266 | err, _CMD_N(cmd)); |
267 | return -err; |
268 | } |
269 | |
270 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { |
271 | rmb(); |
272 | for (i = 0; i < VNIC_DEVCMD_NARGS; i++) |
					vdev->args[i] = readq(&devcmd->args[i]);
274 | } |
275 | |
276 | return 0; |
277 | } |
278 | } |
279 | |
	vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
281 | return -ETIMEDOUT; |
282 | } |
283 | |
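/* devcmd2 path: post the command on a work queue ring and, unless the
 * command is posted as no-result, poll the matching results ring entry
 * until its color bit flips to the expected value or the wait expires.
 */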
284 | static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, |
285 | int wait) |
286 | { |
287 | struct devcmd2_controller *dc2c = vdev->devcmd2; |
288 | struct devcmd2_result *result; |
289 | u8 color; |
290 | unsigned int i; |
291 | int delay, err; |
292 | u32 fetch_index, new_posted; |
293 | u32 posted = dc2c->posted; |
294 | |
295 | fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); |
296 | |
297 | if (fetch_index == 0xFFFFFFFF) |
298 | return -ENODEV; |
299 | |
300 | new_posted = (posted + 1) % DEVCMD2_RING_SIZE; |
301 | |
302 | if (new_posted == fetch_index) { |
		vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
304 | _CMD_N(cmd), fetch_index, posted); |
305 | return -EBUSY; |
306 | } |
307 | dc2c->cmd_ring[posted].cmd = cmd; |
308 | dc2c->cmd_ring[posted].flags = 0; |
309 | |
310 | if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) |
311 | dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; |
312 | if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) |
313 | for (i = 0; i < VNIC_DEVCMD_NARGS; i++) |
314 | dc2c->cmd_ring[posted].args[i] = vdev->args[i]; |
315 | |
316 | /* Adding write memory barrier prevents compiler and/or CPU reordering, |
317 | * thus avoiding descriptor posting before descriptor is initialized. |
318 | * Otherwise, hardware can read stale descriptor fields. |
319 | */ |
320 | wmb(); |
321 | iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); |
322 | dc2c->posted = new_posted; |
323 | |
324 | if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) |
325 | return 0; |
326 | |
327 | result = dc2c->result + dc2c->next_result; |
328 | color = dc2c->color; |
329 | |
330 | dc2c->next_result++; |
331 | if (dc2c->next_result == dc2c->result_size) { |
332 | dc2c->next_result = 0; |
333 | dc2c->color = dc2c->color ? 0 : 1; |
334 | } |
335 | |
336 | for (delay = 0; delay < wait; delay++) { |
337 | if (result->color == color) { |
338 | if (result->error) { |
339 | err = result->error; |
340 | if (err != ERR_ECMDUNKNOWN || |
341 | cmd != CMD_CAPABILITY) |
					vdev_neterr(vdev, "Error %d devcmd %d\n",
343 | err, _CMD_N(cmd)); |
344 | return -err; |
345 | } |
346 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) |
347 | for (i = 0; i < VNIC_DEVCMD2_NARGS; i++) |
348 | vdev->args[i] = result->results[i]; |
349 | |
350 | return 0; |
351 | } |
352 | udelay(100); |
353 | } |
354 | |
	vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));
356 | |
357 | return -ETIMEDOUT; |
358 | } |
359 | |
360 | static int vnic_dev_init_devcmd1(struct vnic_dev *vdev) |
361 | { |
362 | vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); |
363 | if (!vdev->devcmd) |
364 | return -ENODEV; |
365 | vdev->devcmd_rtn = _vnic_dev_cmd; |
366 | |
367 | return 0; |
368 | } |
369 | |
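/* Set up the devcmd2 channel: allocate and enable the command work queue,
 * allocate the results ring, and hand the results ring to firmware via
 * CMD_INITIALIZE_DEVCMD2 before switching devcmd_rtn to _vnic_dev_cmd2.
 */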
370 | static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) |
371 | { |
372 | int err; |
373 | unsigned int fetch_index; |
374 | |
375 | if (vdev->devcmd2) |
376 | return 0; |
377 | |
	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
379 | if (!vdev->devcmd2) |
380 | return -ENOMEM; |
381 | |
382 | vdev->devcmd2->color = 1; |
383 | vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; |
	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
386 | if (err) |
387 | goto err_free_devcmd2; |
388 | |
389 | fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); |
390 | if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ |
		vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
392 | err = -ENODEV; |
393 | goto err_free_wq; |
394 | } |
395 | |
	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
			   0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);
400 | |
	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
403 | if (err) |
404 | goto err_disable_wq; |
405 | |
406 | vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; |
407 | vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; |
408 | vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; |
409 | vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr | |
410 | VNIC_PADDR_TARGET; |
411 | vdev->args[1] = DEVCMD2_RING_SIZE; |
412 | |
	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
414 | if (err) |
415 | goto err_free_desc_ring; |
416 | |
417 | vdev->devcmd_rtn = _vnic_dev_cmd2; |
418 | |
419 | return 0; |
420 | |
err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;
430 | |
431 | return err; |
432 | } |
433 | |
434 | static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) |
435 | { |
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
440 | } |
441 | |
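/* Issue a devcmd on behalf of another vNIC: the proxy command carries the
 * target index in args[0], the wrapped command in args[1] and its arguments
 * in args[2..3]; the wrapped command's status and results come back shifted
 * down by one slot.
 */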
442 | static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, |
443 | enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, |
444 | u64 *a0, u64 *a1, int wait) |
445 | { |
446 | u32 status; |
447 | int err; |
448 | |
449 | memset(vdev->args, 0, sizeof(vdev->args)); |
450 | |
451 | vdev->args[0] = vdev->proxy_index; |
452 | vdev->args[1] = cmd; |
453 | vdev->args[2] = *a0; |
454 | vdev->args[3] = *a1; |
455 | |
456 | err = vdev->devcmd_rtn(vdev, proxy_cmd, wait); |
457 | if (err) |
458 | return err; |
459 | |
460 | status = (u32)vdev->args[0]; |
461 | if (status & STAT_ERROR) { |
462 | err = (int)vdev->args[1]; |
463 | if (err != ERR_ECMDUNKNOWN || |
464 | cmd != CMD_CAPABILITY) |
			vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
466 | err, _CMD_N(cmd)); |
467 | return err; |
468 | } |
469 | |
470 | *a0 = vdev->args[1]; |
471 | *a1 = vdev->args[2]; |
472 | |
473 | return 0; |
474 | } |
475 | |
476 | static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, |
477 | enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) |
478 | { |
479 | int err; |
480 | |
481 | vdev->args[0] = *a0; |
482 | vdev->args[1] = *a1; |
483 | |
484 | err = vdev->devcmd_rtn(vdev, cmd, wait); |
485 | |
486 | *a0 = vdev->args[0]; |
487 | *a1 = vdev->args[1]; |
488 | |
489 | return err; |
490 | } |
491 | |
492 | void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index) |
493 | { |
494 | vdev->proxy = PROXY_BY_INDEX; |
495 | vdev->proxy_index = index; |
496 | } |
497 | |
498 | void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev) |
499 | { |
500 | vdev->proxy = PROXY_NONE; |
501 | vdev->proxy_index = 0; |
502 | } |
503 | |
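/* Main devcmd entry point: dispatch either directly or through the
 * currently selected proxy mode (by index or by BDF).
 */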
504 | int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, |
505 | u64 *a0, u64 *a1, int wait) |
506 | { |
507 | memset(vdev->args, 0, sizeof(vdev->args)); |
508 | |
509 | switch (vdev->proxy) { |
510 | case PROXY_BY_INDEX: |
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
512 | a0, a1, wait); |
513 | case PROXY_BY_BDF: |
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
515 | a0, a1, wait); |
516 | case PROXY_NONE: |
517 | default: |
518 | return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait); |
519 | } |
520 | } |
521 | |
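/* A command is reported as capable when CMD_CAPABILITY succeeds and returns
 * 0 in a0 for the queried command code.
 */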
522 | static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) |
523 | { |
524 | u64 a0 = (u32)cmd, a1 = 0; |
525 | int wait = 1000; |
526 | int err; |
527 | |
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
529 | |
530 | return !(err || a0); |
531 | } |
532 | |
533 | int vnic_dev_fw_info(struct vnic_dev *vdev, |
534 | struct vnic_devcmd_fw_info **fw_info) |
535 | { |
536 | u64 a0, a1 = 0; |
537 | int wait = 1000; |
538 | int err = 0; |
539 | |
540 | if (!vdev->fw_info) { |
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
						   sizeof(struct vnic_devcmd_fw_info),
						   &vdev->fw_info_pa, GFP_ATOMIC);
544 | if (!vdev->fw_info) |
545 | return -ENOMEM; |
546 | |
547 | a0 = vdev->fw_info_pa; |
548 | a1 = sizeof(struct vnic_devcmd_fw_info); |
549 | |
550 | /* only get fw_info once and cache it */ |
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
					   &a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
					   &a0, &a1, wait);
557 | } |
558 | |
559 | *fw_info = vdev->fw_info; |
560 | |
561 | return err; |
562 | } |
563 | |
564 | int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, |
565 | void *value) |
566 | { |
567 | u64 a0, a1; |
568 | int wait = 1000; |
569 | int err; |
570 | |
571 | a0 = offset; |
572 | a1 = size; |
573 | |
	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
575 | |
576 | switch (size) { |
577 | case 1: *(u8 *)value = (u8)a0; break; |
578 | case 2: *(u16 *)value = (u16)a0; break; |
579 | case 4: *(u32 *)value = (u32)a0; break; |
580 | case 8: *(u64 *)value = a0; break; |
581 | default: BUG(); break; |
582 | } |
583 | |
584 | return err; |
585 | } |
586 | |
587 | int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) |
588 | { |
589 | u64 a0, a1; |
590 | int wait = 1000; |
591 | |
592 | if (!vdev->stats) { |
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
						 sizeof(struct vnic_stats),
						 &vdev->stats_pa, GFP_ATOMIC);
596 | if (!vdev->stats) |
597 | return -ENOMEM; |
598 | } |
599 | |
600 | *stats = vdev->stats; |
601 | a0 = vdev->stats_pa; |
602 | a1 = sizeof(struct vnic_stats); |
603 | |
	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
605 | } |
606 | |
607 | int vnic_dev_close(struct vnic_dev *vdev) |
608 | { |
609 | u64 a0 = 0, a1 = 0; |
610 | int wait = 1000; |
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
612 | } |
613 | |
614 | int vnic_dev_enable_wait(struct vnic_dev *vdev) |
615 | { |
616 | u64 a0 = 0, a1 = 0; |
617 | int wait = 1000; |
618 | |
	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
623 | } |
624 | |
625 | int vnic_dev_disable(struct vnic_dev *vdev) |
626 | { |
627 | u64 a0 = 0, a1 = 0; |
628 | int wait = 1000; |
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
630 | } |
631 | |
632 | int vnic_dev_open(struct vnic_dev *vdev, int arg) |
633 | { |
634 | u64 a0 = (u32)arg, a1 = 0; |
635 | int wait = 1000; |
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
637 | } |
638 | |
639 | int vnic_dev_open_done(struct vnic_dev *vdev, int *done) |
640 | { |
641 | u64 a0 = 0, a1 = 0; |
642 | int wait = 1000; |
643 | int err; |
644 | |
645 | *done = 0; |
646 | |
	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
648 | if (err) |
649 | return err; |
650 | |
651 | *done = (a0 == 0); |
652 | |
653 | return 0; |
654 | } |
655 | |
656 | int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) |
657 | { |
658 | u64 a0 = (u32)arg, a1 = 0; |
659 | int wait = 1000; |
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
661 | } |
662 | |
663 | int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) |
664 | { |
665 | u64 a0 = 0, a1 = 0; |
666 | int wait = 1000; |
667 | int err; |
668 | |
669 | *done = 0; |
670 | |
	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
672 | if (err) |
673 | return err; |
674 | |
675 | *done = (a0 == 0); |
676 | |
677 | return 0; |
678 | } |
679 | |
680 | int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg) |
681 | { |
682 | u64 a0 = (u32)arg, a1 = 0; |
683 | int wait = 1000; |
684 | int err; |
685 | |
	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				    &a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
694 | } |
695 | } |
696 | |
697 | int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done) |
698 | { |
699 | u64 a0 = 0, a1 = 0; |
700 | int wait = 1000; |
701 | int err; |
702 | |
703 | *done = 0; |
704 | |
	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				   &a0, &a1, wait);
708 | if (err) |
709 | return err; |
710 | } else { |
711 | return vnic_dev_soft_reset_done(vdev, done); |
712 | } |
713 | |
714 | *done = (a0 == 0); |
715 | |
716 | return 0; |
717 | } |
718 | |
719 | int vnic_dev_hang_notify(struct vnic_dev *vdev) |
720 | { |
721 | u64 a0, a1; |
722 | int wait = 1000; |
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
724 | } |
725 | |
726 | int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) |
727 | { |
728 | u64 a0, a1; |
729 | int wait = 1000; |
730 | int err, i; |
731 | |
732 | for (i = 0; i < ETH_ALEN; i++) |
733 | mac_addr[i] = 0; |
734 | |
	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
736 | if (err) |
737 | return err; |
738 | |
739 | for (i = 0; i < ETH_ALEN; i++) |
740 | mac_addr[i] = ((u8 *)&a0)[i]; |
741 | |
742 | return 0; |
743 | } |
744 | |
745 | int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, |
746 | int broadcast, int promisc, int allmulti) |
747 | { |
748 | u64 a0, a1 = 0; |
749 | int wait = 1000; |
750 | int err; |
751 | |
752 | a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | |
753 | (multicast ? CMD_PFILTER_MULTICAST : 0) | |
754 | (broadcast ? CMD_PFILTER_BROADCAST : 0) | |
755 | (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | |
756 | (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); |
757 | |
	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't set packet filter\n");
761 | |
762 | return err; |
763 | } |
764 | |
765 | int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr) |
766 | { |
767 | u64 a0 = 0, a1 = 0; |
768 | int wait = 1000; |
769 | int err; |
770 | int i; |
771 | |
772 | for (i = 0; i < ETH_ALEN; i++) |
773 | ((u8 *)&a0)[i] = addr[i]; |
774 | |
	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);
778 | |
779 | return err; |
780 | } |
781 | |
782 | int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr) |
783 | { |
784 | u64 a0 = 0, a1 = 0; |
785 | int wait = 1000; |
786 | int err; |
787 | int i; |
788 | |
789 | for (i = 0; i < ETH_ALEN; i++) |
790 | ((u8 *)&a0)[i] = addr[i]; |
791 | |
	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);
795 | |
796 | return err; |
797 | } |
798 | |
799 | int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, |
800 | u8 ig_vlan_rewrite_mode) |
801 | { |
802 | u64 a0 = ig_vlan_rewrite_mode, a1 = 0; |
803 | int wait = 1000; |
804 | |
	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				    &a0, &a1, wait);
808 | else |
809 | return 0; |
810 | } |
811 | |
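/* Register the notify block with firmware: a0 carries its bus address and
 * a1 packs the interrupt index into bits 32..47 plus the block size in the
 * low bits; on success firmware returns the notify block size in a1.
 */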
812 | static int vnic_dev_notify_setcmd(struct vnic_dev *vdev, |
813 | void *notify_addr, dma_addr_t notify_pa, u16 intr) |
814 | { |
815 | u64 a0, a1; |
816 | int wait = 1000; |
817 | int r; |
818 | |
819 | memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); |
820 | vdev->notify = notify_addr; |
821 | vdev->notify_pa = notify_pa; |
822 | |
823 | a0 = (u64)notify_pa; |
824 | a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; |
825 | a1 += sizeof(struct vnic_devcmd_notify); |
826 | |
	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
828 | vdev->notify_sz = (r == 0) ? (u32)a1 : 0; |
829 | return r; |
830 | } |
831 | |
832 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) |
833 | { |
834 | void *notify_addr; |
835 | dma_addr_t notify_pa; |
836 | |
837 | if (vdev->notify || vdev->notify_pa) { |
		vdev_neterr(vdev, "notify block %p still allocated\n",
839 | vdev->notify); |
840 | return -EINVAL; |
841 | } |
842 | |
	notify_addr = dma_alloc_coherent(&vdev->pdev->dev,
					 sizeof(struct vnic_devcmd_notify),
					 &notify_pa, GFP_ATOMIC);
846 | if (!notify_addr) |
847 | return -ENOMEM; |
848 | |
849 | return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); |
850 | } |
851 | |
852 | static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) |
853 | { |
854 | u64 a0, a1; |
855 | int wait = 1000; |
856 | int err; |
857 | |
858 | a0 = 0; /* paddr = 0 to unset notify buffer */ |
859 | a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ |
860 | a1 += sizeof(struct vnic_devcmd_notify); |
861 | |
	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
863 | vdev->notify = NULL; |
864 | vdev->notify_pa = 0; |
865 | vdev->notify_sz = 0; |
866 | |
867 | return err; |
868 | } |
869 | |
870 | int vnic_dev_notify_unset(struct vnic_dev *vdev) |
871 | { |
872 | if (vdev->notify) { |
		dma_free_coherent(&vdev->pdev->dev,
				  sizeof(struct vnic_devcmd_notify),
				  vdev->notify, vdev->notify_pa);
876 | } |
877 | |
878 | return vnic_dev_notify_unsetcmd(vdev); |
879 | } |
880 | |
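/* The first word of the notify block is a checksum of the remaining words;
 * copy and re-check until a consistent snapshot is captured, since the
 * block can be updated underneath us while it is being read.
 */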
881 | static int vnic_dev_notify_ready(struct vnic_dev *vdev) |
882 | { |
883 | u32 *words; |
884 | unsigned int nwords = vdev->notify_sz / 4; |
885 | unsigned int i; |
886 | u32 csum; |
887 | |
888 | if (!vdev->notify || !vdev->notify_sz) |
889 | return 0; |
890 | |
891 | do { |
892 | csum = 0; |
893 | memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz); |
894 | words = (u32 *)&vdev->notify_copy; |
895 | for (i = 1; i < nwords; i++) |
896 | csum += words[i]; |
897 | } while (csum != words[0]); |
898 | |
899 | return 1; |
900 | } |
901 | |
902 | int vnic_dev_init(struct vnic_dev *vdev, int arg) |
903 | { |
904 | u64 a0 = (u32)arg, a1 = 0; |
905 | int wait = 1000; |
906 | int r = 0; |
907 | |
	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
918 | } |
919 | } |
920 | return r; |
921 | } |
922 | |
923 | int vnic_dev_deinit(struct vnic_dev *vdev) |
924 | { |
925 | u64 a0 = 0, a1 = 0; |
926 | int wait = 1000; |
927 | |
	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
929 | } |
930 | |
931 | void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev) |
932 | { |
933 | /* Default: hardware intr coal timer is in units of 1.5 usecs */ |
934 | vdev->intr_coal_timer_info.mul = 2; |
935 | vdev->intr_coal_timer_info.div = 3; |
936 | vdev->intr_coal_timer_info.max_usec = |
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
938 | } |
939 | |
940 | int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) |
941 | { |
942 | int wait = 1000; |
943 | int err; |
944 | |
945 | memset(vdev->args, 0, sizeof(vdev->args)); |
946 | |
	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
948 | err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait); |
949 | else |
950 | err = ERR_ECMDUNKNOWN; |
951 | |
952 | /* Use defaults when firmware doesn't support the devcmd at all or |
953 | * supports it for only specific hardware |
954 | */ |
955 | if ((err == ERR_ECMDUNKNOWN) || |
956 | (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { |
		vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
958 | vnic_dev_intr_coal_timer_info_default(vdev); |
959 | return 0; |
960 | } |
961 | |
962 | if (!err) { |
963 | vdev->intr_coal_timer_info.mul = (u32) vdev->args[0]; |
964 | vdev->intr_coal_timer_info.div = (u32) vdev->args[1]; |
965 | vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2]; |
966 | } |
967 | |
968 | return err; |
969 | } |
970 | |
971 | int vnic_dev_link_status(struct vnic_dev *vdev) |
972 | { |
973 | if (!vnic_dev_notify_ready(vdev)) |
974 | return 0; |
975 | |
976 | return vdev->notify_copy.link_state; |
977 | } |
978 | |
979 | u32 vnic_dev_port_speed(struct vnic_dev *vdev) |
980 | { |
981 | if (!vnic_dev_notify_ready(vdev)) |
982 | return 0; |
983 | |
984 | return vdev->notify_copy.port_speed; |
985 | } |
986 | |
987 | u32 vnic_dev_msg_lvl(struct vnic_dev *vdev) |
988 | { |
989 | if (!vnic_dev_notify_ready(vdev)) |
990 | return 0; |
991 | |
992 | return vdev->notify_copy.msglvl; |
993 | } |
994 | |
995 | u32 vnic_dev_mtu(struct vnic_dev *vdev) |
996 | { |
997 | if (!vnic_dev_notify_ready(vdev)) |
998 | return 0; |
999 | |
1000 | return vdev->notify_copy.mtu; |
1001 | } |
1002 | |
1003 | void vnic_dev_set_intr_mode(struct vnic_dev *vdev, |
1004 | enum vnic_dev_intr_mode intr_mode) |
1005 | { |
1006 | vdev->intr_mode = intr_mode; |
1007 | } |
1008 | |
1009 | enum vnic_dev_intr_mode vnic_dev_get_intr_mode( |
1010 | struct vnic_dev *vdev) |
1011 | { |
1012 | return vdev->intr_mode; |
1013 | } |
1014 | |
1015 | u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec) |
1016 | { |
1017 | return (usec * vdev->intr_coal_timer_info.mul) / |
1018 | vdev->intr_coal_timer_info.div; |
1019 | } |
1020 | |
1021 | u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles) |
1022 | { |
1023 | return (hw_cycles * vdev->intr_coal_timer_info.div) / |
1024 | vdev->intr_coal_timer_info.mul; |
1025 | } |
1026 | |
1027 | u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) |
1028 | { |
1029 | return vdev->intr_coal_timer_info.max_usec; |
1030 | } |
1031 | |
1032 | void vnic_dev_unregister(struct vnic_dev *vdev) |
1033 | { |
1034 | if (vdev) { |
1035 | if (vdev->notify) |
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_notify),
					  vdev->notify, vdev->notify_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_stats),
					  vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_fw_info),
					  vdev->fw_info, vdev->fw_info_pa);
1047 | if (vdev->devcmd2) |
1048 | vnic_dev_deinit_devcmd2(vdev); |
1049 | |
		kfree(vdev);
1051 | } |
1052 | } |
1053 | EXPORT_SYMBOL(vnic_dev_unregister); |
1054 | |
1055 | struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, |
1056 | void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, |
1057 | unsigned int num_bars) |
1058 | { |
1059 | if (!vdev) { |
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
1061 | if (!vdev) |
1062 | return NULL; |
1063 | } |
1064 | |
1065 | vdev->priv = priv; |
1066 | vdev->pdev = pdev; |
1067 | |
1068 | if (vnic_dev_discover_res(vdev, bar, num_bars)) |
1069 | goto err_out; |
1070 | |
1071 | return vdev; |
1072 | |
1073 | err_out: |
1074 | vnic_dev_unregister(vdev); |
1075 | return NULL; |
1076 | } |
1077 | EXPORT_SYMBOL(vnic_dev_register); |
1078 | |
1079 | struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev) |
1080 | { |
1081 | return vdev->pdev; |
1082 | } |
1083 | EXPORT_SYMBOL(vnic_dev_get_pdev); |
1084 | |
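/* Prefer the devcmd2 channel when its resource is present; fall back to the
 * legacy devcmd register interface otherwise or if devcmd2 init fails.
 */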
1085 | int vnic_devcmd_init(struct vnic_dev *vdev) |
1086 | { |
1087 | void __iomem *res; |
1088 | int err; |
1089 | |
1090 | res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); |
1091 | if (res) { |
1092 | err = vnic_dev_init_devcmd2(vdev); |
1093 | if (err) |
			vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
1095 | err); |
1096 | else |
1097 | return 0; |
1098 | } else { |
		vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
1100 | } |
1101 | err = vnic_dev_init_devcmd1(vdev); |
1102 | if (err) |
		vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);
1104 | |
1105 | return err; |
1106 | } |
1107 | |
1108 | int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) |
1109 | { |
1110 | u64 a0, a1 = len; |
1111 | int wait = 1000; |
1112 | dma_addr_t prov_pa; |
1113 | void *prov_buf; |
1114 | int ret; |
1115 | |
	prov_buf = dma_alloc_coherent(&vdev->pdev->dev, len, &prov_pa, GFP_ATOMIC);
1117 | if (!prov_buf) |
1118 | return -ENOMEM; |
1119 | |
1120 | memcpy(prov_buf, buf, len); |
1121 | |
1122 | a0 = prov_pa; |
1123 | |
	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	dma_free_coherent(&vdev->pdev->dev, len, prov_buf, prov_pa);
1127 | |
1128 | return ret; |
1129 | } |
1130 | |
1131 | int vnic_dev_enable2(struct vnic_dev *vdev, int active) |
1132 | { |
1133 | u64 a0, a1 = 0; |
1134 | int wait = 1000; |
1135 | |
1136 | a0 = (active ? CMD_ENABLE2_ACTIVE : 0); |
1137 | |
	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
1139 | } |
1140 | |
1141 | static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, |
1142 | int *status) |
1143 | { |
1144 | u64 a0 = cmd, a1 = 0; |
1145 | int wait = 1000; |
1146 | int ret; |
1147 | |
	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
1149 | if (!ret) |
1150 | *status = (int)a0; |
1151 | |
1152 | return ret; |
1153 | } |
1154 | |
1155 | int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status) |
1156 | { |
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
1158 | } |
1159 | |
1160 | int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status) |
1161 | { |
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
1163 | } |
1164 | |
1165 | int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) |
1166 | { |
1167 | u64 a0, a1; |
1168 | int wait = 1000; |
1169 | int i; |
1170 | |
1171 | for (i = 0; i < ETH_ALEN; i++) |
1172 | ((u8 *)&a0)[i] = mac_addr[i]; |
1173 | |
	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
1175 | } |
1176 | |
1177 | /* vnic_dev_classifier: Add/Delete classifier entries |
1178 | * @vdev: vdev of the device |
1179 | * @cmd: CLSF_ADD for Add filter |
1180 | * CLSF_DEL for Delete filter |
1181 | * @entry: In case of ADD filter, the caller passes the RQ number in this |
1182 | * variable. |
1183 | * |
 *	   This function stores the filter_id returned by the firmware in the
 *	   same variable before returning.
1186 | * |
1187 | * In case of DEL filter, the caller passes the RQ number. Return |
1188 | * value is irrelevant. |
1189 | * @data: filter data |
1190 | */ |
1191 | int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, |
1192 | struct filter *data) |
1193 | { |
1194 | u64 a0, a1; |
1195 | int wait = 1000; |
1196 | dma_addr_t tlv_pa; |
1197 | int ret = -EINVAL; |
1198 | struct filter_tlv *tlv, *tlv_va; |
1199 | struct filter_action *action; |
1200 | u64 tlv_size; |
1201 | |
1202 | if (cmd == CLSF_ADD) { |
1203 | tlv_size = sizeof(struct filter) + |
1204 | sizeof(struct filter_action) + |
1205 | 2 * sizeof(struct filter_tlv); |
		tlv_va = dma_alloc_coherent(&vdev->pdev->dev, tlv_size,
					    &tlv_pa, GFP_ATOMIC);
1208 | if (!tlv_va) |
1209 | return -ENOMEM; |
1210 | tlv = tlv_va; |
1211 | a0 = tlv_pa; |
1212 | a1 = tlv_size; |
1213 | memset(tlv, 0, tlv_size); |
1214 | tlv->type = CLSF_TLV_FILTER; |
1215 | tlv->length = sizeof(struct filter); |
1216 | *(struct filter *)&tlv->val = *data; |
1217 | |
1218 | tlv = (struct filter_tlv *)((char *)tlv + |
1219 | sizeof(struct filter_tlv) + |
1220 | sizeof(struct filter)); |
1221 | |
1222 | tlv->type = CLSF_TLV_ACTION; |
1223 | tlv->length = sizeof(struct filter_action); |
1224 | action = (struct filter_action *)&tlv->val; |
1225 | action->type = FILTER_ACTION_RQ_STEERING; |
1226 | action->u.rq_idx = *entry; |
1227 | |
		ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
		*entry = (u16)a0;
		dma_free_coherent(&vdev->pdev->dev, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
1234 | } |
1235 | |
1236 | return ret; |
1237 | } |
1238 | |
1239 | int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config) |
1240 | { |
1241 | u64 a0 = overlay; |
1242 | u64 a1 = config; |
1243 | int wait = 1000; |
1244 | |
	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
1246 | } |
1247 | |
1248 | int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, |
1249 | u16 vxlan_udp_port_number) |
1250 | { |
1251 | u64 a1 = vxlan_udp_port_number; |
1252 | u64 a0 = overlay; |
1253 | int wait = 1000; |
1254 | |
	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
1256 | } |
1257 | |
1258 | int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, |
1259 | u64 *supported_versions, u64 *a1) |
1260 | { |
1261 | u64 a0 = feature; |
1262 | int wait = 1000; |
1263 | int ret; |
1264 | |
	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, a1, wait);
1266 | if (!ret) |
1267 | *supported_versions = a0; |
1268 | |
1269 | return ret; |
1270 | } |
1271 | |
int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type)
1273 | { |
1274 | u64 a0 = CMD_NIC_CFG, a1 = 0; |
1275 | int wait = 1000; |
1276 | int err; |
1277 | |
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
1279 | /* rss_hash_type is valid only when a0 is 1. Adapter which does not |
1280 | * support CMD_CAPABILITY for rss_hash_type has a0 = 0 |
1281 | */ |
1282 | if (err || (a0 != 1)) |
1283 | return -EOPNOTSUPP; |
1284 | |
1285 | a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) & |
1286 | NIC_CFG_RSS_HASH_TYPE_MASK_FIELD; |
1287 | |
1288 | *rss_hash_type = (u8)a1; |
1289 | |
1290 | return 0; |
1291 | } |
1292 | |