1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | // Copyright 2014 Cisco Systems, Inc. All rights reserved. |
3 | |
4 | #include <linux/kernel.h> |
5 | #include <linux/errno.h> |
6 | #include <linux/types.h> |
7 | #include <linux/pci.h> |
8 | #include <linux/delay.h> |
9 | #include <linux/if_ether.h> |
10 | #include <linux/slab.h> |
11 | #include "vnic_resource.h" |
12 | #include "vnic_devcmd.h" |
13 | #include "vnic_dev.h" |
14 | #include "vnic_stats.h" |
15 | #include "vnic_wq.h" |
16 | |
17 | #define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */ |
18 | #define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL |
19 | |
20 | struct devcmd2_controller { |
21 | struct vnic_wq_ctrl __iomem *wq_ctrl; |
22 | struct vnic_dev_ring results_ring; |
23 | struct vnic_wq wq; |
24 | struct vnic_devcmd2 *cmd_ring; |
25 | struct devcmd2_result *result; |
26 | u16 next_result; |
27 | u16 result_size; |
28 | int color; |
29 | }; |
30 | |
31 | struct vnic_res { |
32 | void __iomem *vaddr; |
33 | unsigned int count; |
34 | }; |
35 | |
36 | struct vnic_dev { |
37 | void *priv; |
38 | struct pci_dev *pdev; |
39 | struct vnic_res res[RES_TYPE_MAX]; |
40 | enum vnic_dev_intr_mode intr_mode; |
41 | struct vnic_devcmd __iomem *devcmd; |
42 | struct vnic_devcmd_notify *notify; |
43 | struct vnic_devcmd_notify notify_copy; |
44 | dma_addr_t notify_pa; |
45 | u32 *linkstatus; |
46 | dma_addr_t linkstatus_pa; |
47 | struct vnic_stats *stats; |
48 | dma_addr_t stats_pa; |
49 | struct vnic_devcmd_fw_info *fw_info; |
50 | dma_addr_t fw_info_pa; |
51 | u64 args[VNIC_DEVCMD_NARGS]; |
52 | struct devcmd2_controller *devcmd2; |
53 | |
54 | int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, |
55 | int wait); |
56 | }; |
57 | |
58 | #define VNIC_MAX_RES_HDR_SIZE \ |
59 | (sizeof(struct vnic_resource_header) + \ |
60 | sizeof(struct vnic_resource) * RES_TYPE_MAX) |
61 | #define VNIC_RES_STRIDE 128 |
62 | |
63 | void *svnic_dev_priv(struct vnic_dev *vdev) |
64 | { |
65 | return vdev->priv; |
66 | } |
67 | |
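/*
 * Walk the vNIC resource table in BAR0 and record, for each resource
 * type, its element count and the BAR address at which it is mapped.
 */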
68 | static int vnic_dev_discover_res(struct vnic_dev *vdev, |
69 | struct vnic_dev_bar *bar, unsigned int num_bars) |
70 | { |
71 | struct vnic_resource_header __iomem *rh; |
72 | struct vnic_resource __iomem *r; |
73 | u8 type; |
74 | |
75 | if (num_bars == 0) |
76 | return -EINVAL; |
77 | |
78 | if (bar->len < VNIC_MAX_RES_HDR_SIZE) { |
79 | pr_err("vNIC BAR0 res hdr length error\n" ); |
80 | |
81 | return -EINVAL; |
82 | } |
83 | |
84 | rh = bar->vaddr; |
85 | if (!rh) { |
86 | pr_err("vNIC BAR0 res hdr not mem-mapped\n" ); |
87 | |
88 | return -EINVAL; |
89 | } |
90 | |
91 | if (ioread32(&rh->magic) != VNIC_RES_MAGIC || |
92 | ioread32(&rh->version) != VNIC_RES_VERSION) { |
93 | pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n" , |
94 | VNIC_RES_MAGIC, VNIC_RES_VERSION, |
95 | ioread32(&rh->magic), ioread32(&rh->version)); |
96 | |
97 | return -EINVAL; |
98 | } |
99 | |
100 | r = (struct vnic_resource __iomem *)(rh + 1); |
101 | |
102 | while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { |
103 | |
104 | u8 bar_num = ioread8(&r->bar); |
105 | u32 bar_offset = ioread32(&r->bar_offset); |
106 | u32 count = ioread32(&r->count); |
107 | u32 len; |
108 | |
109 | r++; |
110 | |
111 | if (bar_num >= num_bars) |
112 | continue; |
113 | |
114 | if (!bar[bar_num].len || !bar[bar_num].vaddr) |
115 | continue; |
116 | |
117 | switch (type) { |
118 | case RES_TYPE_WQ: |
119 | case RES_TYPE_RQ: |
120 | case RES_TYPE_CQ: |
121 | case RES_TYPE_INTR_CTRL: |
122 | /* each count is stride bytes long */ |
123 | len = count * VNIC_RES_STRIDE; |
124 | if (len + bar_offset > bar->len) { |
125 | pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n" , |
126 | type, bar_offset, |
127 | len, |
128 | bar->len); |
129 | |
130 | return -EINVAL; |
131 | } |
132 | break; |
133 | |
134 | case RES_TYPE_INTR_PBA_LEGACY: |
135 | case RES_TYPE_DEVCMD: |
136 | case RES_TYPE_DEVCMD2: |
137 | len = count; |
138 | break; |
139 | |
140 | default: |
141 | continue; |
142 | } |
143 | |
144 | vdev->res[type].count = count; |
145 | vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; |
146 | } |
147 | |
148 | return 0; |
149 | } |
150 | |
151 | unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev, |
152 | enum vnic_res_type type) |
153 | { |
154 | return vdev->res[type].count; |
155 | } |
156 | |
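/*
 * Return the mapped address of a resource instance.  WQ, RQ, CQ and
 * interrupt-control resources are arrays of fixed-stride register
 * blocks, so the index selects an entry; other types are single regions.
 */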
157 | void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, |
158 | unsigned int index) |
159 | { |
160 | if (!vdev->res[type].vaddr) |
161 | return NULL; |
162 | |
163 | switch (type) { |
164 | case RES_TYPE_WQ: |
165 | case RES_TYPE_RQ: |
166 | case RES_TYPE_CQ: |
167 | case RES_TYPE_INTR_CTRL: |
168 | return (char __iomem *)vdev->res[type].vaddr + |
169 | index * VNIC_RES_STRIDE; |
170 | |
171 | default: |
172 | return (char __iomem *)vdev->res[type].vaddr; |
173 | } |
174 | } |
175 | |
176 | unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring, |
177 | unsigned int desc_count, |
178 | unsigned int desc_size) |
179 | { |
180 | /* The base address of the desc rings must be 512 byte aligned. |
181 | * Descriptor count is aligned to groups of 32 descriptors. A |
182 | * count of 0 means the maximum 4096 descriptors. Descriptor |
183 | * size is aligned to 16 bytes. |
184 | */ |
185 | |
186 | unsigned int count_align = 32; |
187 | unsigned int desc_align = 16; |
188 | |
189 | ring->base_align = 512; |
190 | |
191 | if (desc_count == 0) |
192 | desc_count = 4096; |
193 | |
194 | ring->desc_count = ALIGN(desc_count, count_align); |
195 | |
196 | ring->desc_size = ALIGN(desc_size, desc_align); |
197 | |
198 | ring->size = ring->desc_count * ring->desc_size; |
199 | ring->size_unaligned = ring->size + ring->base_align; |
200 | |
201 | return ring->size_unaligned; |
202 | } |
203 | |
204 | void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) |
205 | { |
206 | memset(ring->descs, 0, ring->size); |
207 | } |
208 | |
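/*
 * Allocate a coherent DMA area for a descriptor ring.  The area is
 * over-allocated by base_align bytes so the ring base address can be
 * rounded up to the required alignment.
 */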
209 | int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, |
210 | unsigned int desc_count, unsigned int desc_size) |
211 | { |
212 | svnic_dev_desc_ring_size(ring, desc_count, desc_size); |
213 | |
	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
			ring->size_unaligned, &ring->base_addr_unaligned,
			GFP_KERNEL);
217 | if (!ring->descs_unaligned) { |
218 | pr_err("Failed to allocate ring (size=%d), aborting\n" , |
219 | (int)ring->size); |
220 | |
221 | return -ENOMEM; |
222 | } |
223 | |
224 | ring->base_addr = ALIGN(ring->base_addr_unaligned, |
225 | ring->base_align); |
226 | ring->descs = (u8 *)ring->descs_unaligned + |
227 | (ring->base_addr - ring->base_addr_unaligned); |
228 | |
229 | svnic_dev_clear_desc_ring(ring); |
230 | |
231 | ring->desc_avail = ring->desc_count - 1; |
232 | |
233 | return 0; |
234 | } |
235 | |
236 | void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) |
237 | { |
238 | if (ring->descs) { |
239 | dma_free_coherent(dev: &vdev->pdev->dev, |
240 | size: ring->size_unaligned, |
241 | cpu_addr: ring->descs_unaligned, |
242 | dma_handle: ring->base_addr_unaligned); |
243 | ring->descs = NULL; |
244 | } |
245 | } |
246 | |
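/*
 * Issue a command over the devcmd2 posted-command interface: write a
 * descriptor into the devcmd2 work queue and, unless the command is
 * flagged no-wait, poll the results ring (tracked by the color bit)
 * for completion.
 */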
247 | static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, |
248 | int wait) |
249 | { |
250 | struct devcmd2_controller *dc2c = vdev->devcmd2; |
251 | struct devcmd2_result *result = NULL; |
252 | unsigned int i; |
253 | int delay; |
254 | int err; |
255 | u32 posted; |
256 | u32 fetch_idx; |
257 | u32 new_posted; |
258 | u8 color; |
259 | |
260 | fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index); |
261 | if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ |
262 | /* Hardware surprise removal: return error */ |
263 | return -ENODEV; |
264 | } |
265 | |
266 | posted = ioread32(&dc2c->wq_ctrl->posted_index); |
267 | |
268 | if (posted == 0xFFFFFFFF) { /* check for hardware gone */ |
269 | /* Hardware surprise removal: return error */ |
270 | return -ENODEV; |
271 | } |
272 | |
273 | new_posted = (posted + 1) % DEVCMD2_RING_SIZE; |
274 | if (new_posted == fetch_idx) { |
275 | pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n" , |
276 | pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted); |
277 | |
278 | return -EBUSY; |
279 | } |
280 | |
281 | dc2c->cmd_ring[posted].cmd = cmd; |
282 | dc2c->cmd_ring[posted].flags = 0; |
283 | |
284 | if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) |
285 | dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; |
286 | |
287 | if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { |
288 | for (i = 0; i < VNIC_DEVCMD_NARGS; i++) |
289 | dc2c->cmd_ring[posted].args[i] = vdev->args[i]; |
290 | } |
291 | /* Adding write memory barrier prevents compiler and/or CPU |
292 | * reordering, thus avoiding descriptor posting before |
293 | * descriptor is initialized. Otherwise, hardware can read |
294 | * stale descriptor fields. |
295 | */ |
296 | wmb(); |
297 | iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); |
298 | |
299 | if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) |
300 | return 0; |
301 | |
302 | result = dc2c->result + dc2c->next_result; |
303 | color = dc2c->color; |
304 | |
305 | /* |
306 | * Increment next_result, after posting the devcmd, irrespective of |
307 | * devcmd result, and it should be done only once. |
308 | */ |
309 | dc2c->next_result++; |
310 | if (dc2c->next_result == dc2c->result_size) { |
311 | dc2c->next_result = 0; |
312 | dc2c->color = dc2c->color ? 0 : 1; |
313 | } |
314 | |
315 | for (delay = 0; delay < wait; delay++) { |
316 | udelay(100); |
317 | if (result->color == color) { |
318 | if (result->error) { |
319 | err = (int) result->error; |
320 | if (err != ERR_ECMDUNKNOWN || |
321 | cmd != CMD_CAPABILITY) |
322 | pr_err("Error %d devcmd %d\n" , |
323 | err, _CMD_N(cmd)); |
324 | |
325 | return err; |
326 | } |
327 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { |
328 | for (i = 0; i < VNIC_DEVCMD_NARGS; i++) |
329 | vdev->args[i] = result->results[i]; |
330 | } |
331 | |
332 | return 0; |
333 | } |
334 | } |
335 | |
336 | pr_err("Timed out devcmd %d\n" , _CMD_N(cmd)); |
337 | |
338 | return -ETIMEDOUT; |
339 | } |
340 | |
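/*
 * Set up the devcmd2 path: allocate the command work queue and results
 * ring, register the results ring with firmware via
 * CMD_INITIALIZE_DEVCMD2, and switch devcmd_rtn to _svnic_dev_cmd2.
 */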
341 | static int svnic_dev_init_devcmd2(struct vnic_dev *vdev) |
342 | { |
343 | struct devcmd2_controller *dc2c = NULL; |
344 | unsigned int fetch_idx; |
345 | int ret; |
346 | void __iomem *p; |
347 | |
348 | if (vdev->devcmd2) |
349 | return 0; |
350 | |
	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
352 | if (!p) |
353 | return -ENODEV; |
354 | |
	dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
356 | if (!dc2c) |
357 | return -ENOMEM; |
358 | |
359 | vdev->devcmd2 = dc2c; |
360 | |
361 | dc2c->color = 1; |
362 | dc2c->result_size = DEVCMD2_RING_SIZE; |
363 | |
364 | ret = vnic_wq_devcmd2_alloc(vdev, |
365 | wq: &dc2c->wq, |
366 | DEVCMD2_RING_SIZE, |
367 | DEVCMD2_DESC_SIZE); |
368 | if (ret) |
369 | goto err_free_devcmd2; |
370 | |
371 | fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index); |
372 | if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ |
373 | /* Hardware surprise removal: reset fetch_index */ |
374 | fetch_idx = 0; |
375 | } |
376 | |
377 | /* |
378 | * Don't change fetch_index ever and |
379 | * set posted_index same as fetch_index |
380 | * when setting up the WQ for devcmd2. |
381 | */ |
	vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
	svnic_wq_enable(&dc2c->wq);
	ret = svnic_dev_alloc_desc_ring(vdev,
					&dc2c->results_ring,
					DEVCMD2_RING_SIZE,
					DEVCMD2_DESC_SIZE);
388 | if (ret) |
389 | goto err_free_wq; |
390 | |
391 | dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs; |
392 | dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs; |
393 | dc2c->wq_ctrl = dc2c->wq.ctrl; |
394 | vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET; |
395 | vdev->args[1] = DEVCMD2_RING_SIZE; |
396 | |
	ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
398 | if (ret < 0) |
399 | goto err_free_desc_ring; |
400 | |
401 | vdev->devcmd_rtn = &_svnic_dev_cmd2; |
402 | pr_info("DEVCMD2 Initialized.\n" ); |
403 | |
404 | return ret; |
405 | |
406 | err_free_desc_ring: |
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
408 | |
409 | err_free_wq: |
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
412 | |
413 | err_free_devcmd2: |
	kfree(dc2c);
415 | vdev->devcmd2 = NULL; |
416 | |
417 | return ret; |
418 | } /* end of svnic_dev_init_devcmd2 */ |
419 | |
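/* Tear down the devcmd2 state set up by svnic_dev_init_devcmd2(). */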
420 | static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) |
421 | { |
422 | struct devcmd2_controller *dc2c = vdev->devcmd2; |
423 | |
424 | vdev->devcmd2 = NULL; |
425 | vdev->devcmd_rtn = NULL; |
426 | |
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
	kfree(dc2c);
431 | } |
432 | |
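/*
 * Issue a devcmd: a0/a1 are marshalled into the args array, the active
 * devcmd transport is invoked, and the (possibly updated) args are
 * copied back out.  Typical usage:
 *
 *	u64 a0 = 0, a1 = 0;
 *	err = svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, VNIC_DVCMD_TMO);
 */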
433 | int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, |
434 | u64 *a0, u64 *a1, int wait) |
435 | { |
436 | int err; |
437 | |
438 | memset(vdev->args, 0, sizeof(vdev->args)); |
439 | vdev->args[0] = *a0; |
440 | vdev->args[1] = *a1; |
441 | |
442 | err = (*vdev->devcmd_rtn)(vdev, cmd, wait); |
443 | |
444 | *a0 = vdev->args[0]; |
445 | *a1 = vdev->args[1]; |
446 | |
447 | return err; |
448 | } |
449 | |
450 | int svnic_dev_fw_info(struct vnic_dev *vdev, |
451 | struct vnic_devcmd_fw_info **fw_info) |
452 | { |
453 | u64 a0, a1 = 0; |
454 | int wait = VNIC_DVCMD_TMO; |
455 | int err = 0; |
456 | |
457 | if (!vdev->fw_info) { |
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, GFP_KERNEL);
461 | if (!vdev->fw_info) |
462 | return -ENOMEM; |
463 | |
464 | a0 = vdev->fw_info_pa; |
465 | |
466 | /* only get fw_info once and cache it */ |
		err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
468 | } |
469 | |
470 | *fw_info = vdev->fw_info; |
471 | |
472 | return err; |
473 | } |
474 | |
475 | int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, |
476 | unsigned int size, void *value) |
477 | { |
478 | u64 a0, a1; |
479 | int wait = VNIC_DVCMD_TMO; |
480 | int err; |
481 | |
482 | a0 = offset; |
483 | a1 = size; |
484 | |
	err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
486 | |
487 | switch (size) { |
488 | case 1: |
489 | *(u8 *)value = (u8)a0; |
490 | break; |
491 | case 2: |
492 | *(u16 *)value = (u16)a0; |
493 | break; |
494 | case 4: |
495 | *(u32 *)value = (u32)a0; |
496 | break; |
497 | case 8: |
498 | *(u64 *)value = a0; |
499 | break; |
500 | default: |
501 | BUG(); |
502 | break; |
503 | } |
504 | |
505 | return err; |
506 | } |
507 | |
508 | int svnic_dev_stats_clear(struct vnic_dev *vdev) |
509 | { |
510 | u64 a0 = 0, a1 = 0; |
511 | int wait = VNIC_DVCMD_TMO; |
512 | |
	return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
514 | } |
515 | |
516 | int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) |
517 | { |
518 | u64 a0, a1; |
519 | int wait = VNIC_DVCMD_TMO; |
520 | |
521 | if (!vdev->stats) { |
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
524 | if (!vdev->stats) |
525 | return -ENOMEM; |
526 | } |
527 | |
528 | *stats = vdev->stats; |
529 | a0 = vdev->stats_pa; |
530 | a1 = sizeof(struct vnic_stats); |
531 | |
	return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
533 | } |
534 | |
535 | int svnic_dev_close(struct vnic_dev *vdev) |
536 | { |
537 | u64 a0 = 0, a1 = 0; |
538 | int wait = VNIC_DVCMD_TMO; |
539 | |
	return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
541 | } |
542 | |
543 | int svnic_dev_enable_wait(struct vnic_dev *vdev) |
544 | { |
545 | u64 a0 = 0, a1 = 0; |
546 | int wait = VNIC_DVCMD_TMO; |
547 | int err = 0; |
548 | |
	err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
552 | |
553 | return err; |
554 | } |
555 | |
556 | int svnic_dev_disable(struct vnic_dev *vdev) |
557 | { |
558 | u64 a0 = 0, a1 = 0; |
559 | int wait = VNIC_DVCMD_TMO; |
560 | |
	return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
562 | } |
563 | |
564 | int svnic_dev_open(struct vnic_dev *vdev, int arg) |
565 | { |
566 | u64 a0 = (u32)arg, a1 = 0; |
567 | int wait = VNIC_DVCMD_TMO; |
568 | |
	return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
570 | } |
571 | |
572 | int svnic_dev_open_done(struct vnic_dev *vdev, int *done) |
573 | { |
574 | u64 a0 = 0, a1 = 0; |
575 | int wait = VNIC_DVCMD_TMO; |
576 | int err; |
577 | |
578 | *done = 0; |
579 | |
	err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
581 | if (err) |
582 | return err; |
583 | |
584 | *done = (a0 == 0); |
585 | |
586 | return 0; |
587 | } |
588 | |
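/*
 * Register a coherent notify buffer with firmware.  The interrupt
 * number is packed into the upper 32 bits of a1 (masked by
 * VNIC_NOTIFY_INTR_MASK) and the buffer size into the lower bits.
 */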
589 | int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) |
590 | { |
591 | u64 a0, a1; |
592 | int wait = VNIC_DVCMD_TMO; |
593 | |
594 | if (!vdev->notify) { |
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa, GFP_KERNEL);
598 | if (!vdev->notify) |
599 | return -ENOMEM; |
600 | } |
601 | |
602 | a0 = vdev->notify_pa; |
603 | a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK; |
604 | a1 += sizeof(struct vnic_devcmd_notify); |
605 | |
	return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
607 | } |
608 | |
609 | void svnic_dev_notify_unset(struct vnic_dev *vdev) |
610 | { |
611 | u64 a0, a1; |
612 | int wait = VNIC_DVCMD_TMO; |
613 | |
614 | a0 = 0; /* paddr = 0 to unset notify buffer */ |
615 | a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */ |
616 | a1 += sizeof(struct vnic_devcmd_notify); |
617 | |
	svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
619 | } |
620 | |
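/*
 * Snapshot the firmware-updated notify area and validate its checksum,
 * retrying until a consistent copy is read.  Returns 1 when notify data
 * is valid, 0 if no notify buffer has been set.
 */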
621 | static int vnic_dev_notify_ready(struct vnic_dev *vdev) |
622 | { |
623 | u32 *words; |
624 | unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; |
625 | unsigned int i; |
626 | u32 csum; |
627 | |
628 | if (!vdev->notify) |
629 | return 0; |
630 | |
631 | do { |
632 | csum = 0; |
633 | memcpy(&vdev->notify_copy, vdev->notify, |
634 | sizeof(struct vnic_devcmd_notify)); |
635 | words = (u32 *)&vdev->notify_copy; |
636 | for (i = 1; i < nwords; i++) |
637 | csum += words[i]; |
638 | } while (csum != words[0]); |
639 | |
640 | return 1; |
641 | } |
642 | |
643 | int svnic_dev_init(struct vnic_dev *vdev, int arg) |
644 | { |
645 | u64 a0 = (u32)arg, a1 = 0; |
646 | int wait = VNIC_DVCMD_TMO; |
647 | |
	return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
649 | } |
650 | |
651 | int svnic_dev_link_status(struct vnic_dev *vdev) |
652 | { |
653 | if (vdev->linkstatus) |
654 | return *vdev->linkstatus; |
655 | |
656 | if (!vnic_dev_notify_ready(vdev)) |
657 | return 0; |
658 | |
659 | return vdev->notify_copy.link_state; |
660 | } |
661 | |
662 | u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev) |
663 | { |
664 | if (!vnic_dev_notify_ready(vdev)) |
665 | return 0; |
666 | |
667 | return vdev->notify_copy.link_down_cnt; |
668 | } |
669 | |
670 | void svnic_dev_set_intr_mode(struct vnic_dev *vdev, |
671 | enum vnic_dev_intr_mode intr_mode) |
672 | { |
673 | vdev->intr_mode = intr_mode; |
674 | } |
675 | |
676 | enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev) |
677 | { |
678 | return vdev->intr_mode; |
679 | } |
680 | |
681 | void svnic_dev_unregister(struct vnic_dev *vdev) |
682 | { |
683 | if (vdev) { |
684 | if (vdev->notify) |
685 | dma_free_coherent(dev: &vdev->pdev->dev, |
686 | size: sizeof(struct vnic_devcmd_notify), |
687 | cpu_addr: vdev->notify, |
688 | dma_handle: vdev->notify_pa); |
689 | if (vdev->linkstatus) |
690 | dma_free_coherent(dev: &vdev->pdev->dev, |
691 | size: sizeof(u32), |
692 | cpu_addr: vdev->linkstatus, |
693 | dma_handle: vdev->linkstatus_pa); |
694 | if (vdev->stats) |
695 | dma_free_coherent(dev: &vdev->pdev->dev, |
696 | size: sizeof(struct vnic_stats), |
697 | cpu_addr: vdev->stats, dma_handle: vdev->stats_pa); |
698 | if (vdev->fw_info) |
699 | dma_free_coherent(dev: &vdev->pdev->dev, |
700 | size: sizeof(struct vnic_devcmd_fw_info), |
701 | cpu_addr: vdev->fw_info, dma_handle: vdev->fw_info_pa); |
702 | if (vdev->devcmd2) |
703 | vnic_dev_deinit_devcmd2(vdev); |
704 | kfree(objp: vdev); |
705 | } |
706 | } |
707 | |
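/*
 * Allocate a vnic_dev (unless one is supplied) and discover the
 * resources exposed through the device's BAR(s).
 */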
708 | struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev, |
709 | void *priv, |
710 | struct pci_dev *pdev, |
711 | struct vnic_dev_bar *bar, |
712 | unsigned int num_bars) |
713 | { |
714 | if (!vdev) { |
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
716 | if (!vdev) |
717 | return NULL; |
718 | } |
719 | |
720 | vdev->priv = priv; |
721 | vdev->pdev = pdev; |
722 | |
723 | if (vnic_dev_discover_res(vdev, bar, num_bars)) |
724 | goto err_out; |
725 | |
726 | return vdev; |
727 | |
728 | err_out: |
729 | svnic_dev_unregister(vdev); |
730 | |
731 | return NULL; |
732 | } /* end of svnic_dev_alloc_discover */ |
733 | |
734 | /* |
735 | * fallback option is left to keep the interface common for other vnics. |
736 | */ |
737 | int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback) |
738 | { |
739 | int err = -ENODEV; |
740 | void __iomem *p; |
741 | |
	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
743 | if (p) |
744 | err = svnic_dev_init_devcmd2(vdev); |
745 | else |
746 | pr_err("DEVCMD2 resource not found.\n" ); |
747 | |
748 | return err; |
749 | } /* end of svnic_dev_cmd_init */ |
750 | |