1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) ST-Ericsson AB 2013 |
4 | * Authors: Vicram Arv |
5 | * Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> |
6 | * Sjur Brendeland |
7 | */ |
8 | #include <linux/module.h> |
9 | #include <linux/if_arp.h> |
10 | #include <linux/virtio.h> |
11 | #include <linux/vringh.h> |
12 | #include <linux/debugfs.h> |
13 | #include <linux/spinlock.h> |
14 | #include <linux/genalloc.h> |
15 | #include <linux/interrupt.h> |
16 | #include <linux/netdevice.h> |
17 | #include <linux/rtnetlink.h> |
18 | #include <linux/virtio_ids.h> |
19 | #include <linux/virtio_caif.h> |
20 | #include <linux/virtio_ring.h> |
21 | #include <linux/dma-mapping.h> |
22 | #include <net/caif/caif_dev.h> |
23 | #include <linux/virtio_config.h> |
24 | |
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vicram Arv");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("Virtio CAIF Driver");
29 | |
30 | /* NAPI schedule quota */ |
31 | #define CFV_DEFAULT_QUOTA 32 |
32 | |
33 | /* Defaults used if virtio config space is unavailable */ |
34 | #define CFV_DEF_MTU_SIZE 4096 |
35 | #define CFV_DEF_HEADROOM 32 |
36 | #define CFV_DEF_TAILROOM 32 |
37 | |
38 | /* Required IP header alignment */ |
39 | #define IP_HDR_ALIGN 4 |
40 | |
/* struct cfv_napi_context - NAPI context info
42 | * @riov: IOV holding data read from the ring. Note that riov may |
43 | * still hold data when cfv_rx_poll() returns. |
 * @head: Last descriptor ID we received from vringh_getdesc_kern().
 *	We use this to put the descriptor back on the used ring.
 *	USHRT_MAX indicates an invalid head-id.
47 | */ |
48 | struct cfv_napi_context { |
49 | struct vringh_kiov riov; |
50 | unsigned short head; |
51 | }; |
52 | |
53 | /* struct cfv_stats - statistics for debugfs |
54 | * @rx_napi_complete: Number of NAPI completions (RX) |
55 | * @rx_napi_resched: Number of calls where the full quota was used (RX) |
56 | * @rx_nomem: Number of SKB alloc failures (RX) |
57 | * @rx_kicks: Number of RX kicks |
 * @tx_full_ring: Number of times the TX ring was full
59 | * @tx_no_mem: Number of times TX went out of memory |
60 | * @tx_flow_on: Number of flow on (TX) |
61 | * @tx_kicks: Number of TX kicks |
62 | */ |
63 | struct cfv_stats { |
64 | u32 rx_napi_complete; |
65 | u32 rx_napi_resched; |
66 | u32 rx_nomem; |
67 | u32 rx_kicks; |
68 | u32 tx_full_ring; |
69 | u32 tx_no_mem; |
70 | u32 tx_flow_on; |
71 | u32 tx_kicks; |
72 | }; |
73 | |
74 | /* struct cfv_info - Caif Virtio control structure |
75 | * @cfdev: caif common header |
76 | * @vdev: Associated virtio device |
77 | * @vr_rx: rx/downlink host vring |
78 | * @vq_tx: tx/uplink virtqueue |
79 | * @ndev: CAIF link layer device |
 * @watermark_tx: indicates the number of free descriptors we need
 *	to reopen the tx-queues after overload.
82 | * @tx_lock: protects vq_tx from concurrent use |
83 | * @tx_release_tasklet: Tasklet for freeing consumed TX buffers |
84 | * @napi: Napi context used in cfv_rx_poll() |
85 | * @ctx: Context data used in cfv_rx_poll() |
86 | * @tx_hr: transmit headroom |
87 | * @rx_hr: receive headroom |
88 | * @tx_tr: transmit tail room |
89 | * @rx_tr: receive tail room |
90 | * @mtu: transmit max size |
91 | * @mru: receive max size |
92 | * @allocsz: size of dma memory reserved for TX buffers |
93 | * @alloc_addr: virtual address to dma memory for TX buffers |
94 | * @alloc_dma: dma address to dma memory for TX buffers |
95 | * @genpool: Gen Pool used for allocating TX buffers |
96 | * @reserved_mem: Pointer to memory reserve allocated from genpool |
97 | * @reserved_size: Size of memory reserve allocated from genpool |
 * @stats: Statistics exposed in debugfs
99 | * @debugfs: Debugfs dentry for statistic counters |
100 | */ |
101 | struct cfv_info { |
102 | struct caif_dev_common cfdev; |
103 | struct virtio_device *vdev; |
104 | struct vringh *vr_rx; |
105 | struct virtqueue *vq_tx; |
106 | struct net_device *ndev; |
107 | unsigned int watermark_tx; |
108 | /* Protect access to vq_tx */ |
109 | spinlock_t tx_lock; |
110 | struct tasklet_struct tx_release_tasklet; |
111 | struct napi_struct napi; |
112 | struct cfv_napi_context ctx; |
113 | u16 tx_hr; |
114 | u16 rx_hr; |
115 | u16 tx_tr; |
116 | u16 rx_tr; |
117 | u32 mtu; |
118 | u32 mru; |
119 | size_t allocsz; |
120 | void *alloc_addr; |
121 | dma_addr_t alloc_dma; |
122 | struct gen_pool *genpool; |
123 | unsigned long reserved_mem; |
124 | size_t reserved_size; |
125 | struct cfv_stats stats; |
126 | struct dentry *debugfs; |
127 | }; |
128 | |
/* struct buf_info - maintains transmit buffer data handle
 * @size: size of transmit buffer
 * @vaddr: virtual address mapping to allocated memory area
 */
134 | struct buf_info { |
135 | size_t size; |
136 | u8 *vaddr; |
137 | }; |
138 | |
139 | /* Called from virtio device, in IRQ context */ |
140 | static void cfv_release_cb(struct virtqueue *vq_tx) |
141 | { |
142 | struct cfv_info *cfv = vq_tx->vdev->priv; |
143 | |
144 | ++cfv->stats.tx_kicks; |
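	/* We are in IRQ context here, so defer the actual buffer
	 * reclaim to the tasklet.
	 */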
	tasklet_schedule(&cfv->tx_release_tasklet);
146 | } |
147 | |
148 | static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info) |
149 | { |
150 | if (!buf_info) |
151 | return; |
	gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
		      buf_info->size);
	kfree(buf_info);
155 | } |
156 | |
/* This is invoked whenever the remote processor has completed processing
 * a TX msg we just sent, and the buffer is put back on the used ring.
159 | */ |
160 | static void cfv_release_used_buf(struct virtqueue *vq_tx) |
161 | { |
162 | struct cfv_info *cfv = vq_tx->vdev->priv; |
163 | unsigned long flags; |
164 | |
165 | BUG_ON(vq_tx != cfv->vq_tx); |
166 | |
167 | for (;;) { |
168 | unsigned int len; |
169 | struct buf_info *buf_info; |
170 | |
171 | /* Get used buffer from used ring to recycle used descriptors */ |
172 | spin_lock_irqsave(&cfv->tx_lock, flags); |
		buf_info = virtqueue_get_buf(vq_tx, &len);
		spin_unlock_irqrestore(&cfv->tx_lock, flags);
175 | |
176 | /* Stop looping if there are no more buffers to free */ |
177 | if (!buf_info) |
178 | break; |
179 | |
180 | free_buf_info(cfv, buf_info); |
181 | |
		/* watermark_tx indicates if we previously stopped the tx
		 * queues. If we have enough free slots in the virtio ring,
		 * re-establish the memory reserve and open up the tx queues.
		 */
186 | if (cfv->vq_tx->num_free <= cfv->watermark_tx) |
187 | continue; |
188 | |
189 | /* Re-establish memory reserve */ |
		if (cfv->reserved_mem == 0 && cfv->genpool)
			cfv->reserved_mem =
				gen_pool_alloc(cfv->genpool,
					       cfv->reserved_size);
194 | |
195 | /* Open up the tx queues */ |
		if (cfv->reserved_mem) {
			cfv->watermark_tx =
				virtqueue_get_vring_size(cfv->vq_tx);
			netif_tx_wake_all_queues(cfv->ndev);
			/* Buffers are recycled in cfv_netdev_tx, so
			 * disable notifications when queues are opened.
			 */
			virtqueue_disable_cb(cfv->vq_tx);
			++cfv->stats.tx_flow_on;
		} else {
			/* if no memory reserve, wait for more free slots */
			WARN_ON(cfv->watermark_tx >
				virtqueue_get_vring_size(cfv->vq_tx));
			cfv->watermark_tx +=
				virtqueue_get_vring_size(cfv->vq_tx) / 4;
211 | } |
212 | } |
213 | } |
214 | |
/* Allocate an SKB and copy packet data to it */
216 | static struct sk_buff *cfv_alloc_and_copy_skb(int *err, |
217 | struct cfv_info *cfv, |
218 | u8 *frm, u32 frm_len) |
219 | { |
220 | struct sk_buff *skb; |
221 | u32 cfpkt_len, pad_len; |
222 | |
223 | *err = 0; |
	/* Verify the frame length against the MRU and the down-link head/tailroom */
225 | if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) { |
		netdev_err(cfv->ndev,
			   "Invalid frmlen:%u mru:%u hr:%d tr:%d\n",
			   frm_len, cfv->mru, cfv->rx_hr,
			   cfv->rx_tr);
230 | *err = -EPROTO; |
231 | return NULL; |
232 | } |
233 | |
234 | cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr); |
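	/* pad_len reproduces the payload's misalignment within the source
	 * buffer, so that anything IP_HDR_ALIGN-aligned there (notably the
	 * IP header) stays aligned after the copy.
	 */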
235 | pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1); |
236 | |
	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
238 | if (!skb) { |
239 | *err = -ENOMEM; |
240 | return NULL; |
241 | } |
242 | |
	skb_reserve(skb, cfv->rx_hr + pad_len);

	skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
246 | return skb; |
247 | } |
248 | |
249 | /* Get packets from the host vring */ |
250 | static int cfv_rx_poll(struct napi_struct *napi, int quota) |
251 | { |
252 | struct cfv_info *cfv = container_of(napi, struct cfv_info, napi); |
253 | int rxcnt = 0; |
254 | int err = 0; |
255 | void *buf; |
256 | struct sk_buff *skb; |
257 | struct vringh_kiov *riov = &cfv->ctx.riov; |
258 | unsigned int skb_len; |
259 | |
260 | do { |
261 | skb = NULL; |
262 | |
263 | /* Put the previous iovec back on the used ring and |
264 | * fetch a new iovec if we have processed all elements. |
265 | */ |
266 | if (riov->i == riov->used) { |
267 | if (cfv->ctx.head != USHRT_MAX) { |
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
271 | cfv->ctx.head = USHRT_MAX; |
272 | } |
273 | |
			err = vringh_getdesc_kern(
				cfv->vr_rx,
				riov,
				NULL,
				&cfv->ctx.head,
				GFP_ATOMIC);
280 | |
281 | if (err <= 0) |
282 | goto exit; |
283 | } |
284 | |
		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
286 | /* TODO: Add check on valid buffer address */ |
287 | |
		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
290 | if (unlikely(err)) |
291 | goto exit; |
292 | |
293 | /* Push received packet up the stack. */ |
294 | skb_len = skb->len; |
295 | skb->protocol = htons(ETH_P_CAIF); |
296 | skb_reset_mac_header(skb); |
297 | skb->dev = cfv->ndev; |
298 | err = netif_receive_skb(skb); |
299 | if (unlikely(err)) { |
300 | ++cfv->ndev->stats.rx_dropped; |
301 | } else { |
302 | ++cfv->ndev->stats.rx_packets; |
303 | cfv->ndev->stats.rx_bytes += skb_len; |
304 | } |
305 | |
306 | ++riov->i; |
307 | ++rxcnt; |
308 | } while (rxcnt < quota); |
309 | |
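	/* The full quota was used: stay scheduled so NAPI polls us again */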
310 | ++cfv->stats.rx_napi_resched; |
311 | goto out; |
312 | |
313 | exit: |
314 | switch (err) { |
315 | case 0: |
316 | ++cfv->stats.rx_napi_complete; |
317 | |
		/* Really out of packets? (stolen from virtio_net) */
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
		    napi_schedule_prep(napi)) {
			vringh_notify_disable_kern(cfv->vr_rx);
			__napi_schedule(napi);
324 | } |
325 | break; |
326 | |
327 | case -ENOMEM: |
328 | ++cfv->stats.rx_nomem; |
329 | dev_kfree_skb(skb); |
330 | /* Stop NAPI poll on OOM, we hope to be polled later */ |
		napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
333 | break; |
334 | |
335 | default: |
336 | /* We're doomed, any modem fault is fatal */ |
337 | netdev_warn(dev: cfv->ndev, format: "Bad ring, disable device\n" ); |
338 | cfv->ndev->stats.rx_dropped = riov->used - riov->i; |
339 | napi_complete(n: napi); |
340 | vringh_notify_disable_kern(vrh: cfv->vr_rx); |
341 | netif_carrier_off(dev: cfv->ndev); |
342 | break; |
343 | } |
344 | out: |
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
347 | return rxcnt; |
348 | } |
349 | |
350 | static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx) |
351 | { |
352 | struct cfv_info *cfv = vdev->priv; |
353 | |
354 | ++cfv->stats.rx_kicks; |
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_schedule(&cfv->napi);
357 | } |
358 | |
359 | static void cfv_destroy_genpool(struct cfv_info *cfv) |
360 | { |
361 | if (cfv->alloc_addr) |
		dma_free_coherent(cfv->vdev->dev.parent->parent,
				  cfv->allocsz, cfv->alloc_addr,
				  cfv->alloc_dma);
365 | |
366 | if (!cfv->genpool) |
367 | return; |
	gen_pool_free(cfv->genpool, cfv->reserved_mem,
		      cfv->reserved_size);
370 | gen_pool_destroy(cfv->genpool); |
371 | cfv->genpool = NULL; |
372 | } |
373 | |
374 | static int cfv_create_genpool(struct cfv_info *cfv) |
375 | { |
376 | int err; |
377 | |
	/* dma_alloc can only allocate whole pages, and we need a more
	 * fine-grained allocation, so we use genpool. We ask for the space
	 * needed by IP and a full ring. If the dma allocation fails, we
	 * retry with a smaller allocation size.
382 | */ |
383 | err = -ENOMEM; |
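	/* Size for a full ring of Ethernet-sized frames including head-
	 * and tailroom, plus roughly 10% slack.
	 */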
	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
386 | if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu) |
387 | return -EINVAL; |
388 | |
389 | for (;;) { |
390 | if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) { |
			netdev_info(cfv->ndev, "Not enough device memory\n");
392 | return -ENOMEM; |
393 | } |
394 | |
		cfv->alloc_addr = dma_alloc_coherent(
			cfv->vdev->dev.parent->parent,
			cfv->allocsz, &cfv->alloc_dma,
			GFP_ATOMIC);
399 | if (cfv->alloc_addr) |
400 | break; |
401 | |
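		/* Retry with a 25% smaller allocation */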
402 | cfv->allocsz = (cfv->allocsz * 3) >> 2; |
403 | } |
404 | |
	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
406 | cfv->allocsz); |
407 | |
	/* Allocate on 128-byte boundaries (1 << 7) */
409 | cfv->genpool = gen_pool_create(7, -1); |
410 | if (!cfv->genpool) |
411 | goto err; |
412 | |
	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
				cfv->allocsz, -1);
416 | if (err) |
417 | goto err; |
418 | |
419 | /* Reserve some memory for low memory situations. If we hit the roof |
420 | * in the memory pool, we stop TX flow and release the reserve. |
421 | */ |
422 | cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu; |
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
					   cfv->reserved_size);
425 | if (!cfv->reserved_mem) { |
426 | err = -ENOMEM; |
427 | goto err; |
428 | } |
429 | |
	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
431 | return 0; |
432 | err: |
433 | cfv_destroy_genpool(cfv); |
434 | return err; |
435 | } |
436 | |
437 | /* Enable the CAIF interface and allocate the memory-pool */ |
438 | static int cfv_netdev_open(struct net_device *netdev) |
439 | { |
	struct cfv_info *cfv = netdev_priv(netdev);
441 | |
442 | if (cfv_create_genpool(cfv)) |
443 | return -ENOMEM; |
444 | |
	netif_carrier_on(netdev);
	napi_enable(&cfv->napi);
447 | |
448 | /* Schedule NAPI to read any pending packets */ |
	napi_schedule(&cfv->napi);
450 | return 0; |
451 | } |
452 | |
453 | /* Disable the CAIF interface and free the memory-pool */ |
454 | static int cfv_netdev_close(struct net_device *netdev) |
455 | { |
	struct cfv_info *cfv = netdev_priv(netdev);
457 | unsigned long flags; |
458 | struct buf_info *buf_info; |
459 | |
460 | /* Disable interrupts, queues and NAPI polling */ |
	netif_carrier_off(netdev);
	virtqueue_disable_cb(cfv->vq_tx);
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_disable(&cfv->napi);
465 | |
466 | /* Release any TX buffers on both used and available rings */ |
	cfv_release_used_buf(cfv->vq_tx);
468 | spin_lock_irqsave(&cfv->tx_lock, flags); |
	while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
		free_buf_info(cfv, buf_info);
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
472 | |
473 | /* Release all dma allocated memory and destroy the pool */ |
474 | cfv_destroy_genpool(cfv); |
475 | return 0; |
476 | } |
477 | |
478 | /* Allocate a buffer in dma-memory and copy skb to it */ |
479 | static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv, |
480 | struct sk_buff *skb, |
481 | struct scatterlist *sg) |
482 | { |
483 | struct caif_payload_info *info = (void *)&skb->cb; |
484 | struct buf_info *buf_info = NULL; |
485 | u8 pad_len, hdr_ofs; |
486 | |
487 | if (!cfv->genpool) |
488 | goto err; |
489 | |
490 | if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) { |
491 | netdev_warn(dev: cfv->ndev, format: "Invalid packet len (%d > %d)\n" , |
492 | cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu); |
493 | goto err; |
494 | } |
495 | |
	buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
497 | if (unlikely(!buf_info)) |
498 | goto err; |
499 | |
500 | /* Make the IP header aligned in the buffer */ |
501 | hdr_ofs = cfv->tx_hr + info->hdr_len; |
502 | pad_len = hdr_ofs & (IP_HDR_ALIGN - 1); |
503 | buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len; |
504 | |
505 | /* allocate dma memory buffer */ |
	buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
507 | if (unlikely(!buf_info->vaddr)) |
508 | goto err; |
509 | |
510 | /* copy skbuf contents to send buffer */ |
	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
512 | sg_init_one(sg, buf_info->vaddr + pad_len, |
513 | skb->len + cfv->tx_hr + cfv->rx_hr); |
514 | |
515 | return buf_info; |
516 | err: |
	kfree(buf_info);
518 | return NULL; |
519 | } |
520 | |
521 | /* Put the CAIF packet on the virtio ring and kick the receiver */ |
522 | static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) |
523 | { |
	struct cfv_info *cfv = netdev_priv(netdev);
525 | struct buf_info *buf_info; |
526 | struct scatterlist sg; |
527 | unsigned long flags; |
528 | bool flow_off = false; |
529 | int ret; |
530 | |
531 | /* garbage collect released buffers */ |
	cfv_release_used_buf(cfv->vq_tx);
533 | spin_lock_irqsave(&cfv->tx_lock, flags); |
534 | |
535 | /* Flow-off check takes into account number of cpus to make sure |
536 | * virtqueue will not be overfilled in any possible smp conditions. |
537 | * |
538 | * Flow-on is triggered when sufficient buffers are freed |
539 | */ |
540 | if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) { |
541 | flow_off = true; |
542 | cfv->stats.tx_full_ring++; |
543 | } |
544 | |
545 | /* If we run out of memory, we release the memory reserve and retry |
546 | * allocation. |
547 | */ |
	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
549 | if (unlikely(!buf_info)) { |
550 | cfv->stats.tx_no_mem++; |
551 | flow_off = true; |
552 | |
		if (cfv->reserved_mem && cfv->genpool) {
			gen_pool_free(cfv->genpool, cfv->reserved_mem,
				      cfv->reserved_size);
			cfv->reserved_mem = 0;
			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
558 | } |
559 | } |
560 | |
561 | if (unlikely(flow_off)) { |
562 | /* Turn flow on when a 1/4 of the descriptors are released */ |
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
564 | /* Enable notifications of recycled TX buffers */ |
		virtqueue_enable_cb(cfv->vq_tx);
		netif_tx_stop_all_queues(netdev);
567 | } |
568 | |
569 | if (unlikely(!buf_info)) { |
		/* If the memory reserve does its job, this shouldn't happen */
		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
572 | goto err; |
573 | } |
574 | |
	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
	if (unlikely(ret < 0)) {
577 | /* If flow control works, this shouldn't happen */ |
578 | netdev_warn(dev: cfv->ndev, format: "Failed adding buffer to TX vring:%d\n" , |
579 | ret); |
580 | goto err; |
581 | } |
582 | |
583 | /* update netdev statistics */ |
584 | cfv->ndev->stats.tx_packets++; |
585 | cfv->ndev->stats.tx_bytes += skb->len; |
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
587 | |
588 | /* tell the remote processor it has a pending message to read */ |
	virtqueue_kick(cfv->vq_tx);
590 | |
591 | dev_kfree_skb(skb); |
592 | return NETDEV_TX_OK; |
593 | err: |
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
595 | cfv->ndev->stats.tx_dropped++; |
596 | free_buf_info(cfv, buf_info); |
597 | dev_kfree_skb(skb); |
598 | return NETDEV_TX_OK; |
599 | } |
600 | |
601 | static void cfv_tx_release_tasklet(struct tasklet_struct *t) |
602 | { |
	struct cfv_info *cfv = from_tasklet(cfv, t, tx_release_tasklet);

	cfv_release_used_buf(cfv->vq_tx);
605 | } |
606 | |
607 | static const struct net_device_ops cfv_netdev_ops = { |
608 | .ndo_open = cfv_netdev_open, |
609 | .ndo_stop = cfv_netdev_close, |
610 | .ndo_start_xmit = cfv_netdev_tx, |
611 | }; |
612 | |
613 | static void cfv_netdev_setup(struct net_device *netdev) |
614 | { |
615 | netdev->netdev_ops = &cfv_netdev_ops; |
616 | netdev->type = ARPHRD_CAIF; |
617 | netdev->tx_queue_len = 100; |
618 | netdev->flags = IFF_POINTOPOINT | IFF_NOARP; |
619 | netdev->mtu = CFV_DEF_MTU_SIZE; |
620 | netdev->needs_free_netdev = true; |
621 | } |
622 | |
623 | /* Create debugfs counters for the device */ |
624 | static inline void debugfs_init(struct cfv_info *cfv) |
625 | { |
	cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL);

	debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
			   &cfv->stats.rx_napi_complete);
	debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
			   &cfv->stats.rx_napi_resched);
	debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
			   &cfv->stats.rx_nomem);
	debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
			   &cfv->stats.rx_kicks);
	debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
			   &cfv->stats.tx_full_ring);
	debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
			   &cfv->stats.tx_no_mem);
	debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
			   &cfv->stats.tx_kicks);
	debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
			   &cfv->stats.tx_flow_on);
644 | } |
645 | |
/* Set up CAIF for a virtio device */
647 | static int cfv_probe(struct virtio_device *vdev) |
648 | { |
649 | vq_callback_t *vq_cbs = cfv_release_cb; |
650 | vrh_callback_t *vrh_cbs = cfv_recv; |
	const char *names = "output";
	const char *cfv_netdev_name = "cfvrt";
653 | struct net_device *netdev; |
654 | struct cfv_info *cfv; |
655 | int err; |
656 | |
657 | netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name, |
658 | NET_NAME_UNKNOWN, cfv_netdev_setup); |
659 | if (!netdev) |
660 | return -ENOMEM; |
661 | |
	cfv = netdev_priv(netdev);
663 | cfv->vdev = vdev; |
664 | cfv->ndev = netdev; |
665 | |
666 | spin_lock_init(&cfv->tx_lock); |
667 | |
668 | /* Get the RX virtio ring. This is a "host side vring". */ |
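	/* The modem drives the RX ring from its side, so we service it with
	 * the host-side vringh API; the TX ring below is a regular
	 * guest-side virtqueue.
	 */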
669 | err = -ENODEV; |
670 | if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs) |
671 | goto err; |
672 | |
673 | err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs); |
674 | if (err) |
675 | goto err; |
676 | |
677 | /* Get the TX virtio ring. This is a "guest side vring". */ |
	err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
679 | if (err) |
680 | goto err; |
681 | |
682 | /* Get the CAIF configuration from virtio config space, if available */ |
683 | if (vdev->config->get) { |
684 | virtio_cread(vdev, struct virtio_caif_transf_config, headroom, |
685 | &cfv->tx_hr); |
686 | virtio_cread(vdev, struct virtio_caif_transf_config, headroom, |
687 | &cfv->rx_hr); |
688 | virtio_cread(vdev, struct virtio_caif_transf_config, tailroom, |
689 | &cfv->tx_tr); |
690 | virtio_cread(vdev, struct virtio_caif_transf_config, tailroom, |
691 | &cfv->rx_tr); |
692 | virtio_cread(vdev, struct virtio_caif_transf_config, mtu, |
693 | &cfv->mtu); |
694 | virtio_cread(vdev, struct virtio_caif_transf_config, mtu, |
695 | &cfv->mru); |
696 | } else { |
697 | cfv->tx_hr = CFV_DEF_HEADROOM; |
698 | cfv->rx_hr = CFV_DEF_HEADROOM; |
699 | cfv->tx_tr = CFV_DEF_TAILROOM; |
700 | cfv->rx_tr = CFV_DEF_TAILROOM; |
701 | cfv->mtu = CFV_DEF_MTU_SIZE; |
702 | cfv->mru = CFV_DEF_MTU_SIZE; |
703 | } |
704 | |
705 | netdev->needed_headroom = cfv->tx_hr; |
706 | netdev->needed_tailroom = cfv->tx_tr; |
707 | |
708 | /* Disable buffer release interrupts unless we have stopped TX queues */ |
	virtqueue_disable_cb(cfv->vq_tx);
710 | |
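	/* Leave room for the transmit tailroom within the link-layer MTU */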
711 | netdev->mtu = cfv->mtu - cfv->tx_tr; |
712 | vdev->priv = cfv; |
713 | |
714 | /* Initialize NAPI poll context data */ |
	vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
	cfv->ctx.head = USHRT_MAX;
	netif_napi_add_weight(netdev, &cfv->napi, cfv_rx_poll,
			      CFV_DEFAULT_QUOTA);
719 | |
	tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet);
721 | |
722 | /* Carrier is off until netdevice is opened */ |
	netif_carrier_off(netdev);
724 | |
725 | /* serialize netdev register + virtio_device_ready() with ndo_open() */ |
726 | rtnl_lock(); |
727 | |
728 | /* register Netdev */ |
	err = register_netdevice(netdev);
730 | if (err) { |
731 | rtnl_unlock(); |
		dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
733 | goto err; |
734 | } |
735 | |
	virtio_device_ready(vdev);
737 | |
738 | rtnl_unlock(); |
739 | |
740 | debugfs_init(cfv); |
741 | |
742 | return 0; |
743 | err: |
744 | netdev_warn(dev: cfv->ndev, format: "CAIF Virtio probe failed:%d\n" , err); |
745 | |
746 | if (cfv->vr_rx) |
747 | vdev->vringh_config->del_vrhs(cfv->vdev); |
748 | if (cfv->vdev) |
749 | vdev->config->del_vqs(cfv->vdev); |
	free_netdev(netdev);
751 | return err; |
752 | } |
753 | |
754 | static void cfv_remove(struct virtio_device *vdev) |
755 | { |
756 | struct cfv_info *cfv = vdev->priv; |
757 | |
758 | rtnl_lock(); |
	dev_close(cfv->ndev);
760 | rtnl_unlock(); |
761 | |
	tasklet_kill(&cfv->tx_release_tasklet);
	debugfs_remove_recursive(cfv->debugfs);
764 | |
	vringh_kiov_cleanup(&cfv->ctx.riov);
	virtio_reset_device(vdev);
767 | vdev->vringh_config->del_vrhs(cfv->vdev); |
768 | cfv->vr_rx = NULL; |
769 | vdev->config->del_vqs(cfv->vdev); |
	unregister_netdev(cfv->ndev);
771 | } |
772 | |
773 | static struct virtio_device_id id_table[] = { |
774 | { VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID }, |
775 | { 0 }, |
776 | }; |
777 | |
778 | static unsigned int features[] = { |
779 | }; |
780 | |
781 | static struct virtio_driver caif_virtio_driver = { |
782 | .feature_table = features, |
783 | .feature_table_size = ARRAY_SIZE(features), |
784 | .driver.name = KBUILD_MODNAME, |
785 | .driver.owner = THIS_MODULE, |
786 | .id_table = id_table, |
787 | .probe = cfv_probe, |
788 | .remove = cfv_remove, |
789 | }; |
790 | |
791 | module_virtio_driver(caif_virtio_driver); |
792 | MODULE_DEVICE_TABLE(virtio, id_table); |
793 | |