/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call of xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
				       const struct sk_buff *skb)
{
	unsigned int needed = 0;

	if (skb) {
		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
		if (skb_is_gso(skb))
			needed++;
		if (skb->sw_hash)
			needed++;
	}

	WRITE_ONCE(queue->rx_slots_needed, needed);
}

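/* Check whether the frontend has posted enough request slots for the
 * packet at the head of the rx_queue. If not, ask for an event when
 * more requests arrive and re-check, so that a request posted
 * concurrently is not missed.
 */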
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	unsigned int needed;

	needed = READ_ONCE(queue->rx_slots_needed);
	if (!needed)
		return false;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

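/* Queue an SKB for transmission to the frontend. If the internal
 * queue is full, the corresponding netdev tx queue is stopped and the
 * SKB is rejected; returns false in that case.
 */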
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	if (queue->rx_queue_len >= queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
		ret = false;
	} else {
		if (skb_queue_empty(&queue->rx_queue))
			xenvif_update_needed_slots(queue, skb);

		__skb_queue_tail(&queue->rx_queue, skb);

		queue->rx_queue_len += skb->len;
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

	return ret;
}

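/* Remove the SKB at the head of the rx_queue, recalculate the slots
 * needed for the new head, and wake the netdev tx queue if the
 * internal queue has drained below its maximum.
 */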
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
		queue->vif->dev->stats.rx_dropped++;
	}
}

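/* Issue the batched grant copy operations, propagating any copy
 * failure into the status of the corresponding response, then push
 * the responses to the frontend (notifying it if required) and free
 * the completed SKBs.
 */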
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;
	int notify;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	/* Push responses for all completed packets. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
}

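/* Add one grant copy operation to the batch, flushing the batch first
 * if it is already full. Data living on a foreign page (e.g. granted
 * by another domain) is copied by grant reference rather than by
 * frame number.
 */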
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref = req->gref;
	op->dest.domid = queue->vif->domid;
	op->dest.offset = offset;
	op->len = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

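/* State of the packet currently being transmitted to the frontend:
 * the position within the SKB (including its frag list) and the
 * extra info segments still to be sent.
 */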
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	struct sk_buff *frag_iter;
	int frag; /* frag == -1 => frag_iter->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

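/* Dequeue the next SKB and initialise the packet state, preparing any
 * extra info segments (GSO, XDP headroom, hash) that the packet
 * requires.
 */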
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (queue->vif->xdp_headroom) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

		memset(extra, 0, sizeof(struct xen_netif_extra_info));
		extra->u.xdp.headroom = queue->vif->xdp_headroom;
		extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}

static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

	pkt->frag++;
	pkt->frag_offset = 0;

	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
		else
			pkt->frag_iter = frag_iter->next;

		pkt->frag = -1;
	}
}

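/* Return the next chunk of the current frag, clipped so that it
 * neither crosses a source page boundary nor overruns the space
 * remaining in the destination ring slot.
 */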
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	void *frag_data;
	size_t frag_len, chunk_len;

	BUG_ON(!frag_iter);

	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		frag_len = skb_headlen(frag_iter);
	} else {
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
					     xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len)
		xenvif_rx_next_frag(pkt);

	*data = frag_data;
	*len = chunk_len;
}

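/* Fill one ring slot with packet data, generating grant copy
 * operations until either the slot or the packet is exhausted, and
 * build the corresponding response.
 */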
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = queue->vif->xdp_headroom;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

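/* Consume one ring slot for the next pending extra info segment,
 * which is written over the response. Must only be called while
 * extras remain to be sent.
 */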
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

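/* Transmit one SKB to the frontend, consuming one ring slot per
 * response (data or extra info) until the whole packet has been
 * handled.
 */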
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	queue->last_rx_time = jiffies;

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64

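/* Transmit queued SKBs to the frontend while enough ring slots are
 * available, bounded by RX_BATCH_SIZE packets per invocation.
 */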
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct sk_buff_head completed_skbs;
	unsigned int work_done = 0;

	__skb_queue_head_init(&completed_skbs);
	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       !skb_queue_empty(&queue->rx_queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}

	/* Flush any pending copies and complete all skbs. */
	xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return prod - cons;
}

static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return !queue->stalled &&
		xenvif_rx_queue_slots(queue) < needed &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

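/* Return true if the RX thread has work to do: ring slots available
 * for the head of the queue, a stall state change to handle, a
 * pending stop request, or a disabled interface.
 */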
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		(test_kthread && kthread_should_stop()) ||
		queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue, true))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue, true))
			break;
		if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
					&queue->eoi_pending) &
		    (NETBK_RX_EOI | NETBK_COMMON_EOI))
			xen_irq_lateeoi(queue->rx_irq, 0);

		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

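/* Main loop of the per-queue RX kernel thread: wait for work,
 * transmit queued packets to the frontend, manage carrier state and
 * drop packets the frontend has been too slow to accept.
 */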
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* The frontend has been found to be rogue, so disable
		 * it in kthread context. Currently this is only set
		 * when netback finds that the frontend has sent a
		 * malformed packet, but we cannot disable the
		 * interface in softirq context, so we defer it here,
		 * if this thread is associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}