/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
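/* (With one page per ring slot, this is enough data to back half of the
 * shared Rx ring.)
 */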

/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

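/* A vif is schedulable only while its netdev is up, the backend is
 * connected, and the vif has not been disabled after misbehaving.
 */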
static int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}

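/* These event channels use the lateeoi irq model: the EOI is deferred
 * until the event has actually been handled. The eoi_pending bits record
 * which EOIs are outstanding; if an interrupt turns out to be spurious
 * (no work found), the bit is cleared again and the EOI is signalled
 * immediately with XEN_EOI_FLAG_SPURIOUS, letting the event channel code
 * throttle a misbehaving frontend.
 */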
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

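/* NAPI poll handler: process up to @budget packets worth of Tx requests
 * from the frontend.
 */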
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, so we pretend there is nothing to do for it
	 * in order to deschedule it from NAPI. The interface will be
	 * turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

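/* Combined handler, used when the frontend drives both Tx and Rx over a
 * single event channel (feature-split-event-channels not negotiated).
 */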
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_rx, has_tx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally, always return 0 as the
	 * packet is going to be dropped anyway.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
			dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

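	/* A hash mapping table has been configured. It is double-buffered:
	 * mapping_sel picks the copy that is currently live, so the control
	 * path can rewrite the other copy and flip the selector without
	 * racing against transmit.
	 */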
	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}

static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	if (!xenvif_rx_queue_tail(queue, skb))
		goto drop;

	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

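	/* Only frontends that support scatter-gather can receive frames
	 * larger than a standard Ethernet payload.
	 */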
	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

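	/* Mask out the features the frontend has not negotiated. */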
	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	static const u8 dummy_addr[ETH_ALEN] = {
		0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
	};
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_hw_addr_set(dev, dummy_addr);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

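	/* Start with effectively unlimited bandwidth credit; a real limit
	 * only takes effect if one is configured for the vif (via the
	 * "rate" key in xenstore).
	 */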
	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
			{ { .callback = xenvif_zerocopy_callback },
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

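	/* Sanity-check the producer indexes the frontend published: a
	 * well-behaved frontend can never have more requests outstanding
	 * than fit in the ring.
	 */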
	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not set up irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop_put(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;
	/*
	 * Take a reference to the task in order to prevent it from being freed
	 * if the thread function returns before kthread_stop is called.
	 */
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

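	/* Bind the event channel(s). The IRQs are left disabled here and
	 * are only enabled by xenvif_up() when the interface is brought up.
	 */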
	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

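	/* vif lives inside the netdev's private area, so the queue array is
	 * cached in a local (above) before free_netdev() releases it.
	 */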
	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}
858 | |