// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
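/* Usage sketch (hypothetical value): "modprobe hv_netvsc ring_size=256"
 * requests 256 pages per VMBus ring; netvsc_ring_bytes holds the same size
 * in bytes, derived from ring_size at module init.
 */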

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

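/* Propagate promiscuous/allmulti flag changes to the slave VF device, if
 * one is bonded to this synthetic device.
 */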
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting synthetic device up transparently sets
		 * slave as up. If open fails, then slave will still
		 * be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

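/* Poll every channel until both its inbound and outbound rings have drained,
 * sleeping between attempts; give up with -ETIMEDOUT after RETRY_MAX tries.
 */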
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

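/* Append a per-packet info (PPI) record to the RNDIS message and return a
 * pointer to its payload; the caller fills in the payload (hash value,
 * 802.1Q tag, checksum or LSO parameters).
 */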
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

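/* Map the flow hash through the host-provided send indirection table to pick
 * a transmit queue, caching the result in the socket when possible so the
 * lookup is skipped for later packets of the same flow.
 */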
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
			txq = netdev_pick_tx(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

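	/* The VF may expose more queues than the synthetic device; fold an
	 * out-of-range queue number back into our range.
	 */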
	while (txq >= ndev->real_num_tx_queues)
		txq -= ndev->real_num_tx_queues;

	return txq;
}

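/* Split a buffer that may straddle Hyper-V page boundaries into
 * page-granular entries of the page buffer array; returns the number of
 * entries used.
 */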
static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
	offset = offset & ~HV_HYP_PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = HV_HYP_PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = hvpfn;
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == HV_HYP_PAGE_SIZE && len) {
			hvpfn++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
				  offset_in_hvpage(hdr),
				  len,
				  &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_hvpfn(data),
				  offset_in_hvpage(data),
				  skb_headlen(skb),
				  &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
					  skb_frag_off(frag),
					  skb_frag_size(frag),
					  &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~HV_HYP_PAGE_MASK;
		pages += HVPFN_UP(offset + size);
	}
	return pages;
}

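/* Estimate how many page buffer slots the skb needs: pages spanned by the
 * linear data plus pages spanned by each fragment.
 */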
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_hvpage(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

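/* Classify the transport protocol for checksum offload. Anything that is
 * not IPv4 is assumed here to be IPv6.
 */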
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If VF is present and up then redirect packets to it.
	 * Skip the VF if it is marked down or has no carrier.
	 * If netpoll is in use, then VF can not be used either.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
	    net_device_ctx->data_path_is_vf)
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			sizeof_field(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	/* When using AF_PACKET we need to drop VLAN header from
	 * the frame and update the SKB to allow the HOST OS
	 * to transmit the 802.1Q packet
	 */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		u16 vlan_tci;

		skb_reset_mac_header(skb);
		if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
			if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
				++net_device_ctx->eth_stats.vlan_error;
				goto drop;
			}

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			/* Update the NDIS header pkt lengths */
			packet->total_data_buflen -= VLAN_HLEN;
			packet->total_bytes -= VLAN_HLEN;
			rndis_msg->msg_len = packet->total_data_buflen;
			rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			tcp_v6_gso_csum_prep(skb);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	return netvsc_xmit(skb, ndev, false);
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp,
				void *data, u32 data_buflen)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Ensure the packet is big enough to access its fields */
	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
		netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
			   resp->msg_len);
		return;
	}

	/* Copy the RNDIS indicate status into nvchan->recv_buf */
	memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		/* Validate status_buf_offset and status_buflen.
		 *
		 * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account
		 * for the status buffer field in resp->msg_len; perform the validation
		 * using data_buflen (>= resp->msg_len).
		 */
		if (indicate->status_buflen < sizeof(speed) ||
		    indicate->status_buf_offset < sizeof(*indicate) ||
		    data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
		    data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset
		       < indicate->status_buflen) {
			netdev_err(net, "invalid rndis_indicate_status packet\n");
			return;
		}

		speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

/* This function should only be called after skb_record_rx_queue() */
void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int rc;

	skb->queue_mapping = skb_get_rx_queue(skb);
	__skb_push(skb, ETH_HLEN);

	rc = netvsc_xmit(skb, ndev, true);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

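/* Recompute the IPv4 header checksum in place; skb->data must point at the
 * IP header when this is called.
 */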
static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan,
					     struct xdp_buff *xdp)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
		&nvchan->rsc.csum_info;
	const u32 *hash_info = &nvchan->rsc.hash_info;
	u8 ppi_flags = nvchan->rsc.ppi_flags;
	struct sk_buff *skb;
	void *xbuf = xdp->data_hard_start;
	int i;

	if (xbuf) {
		unsigned int hdroom = xdp->data - xdp->data_hard_start;
		unsigned int xlen = xdp->data_end - xdp->data;
		unsigned int frag_size = xdp->frame_sz;

		skb = build_skb(xbuf, frag_size);

		if (!skb) {
			__free_page(virt_to_page(xbuf));
			return NULL;
		}

		skb_reserve(skb, hdroom);
		skb_put(skb, xlen);
		skb->dev = napi->dev;
	} else {
		skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);

		if (!skb)
			return NULL;

		/* Copy to skb. This copy is needed here since the memory
		 * pointed by hv_netvsc_packet cannot be deallocated.
		 */
		for (i = 0; i < nvchan->rsc.cnt; i++)
			skb_put_data(skb, nvchan->rsc.data[i],
				     nvchan->rsc.len[i]);
	}

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP)) {
		/* Check that there is enough space to hold the IP header. */
		if (skb_headlen(skb) < sizeof(struct iphdr)) {
			kfree_skb(skb);
			return NULL;
		}
		netvsc_comp_ipcsum(skb);
	}

	/* Do L4 checksum offload if enabled and present. */
	if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
		skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);

	if (ppi_flags & NVSC_RSC_VLAN) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
	struct xdp_buff xdp;
	u32 act;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	act = netvsc_run_xdp(net, nvchan, &xdp);

	if (act == XDP_REDIRECT)
		return NVSP_STAT_SUCCESS;

	if (act != XDP_PASS && act != XDP_TX) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->xdp_drop++;
		u64_stats_update_end(&rx_stats->syncp);

		return NVSP_STAT_SUCCESS; /* consumed by XDP */
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	u64_stats_update_begin(&rx_stats->syncp);
	if (act == XDP_TX)
		rx_stats->xdp_tx++;

	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		netvsc_xdp_xmit(skb, net);
		return NVSP_STAT_SUCCESS;
	}

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either existing
 * struct netvsc_device, or from default values.
 */
static
struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;
	struct bpf_prog *prog;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		ASSERT_RTNL();

		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);

		prog = netvsc_xdp_get(nvdev);
		if (prog) {
			bpf_prog_inc(prog);
			dev_info->bprog = prog;
		}
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

/* Free struct netvsc_device_info */
static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
{
	if (dev_info->bprog) {
		ASSERT_RTNL();
		bpf_prog_put(dev_info->bprog);
	}

	kfree(dev_info);
}

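/* Quiesce and tear down the synthetic device: stop sub-channel setup, detach
 * any XDP program, close the RNDIS filter, and wait for the rings to drain
 * before removing the device.
 */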
static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't keep trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	netvsc_xdp_set(ndev, NULL, NULL, nvdev);

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	struct bpf_prog *prog;
	int ret = 0;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	prog = dev_info->bprog;
	if (prog) {
		bpf_prog_inc(prog);
		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
		if (ret) {
			bpf_prog_put(prog);
			goto err1;
		}
	}

	/* In any case device is now ready */
	nvdev->tx_disable = false;
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels are set up */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			goto err2;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;

err2:
	netif_device_detach(ndev);

err1:
	rndis_filter_device_remove(hdev, nvdev);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct net_device *vf_netdev;

	vf_netdev = rtnl_dereference(ndc->vf_netdev);

	if (vf_netdev)
		return __ethtool_get_link_ksettings(vf_netdev, cmd);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);

	if (vf_netdev) {
		if (!vf_netdev->ethtool_ops->set_link_ksettings)
			return -EOPNOTSUPP;

		return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
								  cmd);
	}

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &ndc->speed, &ndc->duplex);
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes = this_tot->vf_rx_bytes;
		this_tot->tx_bytes = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats_tx *tx_stats;
		const struct netvsc_stats_rx *rx_stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		tx_stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		this_tot->tx_bytes += bytes;
		this_tot->tx_packets += packets;

		rx_stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		this_tot->rx_bytes += bytes;
		this_tot->rx_packets += packets;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	rcu_read_lock();

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (!nvdev)
		goto out;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes += vf_tot.rx_bytes;
	t->tx_bytes += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats_tx *tx_stats;
		const struct netvsc_stats_rx *rx_stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		tx_stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		rx_stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			multicast = rx_stats->multicast + rx_stats->broadcast;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}
out:
	rcu_read_unlock();
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
	{ "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)

/* statistics per CPU (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats_tx *tx_stats;
	const struct netvsc_stats_rx *rx_stats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_drop;
	u64 xdp_redirect;
	u64 xdp_tx;
	u64 xdp_xmit;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		tx_stats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;

		rx_stats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_redirect = rx_stats->xdp_redirect;
			xdp_tx = rx_stats->xdp_tx;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_redirect;
		data[i++] = xdp_tx;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	if (!pcpu_sum)
		return;

	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			ethtool_sprintf(&p, netvsc_stats[i].name);

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
			ethtool_sprintf(&p, vf_stats[i].name);

		for (i = 0; i < nvdev->num_chn; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
			ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
			ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
			ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
			ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++)
				ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}
1731 | |
1732 | static int |
1733 | netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) |
1734 | { |
	struct net_device_context *ndc = netdev_priv(ndev);
1736 | |
1737 | if (info->cmd == ETHTOOL_SRXFH) |
1738 | return netvsc_set_rss_hash_opts(ndc, info); |
1739 | |
1740 | return -EOPNOTSUPP; |
1741 | } |
1742 | |
1743 | static u32 netvsc_get_rxfh_key_size(struct net_device *dev) |
1744 | { |
1745 | return NETVSC_HASH_KEYLEN; |
1746 | } |
1747 | |
static u32 netvsc_rss_indir_size(struct net_device *dev)
1749 | { |
1750 | struct net_device_context *ndc = netdev_priv(dev); |
1751 | |
1752 | return ndc->rx_table_sz; |
1753 | } |
1754 | |
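/* Report the RSS indirection table and Toeplitz hash key, e.g. via
 * "ethtool -x eth0" (eth0 is an assumed device name).
 */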
1755 | static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, |
1756 | u8 *hfunc) |
1757 | { |
1758 | struct net_device_context *ndc = netdev_priv(dev); |
1759 | struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); |
1760 | struct rndis_device *rndis_dev; |
1761 | int i; |
1762 | |
1763 | if (!ndev) |
1764 | return -ENODEV; |
1765 | |
1766 | if (hfunc) |
1767 | *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ |
1768 | |
1769 | rndis_dev = ndev->extension; |
1770 | if (indir) { |
1771 | for (i = 0; i < ndc->rx_table_sz; i++) |
1772 | indir[i] = ndc->rx_table[i]; |
1773 | } |
1774 | |
1775 | if (key) |
1776 | memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); |
1777 | |
1778 | return 0; |
1779 | } |
1780 | |
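/* Update the RSS indirection table and/or hash key. Each indirection
 * entry must point at a valid channel. A sketch (assuming eth0 has at
 * least four channels):
 *
 *   ethtool -X eth0 equal 4
 */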
1781 | static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, |
1782 | const u8 *key, const u8 hfunc) |
1783 | { |
1784 | struct net_device_context *ndc = netdev_priv(dev); |
1785 | struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); |
1786 | struct rndis_device *rndis_dev; |
1787 | int i; |
1788 | |
1789 | if (!ndev) |
1790 | return -ENODEV; |
1791 | |
1792 | if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) |
1793 | return -EOPNOTSUPP; |
1794 | |
1795 | rndis_dev = ndev->extension; |
1796 | if (indir) { |
1797 | for (i = 0; i < ndc->rx_table_sz; i++) |
1798 | if (indir[i] >= ndev->num_chn) |
1799 | return -EINVAL; |
1800 | |
1801 | for (i = 0; i < ndc->rx_table_sz; i++) |
1802 | ndc->rx_table[i] = indir[i]; |
1803 | } |
1804 | |
1805 | if (!key) { |
1806 | if (!indir) |
1807 | return 0; |
1808 | |
1809 | key = rndis_dev->rss_key; |
1810 | } |
1811 | |
	return rndis_filter_set_rss_param(rndis_dev, key);
1813 | } |
1814 | |
/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
 * It does have a pre-allocated receive area which is divided into sections.
 */
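/* A worked example, assuming the driver's default 1728-byte receive
 * sections (the actual section size is negotiated per device): with a
 * 16 MB receive buffer,
 *
 *   rx_max_pending = (16 * 1024 * 1024) / 1728 = 9709 sections
 */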
1818 | static void __netvsc_get_ringparam(struct netvsc_device *nvdev, |
1819 | struct ethtool_ringparam *ring) |
1820 | { |
1821 | u32 max_buf_size; |
1822 | |
1823 | ring->rx_pending = nvdev->recv_section_cnt; |
1824 | ring->tx_pending = nvdev->send_section_cnt; |
1825 | |
1826 | if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) |
1827 | max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; |
1828 | else |
1829 | max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; |
1830 | |
1831 | ring->rx_max_pending = max_buf_size / nvdev->recv_section_size; |
1832 | ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE |
1833 | / nvdev->send_section_size; |
1834 | } |
1835 | |
1836 | static void netvsc_get_ringparam(struct net_device *ndev, |
1837 | struct ethtool_ringparam *ring, |
1838 | struct kernel_ethtool_ringparam *kernel_ring, |
1839 | struct netlink_ext_ack *extack) |
1840 | { |
	struct net_device_context *ndevctx = netdev_priv(ndev);
1842 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
1843 | |
1844 | if (!nvdev) |
1845 | return; |
1846 | |
1847 | __netvsc_get_ringparam(nvdev, ring); |
1848 | } |
1849 | |
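/* Apply new section counts by detaching and re-attaching the netvsc
 * device, which briefly stops traffic. Requested values are clamped to
 * the limits from __netvsc_get_ringparam(). A sketch (eth0 assumed):
 *
 *   ethtool -G eth0 rx 2048 tx 512
 */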
1850 | static int netvsc_set_ringparam(struct net_device *ndev, |
1851 | struct ethtool_ringparam *ring, |
1852 | struct kernel_ethtool_ringparam *kernel_ring, |
1853 | struct netlink_ext_ack *extack) |
1854 | { |
	struct net_device_context *ndevctx = netdev_priv(ndev);
1856 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
1857 | struct netvsc_device_info *device_info; |
1858 | struct ethtool_ringparam orig; |
1859 | u32 new_tx, new_rx; |
1860 | int ret = 0; |
1861 | |
1862 | if (!nvdev || nvdev->destroy) |
1863 | return -ENODEV; |
1864 | |
1865 | memset(&orig, 0, sizeof(orig)); |
	__netvsc_get_ringparam(nvdev, &orig);
1867 | |
1868 | new_tx = clamp_t(u32, ring->tx_pending, |
1869 | NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending); |
1870 | new_rx = clamp_t(u32, ring->rx_pending, |
1871 | NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending); |
1872 | |
1873 | if (new_tx == orig.tx_pending && |
1874 | new_rx == orig.rx_pending) |
1875 | return 0; /* no change */ |
1876 | |
1877 | device_info = netvsc_devinfo_get(nvdev); |
1878 | |
1879 | if (!device_info) |
1880 | return -ENOMEM; |
1881 | |
1882 | device_info->send_sections = new_tx; |
1883 | device_info->recv_sections = new_rx; |
1884 | |
1885 | ret = netvsc_detach(ndev, nvdev); |
1886 | if (ret) |
1887 | goto out; |
1888 | |
	ret = netvsc_attach(ndev, device_info);
1890 | if (ret) { |
1891 | device_info->send_sections = orig.tx_pending; |
1892 | device_info->recv_sections = orig.rx_pending; |
1893 | |
		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed");
1896 | } |
1897 | |
1898 | out: |
	netvsc_devinfo_put(device_info);
1900 | return ret; |
1901 | } |
1902 | |
1903 | static netdev_features_t netvsc_fix_features(struct net_device *ndev, |
1904 | netdev_features_t features) |
1905 | { |
	struct net_device_context *ndevctx = netdev_priv(ndev);
1907 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
1908 | |
1909 | if (!nvdev || nvdev->destroy) |
1910 | return features; |
1911 | |
1912 | if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) { |
1913 | features ^= NETIF_F_LRO; |
		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
1915 | } |
1916 | |
1917 | return features; |
1918 | } |
1919 | |
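/* NETIF_F_LRO is backed by RNDIS receive segment coalescing (RSC) on
 * the host, so toggling it, e.g. "ethtool -K eth0 lro off" (eth0
 * assumed), updates the RSC offload for both IPv4 and IPv6; feature
 * changes are then propagated to the bonded VF, if any.
 */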
1920 | static int netvsc_set_features(struct net_device *ndev, |
1921 | netdev_features_t features) |
1922 | { |
1923 | netdev_features_t change = features ^ ndev->features; |
	struct net_device_context *ndevctx = netdev_priv(ndev);
1925 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
1926 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); |
1927 | struct ndis_offload_params offloads; |
1928 | int ret = 0; |
1929 | |
1930 | if (!nvdev || nvdev->destroy) |
1931 | return -ENODEV; |
1932 | |
1933 | if (!(change & NETIF_F_LRO)) |
1934 | goto syncvf; |
1935 | |
1936 | memset(&offloads, 0, sizeof(struct ndis_offload_params)); |
1937 | |
1938 | if (features & NETIF_F_LRO) { |
1939 | offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; |
1940 | offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; |
1941 | } else { |
1942 | offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; |
1943 | offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; |
1944 | } |
1945 | |
	ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
1947 | |
1948 | if (ret) { |
1949 | features ^= NETIF_F_LRO; |
1950 | ndev->features = features; |
1951 | } |
1952 | |
1953 | syncvf: |
1954 | if (!vf_netdev) |
1955 | return ret; |
1956 | |
1957 | vf_netdev->wanted_features = features; |
	netdev_update_features(vf_netdev);
1959 | |
1960 | return ret; |
1961 | } |
1962 | |
1963 | static int netvsc_get_regs_len(struct net_device *netdev) |
1964 | { |
1965 | return VRSS_SEND_TAB_SIZE * sizeof(u32); |
1966 | } |
1967 | |
1968 | static void netvsc_get_regs(struct net_device *netdev, |
1969 | struct ethtool_regs *regs, void *p) |
1970 | { |
	struct net_device_context *ndc = netdev_priv(netdev);
1972 | u32 *regs_buff = p; |
1973 | |
1974 | /* increase the version, if buffer format is changed. */ |
1975 | regs->version = 1; |
1976 | |
1977 | memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32)); |
1978 | } |
1979 | |
1980 | static u32 netvsc_get_msglevel(struct net_device *ndev) |
1981 | { |
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1983 | |
1984 | return ndev_ctx->msg_enable; |
1985 | } |
1986 | |
1987 | static void netvsc_set_msglevel(struct net_device *ndev, u32 val) |
1988 | { |
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1990 | |
1991 | ndev_ctx->msg_enable = val; |
1992 | } |
1993 | |
1994 | static const struct ethtool_ops ethtool_ops = { |
1995 | .get_drvinfo = netvsc_get_drvinfo, |
1996 | .get_regs_len = netvsc_get_regs_len, |
1997 | .get_regs = netvsc_get_regs, |
1998 | .get_msglevel = netvsc_get_msglevel, |
1999 | .set_msglevel = netvsc_set_msglevel, |
2000 | .get_link = ethtool_op_get_link, |
2001 | .get_ethtool_stats = netvsc_get_ethtool_stats, |
2002 | .get_sset_count = netvsc_get_sset_count, |
2003 | .get_strings = netvsc_get_strings, |
2004 | .get_channels = netvsc_get_channels, |
2005 | .set_channels = netvsc_set_channels, |
2006 | .get_ts_info = ethtool_op_get_ts_info, |
2007 | .get_rxnfc = netvsc_get_rxnfc, |
2008 | .set_rxnfc = netvsc_set_rxnfc, |
2009 | .get_rxfh_key_size = netvsc_get_rxfh_key_size, |
2010 | .get_rxfh_indir_size = netvsc_rss_indir_size, |
2011 | .get_rxfh = netvsc_get_rxfh, |
2012 | .set_rxfh = netvsc_set_rxfh, |
2013 | .get_link_ksettings = netvsc_get_link_ksettings, |
2014 | .set_link_ksettings = netvsc_set_link_ksettings, |
2015 | .get_ringparam = netvsc_get_ringparam, |
2016 | .set_ringparam = netvsc_set_ringparam, |
2017 | }; |
2018 | |
2019 | static const struct net_device_ops device_ops = { |
2020 | .ndo_open = netvsc_open, |
2021 | .ndo_stop = netvsc_close, |
2022 | .ndo_start_xmit = netvsc_start_xmit, |
2023 | .ndo_change_rx_flags = netvsc_change_rx_flags, |
2024 | .ndo_set_rx_mode = netvsc_set_rx_mode, |
2025 | .ndo_fix_features = netvsc_fix_features, |
2026 | .ndo_set_features = netvsc_set_features, |
2027 | .ndo_change_mtu = netvsc_change_mtu, |
2028 | .ndo_validate_addr = eth_validate_addr, |
2029 | .ndo_set_mac_address = netvsc_set_mac_addr, |
2030 | .ndo_select_queue = netvsc_select_queue, |
2031 | .ndo_get_stats64 = netvsc_get_stats64, |
2032 | .ndo_bpf = netvsc_bpf, |
2033 | .ndo_xdp_xmit = netvsc_ndoxdp_xmit, |
2034 | }; |
2035 | |
2036 | /* |
2037 | * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link |
2038 | * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is |
2039 | * present send GARP packet to network peers with netif_notify_peers(). |
2040 | */ |
2041 | static void netvsc_link_change(struct work_struct *w) |
2042 | { |
2043 | struct net_device_context *ndev_ctx = |
2044 | container_of(w, struct net_device_context, dwork.work); |
2045 | struct hv_device *device_obj = ndev_ctx->device_ctx; |
	struct net_device *net = hv_get_drvdata(device_obj);
2047 | unsigned long flags, next_reconfig, delay; |
2048 | struct netvsc_reconfig *event = NULL; |
2049 | struct netvsc_device *net_device; |
2050 | struct rndis_device *rdev; |
2051 | bool reschedule = false; |
2052 | |
	/* if changes are happening, come back later */
2054 | if (!rtnl_trylock()) { |
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2056 | return; |
2057 | } |
2058 | |
2059 | net_device = rtnl_dereference(ndev_ctx->nvdev); |
2060 | if (!net_device) |
2061 | goto out_unlock; |
2062 | |
2063 | rdev = net_device->extension; |
2064 | |
2065 | next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; |
2066 | if (time_is_after_jiffies(next_reconfig)) { |
2067 | /* link_watch only sends one notification with current state |
2068 | * per second, avoid doing reconfig more frequently. Handle |
2069 | * wrap around. |
2070 | */ |
2071 | delay = next_reconfig - jiffies; |
2072 | delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT; |
		schedule_delayed_work(&ndev_ctx->dwork, delay);
2074 | goto out_unlock; |
2075 | } |
2076 | ndev_ctx->last_reconfig = jiffies; |
2077 | |
	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2086 | |
2087 | if (!event) |
2088 | goto out_unlock; |
2089 | |
2090 | switch (event->event) { |
2091 | /* Only the following events are possible due to the check in |
2092 | * netvsc_linkstatus_callback() |
2093 | */ |
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netvsc_tx_enable(net_device, net);
		} else {
			__netdev_notify_peers(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
2125 | } |
2126 | |
2127 | rtnl_unlock(); |
2128 | |
2129 | /* link_watch only sends one notification with current state per |
2130 | * second, handle next reconfig event in 2 seconds. |
2131 | */ |
2132 | if (reschedule) |
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2134 | |
2135 | return; |
2136 | |
2137 | out_unlock: |
2138 | rtnl_unlock(); |
2139 | } |
2140 | |
2141 | static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) |
2142 | { |
2143 | struct net_device_context *net_device_ctx; |
2144 | struct net_device *dev; |
2145 | |
	dev = netdev_master_upper_dev_get(vf_netdev);
2147 | if (!dev || dev->netdev_ops != &device_ops) |
2148 | return NULL; /* not a netvsc device */ |
2149 | |
2150 | net_device_ctx = netdev_priv(dev); |
2151 | if (!rtnl_dereference(net_device_ctx->nvdev)) |
2152 | return NULL; /* device is removed */ |
2153 | |
2154 | return dev; |
2155 | } |
2156 | |
2157 | /* Called when VF is injecting data into network stack. |
2158 | * Change the associated network device from VF to netvsc. |
2159 | * note: already called with rcu_read_lock |
2160 | */ |
2161 | static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) |
2162 | { |
2163 | struct sk_buff *skb = *pskb; |
2164 | struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); |
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
2166 | struct netvsc_vf_pcpu_stats *pcpu_stats |
2167 | = this_cpu_ptr(ndev_ctx->vf_stats); |
2168 | |
2169 | skb = skb_share_check(skb, GFP_ATOMIC); |
2170 | if (unlikely(!skb)) |
2171 | return RX_HANDLER_CONSUMED; |
2172 | |
2173 | *pskb = skb; |
2174 | |
2175 | skb->dev = ndev; |
2176 | |
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);
2181 | |
2182 | return RX_HANDLER_ANOTHER; |
2183 | } |
2184 | |
2185 | static int netvsc_vf_join(struct net_device *vf_netdev, |
2186 | struct net_device *ndev) |
2187 | { |
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
2189 | int ret; |
2190 | |
	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}
2208 | |
2209 | /* set slave flag before open to prevent IPv6 addrconf */ |
2210 | vf_netdev->flags |= IFF_SLAVE; |
2211 | |
	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
2213 | |
	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
2215 | |
	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
2217 | return 0; |
2218 | |
2219 | upper_link_failed: |
	netdev_rx_handler_unregister(vf_netdev);
2221 | rx_handler_failed: |
2222 | return ret; |
2223 | } |
2224 | |
2225 | static void __netvsc_vf_setup(struct net_device *ndev, |
2226 | struct net_device *vf_netdev) |
2227 | { |
2228 | int ret; |
2229 | |
2230 | /* Align MTU of VF with master */ |
2231 | ret = dev_set_mtu(vf_netdev, ndev->mtu); |
2232 | if (ret) |
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);
2235 | |
2236 | /* set multicast etc flags on VF */ |
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
2238 | |
2239 | /* sync address list from ndev to VF */ |
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);
2244 | |
	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
2250 | } |
2251 | } |
2252 | |
2253 | /* Setup VF as slave of the synthetic device. |
2254 | * Runs in workqueue to avoid recursion in netlink callbacks. |
2255 | */ |
2256 | static void netvsc_vf_setup(struct work_struct *w) |
2257 | { |
2258 | struct net_device_context *ndev_ctx |
2259 | = container_of(w, struct net_device_context, vf_takeover.work); |
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2261 | struct net_device *vf_netdev; |
2262 | |
2263 | if (!rtnl_trylock()) { |
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
2265 | return; |
2266 | } |
2267 | |
2268 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); |
2269 | if (vf_netdev) |
2270 | __netvsc_vf_setup(ndev, vf_netdev); |
2271 | |
2272 | rtnl_unlock(); |
2273 | } |
2274 | |
2275 | /* Find netvsc by VF serial number. |
2276 | * The PCI hyperv controller records the serial number as the slot kobj name. |
2277 | */ |
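/* For example, a VF the host assigned serial 2 shows up with PCI slot
 * name "2" (i.e. under /sys/bus/pci/slots/2); the value here is only
 * illustrative.
 */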
2278 | static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) |
2279 | { |
2280 | struct device *parent = vf_netdev->dev.parent; |
2281 | struct net_device_context *ndev_ctx; |
2282 | struct net_device *ndev; |
2283 | struct pci_dev *pdev; |
2284 | u32 serial; |
2285 | |
2286 | if (!parent || !dev_is_pci(parent)) |
2287 | return NULL; /* not a PCI device */ |
2288 | |
2289 | pdev = to_pci_dev(parent); |
2290 | if (!pdev->slot) { |
		netdev_notice(vf_netdev, "no PCI slot information\n");
2292 | return NULL; |
2293 | } |
2294 | |
	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
			      pci_slot_name(pdev->slot));
2298 | return NULL; |
2299 | } |
2300 | |
2301 | list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { |
2302 | if (!ndev_ctx->vf_alloc) |
2303 | continue; |
2304 | |
2305 | if (ndev_ctx->vf_serial != serial) |
2306 | continue; |
2307 | |
		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
		if (ndev->addr_len != vf_netdev->addr_len ||
		    memcmp(ndev->perm_addr, vf_netdev->perm_addr,
			   ndev->addr_len) != 0)
			continue;

		return ndev;
	}
2317 | |
	/* Fall back to matching the synthetic NIC by its MAC address. */
2321 | list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { |
		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
			netdev_notice(vf_netdev,
				      "falling back to mac addr based matching\n");
			return ndev;
		}
	}

	netdev_notice(vf_netdev,
		      "no netdev found for vf serial:%u\n", serial);
2332 | return NULL; |
2333 | } |
2334 | |
2335 | static int netvsc_register_vf(struct net_device *vf_netdev) |
2336 | { |
2337 | struct net_device_context *net_device_ctx; |
2338 | struct netvsc_device *netvsc_dev; |
2339 | struct bpf_prog *prog; |
2340 | struct net_device *ndev; |
2341 | int ret; |
2342 | |
2343 | if (vf_netdev->addr_len != ETH_ALEN) |
2344 | return NOTIFY_DONE; |
2345 | |
2346 | ndev = get_netvsc_byslot(vf_netdev); |
2347 | if (!ndev) |
2348 | return NOTIFY_DONE; |
2349 | |
	net_device_ctx = netdev_priv(ndev);
2351 | netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); |
2352 | if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) |
2353 | return NOTIFY_DONE; |
2354 | |
2355 | /* if synthetic interface is a different namespace, |
2356 | * then move the VF to that namespace; join will be |
2357 | * done again in that context. |
2358 | */ |
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}
2372 | |
	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
2374 | |
2375 | if (netvsc_vf_join(vf_netdev, ndev) != 0) |
2376 | return NOTIFY_DONE; |
2377 | |
	dev_hold(vf_netdev);
2379 | rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); |
2380 | |
2381 | if (ndev->needed_headroom < vf_netdev->needed_headroom) |
2382 | ndev->needed_headroom = vf_netdev->needed_headroom; |
2383 | |
2384 | vf_netdev->wanted_features = ndev->features; |
	netdev_update_features(vf_netdev);
2386 | |
	prog = netvsc_xdp_get(netvsc_dev);
2388 | netvsc_vf_setxdp(vf_netdev, prog); |
2389 | |
2390 | return NOTIFY_OK; |
2391 | } |
2392 | |
2393 | /* Change the data path when VF UP/DOWN/CHANGE are detected. |
2394 | * |
2395 | * Typically a UP or DOWN event is followed by a CHANGE event, so |
2396 | * net_device_ctx->data_path_is_vf is used to cache the current data path |
2397 | * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate |
2398 | * message. |
2399 | * |
2400 | * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network |
2401 | * interface, there is only the CHANGE event and no UP or DOWN event. |
2402 | */ |
2403 | static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event) |
2404 | { |
2405 | struct net_device_context *net_device_ctx; |
2406 | struct netvsc_device *netvsc_dev; |
2407 | struct net_device *ndev; |
2408 | bool vf_is_up = false; |
2409 | int ret; |
2410 | |
2411 | if (event != NETDEV_GOING_DOWN) |
		vf_is_up = netif_running(vf_netdev);
2413 | |
2414 | ndev = get_netvsc_byref(vf_netdev); |
2415 | if (!ndev) |
2416 | return NOTIFY_DONE; |
2417 | |
	net_device_ctx = netdev_priv(ndev);
2419 | netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); |
2420 | if (!netvsc_dev) |
2421 | return NOTIFY_DONE; |
2422 | |
2423 | if (net_device_ctx->data_path_is_vf == vf_is_up) |
2424 | return NOTIFY_OK; |
2425 | |
2426 | if (vf_is_up && !net_device_ctx->vf_alloc) { |
		netdev_info(ndev, "Waiting for the VF association from host\n");
2428 | wait_for_completion(&net_device_ctx->vf_add); |
2429 | } |
2430 | |
	ret = netvsc_switch_datapath(ndev, vf_is_up);
2432 | |
	if (ret) {
		netdev_err(ndev,
			   "Data path failed to switch %s VF: %s, err: %d\n",
			   vf_is_up ? "to" : "from", vf_netdev->name, ret);
		return NOTIFY_DONE;
	} else {
		netdev_info(ndev, "Data path switched %s VF: %s\n",
			    vf_is_up ? "to" : "from", vf_netdev->name);
	}
2442 | |
2443 | return NOTIFY_OK; |
2444 | } |
2445 | |
2446 | static int netvsc_unregister_vf(struct net_device *vf_netdev) |
2447 | { |
2448 | struct net_device *ndev; |
2449 | struct net_device_context *net_device_ctx; |
2450 | |
2451 | ndev = get_netvsc_byref(vf_netdev); |
2452 | if (!ndev) |
2453 | return NOTIFY_DONE; |
2454 | |
	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
2457 | |
	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
2459 | |
2460 | netvsc_vf_setxdp(vf_netdev, NULL); |
2461 | |
	reinit_completion(&net_device_ctx->vf_add);
	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);
2467 | |
2468 | ndev->needed_headroom = RNDIS_AND_PPI_SIZE; |
2469 | |
2470 | return NOTIFY_OK; |
2471 | } |
2472 | |
2473 | static int netvsc_probe(struct hv_device *dev, |
2474 | const struct hv_vmbus_device_id *dev_id) |
2475 | { |
2476 | struct net_device *net = NULL; |
2477 | struct net_device_context *net_device_ctx; |
2478 | struct netvsc_device_info *device_info = NULL; |
2479 | struct netvsc_device *nvdev; |
2480 | int ret = -ENOMEM; |
2481 | |
2482 | net = alloc_etherdev_mq(sizeof(struct net_device_context), |
2483 | VRSS_CHANNEL_MAX); |
2484 | if (!net) |
2485 | goto no_net; |
2486 | |
	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);
2499 | |
2500 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
2501 | |
	init_completion(&net_device_ctx->vf_add);
	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
2506 | |
2507 | net_device_ctx->vf_stats |
2508 | = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); |
2509 | if (!net_device_ctx->vf_stats) |
2510 | goto no_stats; |
2511 | |
2512 | net->netdev_ops = &device_ops; |
	net->ethtool_ops = &ethtool_ops;
2514 | SET_NETDEV_DEV(net, &dev->device); |
	dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
2516 | |
2517 | /* We always need headroom for rndis header */ |
2518 | net->needed_headroom = RNDIS_AND_PPI_SIZE; |
2519 | |
2520 | /* Initialize the number of queues to be 1, we may change it if more |
2521 | * channels are offered later. |
2522 | */ |
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);
2525 | |
2526 | /* Notify the netvsc driver of the new device */ |
2527 | device_info = netvsc_devinfo_get(NULL); |
2528 | |
2529 | if (!device_info) { |
2530 | ret = -ENOMEM; |
2531 | goto devinfo_failed; |
2532 | } |
2533 | |
	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2538 | goto rndis_failed; |
2539 | } |
2540 | |
	eth_hw_addr_set(net, device_info->mac_adr);
2542 | |
2543 | /* We must get rtnl lock before scheduling nvdev->subchan_work, |
2544 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait |
2545 | * all subchannels to show up, but that may not happen because |
2546 | * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() |
2547 | * -> ... -> device_add() -> ... -> __device_attach() can't get |
2548 | * the device lock, so all the subchannels can't be processed -- |
2549 | * finally netvsc_subchan_work() hangs forever. |
2550 | */ |
2551 | rtnl_lock(); |
2552 | |
2553 | if (nvdev->num_chn > 1) |
		schedule_work(&nvdev->subchan_work);
2555 | |
2556 | /* hw_features computed in rndis_netdev_set_hwcaps() */ |
2557 | net->features = net->hw_features | |
2558 | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | |
2559 | NETIF_F_HW_VLAN_CTAG_RX; |
2560 | net->vlan_features = net->features; |
2561 | |
2562 | netdev_lockdep_set_classes(net); |
2563 | |
2564 | net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | |
2565 | NETDEV_XDP_ACT_NDO_XMIT; |
2566 | |
2567 | /* MTU range: 68 - 1500 or 65521 */ |
2568 | net->min_mtu = NETVSC_MTU_MIN; |
2569 | if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) |
2570 | net->max_mtu = NETVSC_MTU - ETH_HLEN; |
2571 | else |
2572 | net->max_mtu = ETH_DATA_LEN; |
2573 | |
2574 | nvdev->tx_disable = false; |
2575 | |
	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
2579 | goto register_failed; |
2580 | } |
2581 | |
	list_add(&net_device_ctx->list, &netvsc_dev_list);
2583 | rtnl_unlock(); |
2584 | |
	netvsc_devinfo_put(device_info);
2586 | return 0; |
2587 | |
2588 | register_failed: |
2589 | rtnl_unlock(); |
2590 | rndis_filter_device_remove(dev, nvdev); |
2591 | rndis_failed: |
	netvsc_devinfo_put(device_info);
2593 | devinfo_failed: |
	free_percpu(net_device_ctx->vf_stats);
2595 | no_stats: |
2596 | hv_set_drvdata(dev, NULL); |
	free_netdev(net);
2598 | no_net: |
2599 | return ret; |
2600 | } |
2601 | |
2602 | static void netvsc_remove(struct hv_device *dev) |
2603 | { |
2604 | struct net_device_context *ndev_ctx; |
2605 | struct net_device *vf_netdev, *net; |
2606 | struct netvsc_device *nvdev; |
2607 | |
2608 | net = hv_get_drvdata(dev); |
2609 | if (net == NULL) { |
		dev_err(&dev->device, "No net device to remove\n");
2611 | return; |
2612 | } |
2613 | |
	ndev_ctx = netdev_priv(net);
2615 | |
	cancel_delayed_work_sync(&ndev_ctx->dwork);
2617 | |
2618 | rtnl_lock(); |
2619 | nvdev = rtnl_dereference(ndev_ctx->nvdev); |
2620 | if (nvdev) { |
		cancel_work_sync(&nvdev->subchan_work);
		netvsc_xdp_set(net, NULL, NULL, nvdev);
2623 | } |
2624 | |
2625 | /* |
2626 | * Call to the vsc driver to let it know that the device is being |
2627 | * removed. Also blocks mtu and channel changes. |
2628 | */ |
2629 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); |
2630 | if (vf_netdev) |
2631 | netvsc_unregister_vf(vf_netdev); |
2632 | |
2633 | if (nvdev) |
2634 | rndis_filter_device_remove(dev, nvdev); |
2635 | |
	unregister_netdevice(net);
	list_del(&ndev_ctx->list);
2638 | |
2639 | rtnl_unlock(); |
2640 | |
2641 | hv_set_drvdata(dev, NULL); |
2642 | |
	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
2645 | } |
2646 | |
2647 | static int netvsc_suspend(struct hv_device *dev) |
2648 | { |
2649 | struct net_device_context *ndev_ctx; |
2650 | struct netvsc_device *nvdev; |
2651 | struct net_device *net; |
2652 | int ret; |
2653 | |
2654 | net = hv_get_drvdata(dev); |
2655 | |
	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
2658 | |
2659 | rtnl_lock(); |
2660 | |
2661 | nvdev = rtnl_dereference(ndev_ctx->nvdev); |
2662 | if (nvdev == NULL) { |
2663 | ret = -ENODEV; |
2664 | goto out; |
2665 | } |
2666 | |
2667 | /* Save the current config info */ |
2668 | ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev); |
2669 | if (!ndev_ctx->saved_netvsc_dev_info) { |
2670 | ret = -ENOMEM; |
2671 | goto out; |
2672 | } |
	ret = netvsc_detach(net, nvdev);
2674 | out: |
2675 | rtnl_unlock(); |
2676 | |
2677 | return ret; |
2678 | } |
2679 | |
2680 | static int netvsc_resume(struct hv_device *dev) |
2681 | { |
2682 | struct net_device *net = hv_get_drvdata(dev); |
2683 | struct net_device_context *net_device_ctx; |
2684 | struct netvsc_device_info *device_info; |
2685 | int ret; |
2686 | |
2687 | rtnl_lock(); |
2688 | |
	net_device_ctx = netdev_priv(net);
2690 | |
2691 | /* Reset the data path to the netvsc NIC before re-opening the vmbus |
2692 | * channel. Later netvsc_netdev_event() will switch the data path to |
2693 | * the VF upon the UP or CHANGE event. |
2694 | */ |
2695 | net_device_ctx->data_path_is_vf = false; |
2696 | device_info = net_device_ctx->saved_netvsc_dev_info; |
2697 | |
	ret = netvsc_attach(net, device_info);
2699 | |
	netvsc_devinfo_put(device_info);
2701 | net_device_ctx->saved_netvsc_dev_info = NULL; |
2702 | |
2703 | rtnl_unlock(); |
2704 | |
2705 | return ret; |
2706 | } |

static const struct hv_vmbus_device_id id_table[] = {
2708 | /* Network guid */ |
2709 | { HV_NIC_GUID, }, |
2710 | { }, |
2711 | }; |
2712 | |
2713 | MODULE_DEVICE_TABLE(vmbus, id_table); |
2714 | |
2715 | /* The one and only one */ |
2716 | static struct hv_driver netvsc_drv = { |
2717 | .name = KBUILD_MODNAME, |
2718 | .id_table = id_table, |
2719 | .probe = netvsc_probe, |
2720 | .remove = netvsc_remove, |
2721 | .suspend = netvsc_suspend, |
2722 | .resume = netvsc_resume, |
2723 | .driver = { |
2724 | .probe_type = PROBE_FORCE_SYNCHRONOUS, |
2725 | }, |
2726 | }; |
2727 | |
2728 | /* |
2729 | * On Hyper-V, every VF interface is matched with a corresponding |
2730 | * synthetic interface. The synthetic interface is presented first |
2731 | * to the guest. When the corresponding VF instance is registered, |
2732 | * we will take care of switching the data path. |
2733 | */ |
2734 | static int netvsc_netdev_event(struct notifier_block *this, |
2735 | unsigned long event, void *ptr) |
2736 | { |
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2738 | |
2739 | /* Skip our own events */ |
2740 | if (event_dev->netdev_ops == &device_ops) |
2741 | return NOTIFY_DONE; |
2742 | |
2743 | /* Avoid non-Ethernet type devices */ |
2744 | if (event_dev->type != ARPHRD_ETHER) |
2745 | return NOTIFY_DONE; |
2746 | |
2747 | /* Avoid Vlan dev with same MAC registering as VF */ |
	if (is_vlan_dev(event_dev))
2749 | return NOTIFY_DONE; |
2750 | |
2751 | /* Avoid Bonding master dev with same MAC registering as VF */ |
	if (netif_is_bond_master(event_dev))
2753 | return NOTIFY_DONE; |
2754 | |
2755 | switch (event) { |
2756 | case NETDEV_REGISTER: |
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
2760 | case NETDEV_UP: |
2761 | case NETDEV_DOWN: |
2762 | case NETDEV_CHANGE: |
2763 | case NETDEV_GOING_DOWN: |
		return netvsc_vf_changed(event_dev, event);
2765 | default: |
2766 | return NOTIFY_DONE; |
2767 | } |
2768 | } |
2769 | |
2770 | static struct notifier_block netvsc_netdev_notifier = { |
2771 | .notifier_call = netvsc_netdev_event, |
2772 | }; |
2773 | |
2774 | static void __exit netvsc_drv_exit(void) |
2775 | { |
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
2778 | } |
2779 | |
2780 | static int __init netvsc_drv_init(void) |
2781 | { |
2782 | int ret; |
2783 | |
2784 | if (ring_size < RING_SIZE_MIN) { |
2785 | ring_size = RING_SIZE_MIN; |
		pr_info("Increased ring_size to %u (min allowed)\n",
2787 | ring_size); |
2788 | } |
2789 | netvsc_ring_bytes = ring_size * PAGE_SIZE; |
2790 | |
2791 | ret = vmbus_driver_register(&netvsc_drv); |
2792 | if (ret) |
2793 | return ret; |
2794 | |
	register_netdevice_notifier(&netvsc_netdev_notifier);
2796 | return 0; |
2797 | } |
2798 | |
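/* ring_size is a load-time parameter; a sketch, not a recommendation:
 *
 *   modprobe hv_netvsc ring_size=256
 *
 * yields a 1 MB VMBus ring per channel on 4 KB-page systems
 * (256 pages * 4 KB).
 */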
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
2801 | |
2802 | module_init(netvsc_drv_init); |
2803 | module_exit(netvsc_drv_exit); |
2804 | |