/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>
#include <net/xdp.h>

/*		0 - Reserved to indicate value not set
 *     1..NR_CPUS - Reserved for sender_cpu
 *  NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

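/* Default number of packets one busy-poll pass may process when the socket
 * does not supply its own budget via sk->sk_busy_poll_budget.
 */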
#define BUSY_POLL_BUDGET 8

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

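/* True if global busy polling is enabled, i.e. the net.core.busy_poll
 * sysctl (sysctl_net_busy_poll) is non-zero.
 */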
static inline bool net_busy_loop_on(void)
{
	return READ_ONCE(sysctl_net_busy_poll);
}

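/* A socket may busy poll when it has a per-socket busy-poll timeout
 * (sk->sk_ll_usec, set via SO_BUSY_POLL or inherited from
 * net.core.busy_read) and no signal is pending for the current task.
 */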
static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

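/* Busy poll the NAPI context identified by @napi_id, processing at most
 * @budget packets per pass, until @loop_end reports completion (or after
 * a single pass when @loop_end is NULL).
 */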
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
	return 0;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

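/* Cheap timestamp for busy-poll timeouts: local_clock() is in nanoseconds,
 * and shifting right by 10 approximates a conversion to microseconds
 * (divide by 1024 rather than 1000), which is accurate enough here.
 */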
static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

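/* Per-socket variant of busy_loop_timeout(): uses the socket's own
 * sk->sk_ll_usec timeout instead of the global sysctl.
 */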
static inline bool sk_busy_loop_timeout(struct sock *sk,
					unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

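/* Busy poll the NAPI context the socket last received packets on, if it
 * has a valid NAPI ID. A non-blocking caller passes @nonblock != 0 so
 * napi_busy_loop() makes a single pass instead of spinning until
 * sk_busy_loop_end() reports completion or timeout.
 *
 * Illustrative caller pattern (receive path, before going to sleep),
 * roughly what the TCP/UDP recvmsg paths do:
 *
 *	if (sk_can_busy_loop(sk) &&
 *	    skb_queue_empty_lockless(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 */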
static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
			       READ_ONCE(sk->sk_prefer_busy_poll),
			       READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* If the skb was already marked with a valid NAPI ID, avoid overwriting
	 * it.
	 */
	if (skb->napi_id < MIN_NAPI_ID)
		skb->napi_id = napi->napi_id;
#endif
}

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_update(sk, skb);
}

/* Variant of sk_mark_napi_id() for passive flow setup,
 * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
 * needs to be set.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
				       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}

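/* Record @napi_id on the socket only if no NAPI ID has been set yet,
 * so the first value seen sticks.
 */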
static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}

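/* As sk_mark_napi_id_once(), but for XDP/AF_XDP receive paths where the
 * NAPI ID comes from the xdp_buff's rx queue info rather than an skb.
 */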
static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
					    const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}

#endif /* _LINUX_NET_BUSY_POLL_H */