/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000
#define STAB_SIZE_LOG_MAX	30

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

#define qdisc_priv(q)							\
	_Generic(q,							\
		 const struct Qdisc * : (const void *)&q->privdata,	\
		 struct Qdisc * : (void *)&q->privdata)

static inline struct Qdisc *qdisc_from_priv(void *priv)
{
	return container_of(priv, struct Qdisc, privdata);
}
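
/*
 * Illustrative sketch only (not part of this header's API): a qdisc keeps
 * its state in the privdata area and reaches it through qdisc_priv(), while
 * qdisc_from_priv() recovers the owning Qdisc from that private pointer.
 * The qdisc name and its private struct below are hypothetical.
 *
 *	struct foo_sched_data {
 *		u32 limit;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		q->limit = qdisc_dev(sch)->tx_queue_len;
 *		WARN_ON(qdisc_from_priv(q) != sch);
 *		return 0;
 *	}
 */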

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   Normal IP packet size ~ 512byte, hence:

   0.5Kbyte/1Mbyte/sec = 0.5msec, so we need a 50usec timer for
   10Mbit ethernet.

   10msec resolution -> <50Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, because we may use an artificial
   clock evaluated by integration of the network data flow
   in the most critical places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT		6
#define PSCHED_TICKS2NS(x)	((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)	((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC	PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT	0

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}
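
/*
 * Worked example (derived from the definitions above): with PSCHED_SHIFT = 6,
 * one psched tick is 2^6 = 64ns, so PSCHED_TICKS_PER_SEC is
 * 1,000,000,000 >> 6 = 15,625,000 ticks.  A 1ms delay converts as
 * PSCHED_NS2TICKS(1,000,000) = 15,625 ticks, and PSCHED_TICKS2NS(15,625)
 * gives back 1,000,000ns exactly; values that are not multiples of 64ns
 * lose their low bits to the shift, which is the price of avoiding a
 * 64-bit divide on the fast path.
 */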

struct qdisc_watchdog {
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
					      u64 expires)
{
	return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}
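
/*
 * Typical use, shown here only as an illustrative sketch: a non-work-conserving
 * qdisc arms the watchdog from its ->dequeue() when the head packet is not yet
 * eligible to be sent, so the queue is rerun once the packet becomes due.
 * The qdisc below, its private struct and its next_tx_ns field are
 * hypothetical.
 *
 *	static struct sk_buff *foo_dequeue(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *		struct sk_buff *skb = qdisc_peek_head(sch);
 *
 *		if (!skb)
 *			return NULL;
 *		if (ktime_get_ns() < q->next_tx_ns) {
 *			qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_ns);
 *			return NULL;
 *		}
 *		return qdisc_dequeue_head(sch);
 *	}
 */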

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
void unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* Calculate maximal size of packet seen by hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu) + dev->hard_header_len;
}

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_query_caps_base {
	enum tc_setup_type type;
	void *caps;
};

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

struct tc_mqprio_caps {
	bool validate_queue_counts:1;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	struct netlink_ext_ack *extack;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
	unsigned long preemptible_tcs;
};

struct tc_taprio_caps {
	bool supports_queue_max_sdu:1;
	bool gate_mask_per_txq:1;
	/* Device expects lower TXQ numbers to have higher priority over higher
	 * TXQs, regardless of their TC mapping. DO NOT USE FOR NEW DRIVERS,
	 * INSTEAD ENFORCE A PROPER TC:TXQ MAPPING COMING FROM USER SPACE.
	 */
	bool broken_mqprio:1;
};

enum tc_taprio_qopt_cmd {
	TAPRIO_CMD_REPLACE,
	TAPRIO_CMD_DESTROY,
	TAPRIO_CMD_STATS,
	TAPRIO_CMD_QUEUE_STATS,
};

/**
 * struct tc_taprio_qopt_stats - IEEE 802.1Qbv statistics
 * @window_drops: Frames that were dropped because they were too large to be
 *	transmitted in any of the allotted time windows (open gates) for their
 *	traffic class.
 * @tx_overruns: Frames still being transmitted by the MAC after the
 *	transmission gate associated with their traffic class has closed.
 *	Equivalent to `12.29.1.1.2 TransmissionOverrun` from 802.1Q-2018.
 */
struct tc_taprio_qopt_stats {
	u64 window_drops;
	u64 tx_overruns;
};

struct tc_taprio_qopt_queue_stats {
	int queue;
	struct tc_taprio_qopt_stats stats;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	enum tc_taprio_qopt_cmd cmd;

	union {
		/* TAPRIO_CMD_STATS */
		struct tc_taprio_qopt_stats stats;
		/* TAPRIO_CMD_QUEUE_STATS */
		struct tc_taprio_qopt_queue_stats queue_stats;
		/* TAPRIO_CMD_REPLACE */
		struct {
			struct tc_mqprio_qopt_offload mqprio;
			struct netlink_ext_ack *extack;
			ktime_t base_time;
			u64 cycle_time;
			u64 cycle_time_extension;
			u32 max_sdu[TC_MAX_QUEUE];

			size_t num_entries;
			struct tc_taprio_sched_entry entries[];
		};
	};
};
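
/*
 * Driver-side sketch, illustrative only: a driver that advertises taprio
 * offload receives this structure through ndo_setup_tc() and typically
 * switches on ->cmd, walking the schedule entries on TAPRIO_CMD_REPLACE.
 * The driver function and the gate-programming helpers below are
 * hypothetical.
 *
 *	static int foo_setup_taprio(struct net_device *dev,
 *				    struct tc_taprio_qopt_offload *offload)
 *	{
 *		size_t i;
 *
 *		switch (offload->cmd) {
 *		case TAPRIO_CMD_REPLACE:
 *			for (i = 0; i < offload->num_entries; i++)
 *				foo_program_gate(dev, offload->base_time,
 *						 offload->entries[i].gate_mask,
 *						 offload->entries[i].interval);
 *			return 0;
 *		case TAPRIO_CMD_DESTROY:
 *			foo_disable_gates(dev);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */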

#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO)

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

#else

/* Reference counting */
static inline struct tc_taprio_qopt_offload *
taprio_offload_get(struct tc_taprio_qopt_offload *offload)
{
	return NULL;
}

static inline void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
}

#endif

/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
 * not mistaken for a software timestamp, because this will otherwise prevent
 * the dispatch of hardware timestamps to the socket.
 */
static inline void skb_txtime_consumed(struct sk_buff *skb)
{
	skb->tstamp = ktime_set(0, 0);
}

struct tc_skb_cb {
	struct qdisc_skb_cb qdisc_cb;

	u16 mru;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
	u16 zone; /* Only valid if post_ct = true */
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	return cb;
}

static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
				       unsigned long cl,
				       struct qdisc_walker *arg)
{
	if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
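
/*
 * Typical use in a qdisc's ->walk() callback, shown as an illustrative sketch
 * only; the qdisc and its number of classes are hypothetical.  The helper
 * stops the walk as soon as it returns false.
 *
 *	static void foo_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 *	{
 *		unsigned int i;
 *
 *		if (walker->stop)
 *			return;
 *		for (i = 0; i < FOO_NUM_CLASSES; i++) {
 *			if (!tc_qdisc_stats_dump(sch, i + 1, walker))
 *				return;
 *		}
 *	}
 */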

#endif