1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * net/sched/sch_netem.c Network emulator |
4 | * |
5 | * Many of the algorithms and ideas for this came from |
6 | * NIST Net which is not copyrighted. |
7 | * |
8 | * Authors: Stephen Hemminger <shemminger@osdl.org> |
9 | * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> |
10 | */ |
11 | |
12 | #include <linux/mm.h> |
13 | #include <linux/module.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/types.h> |
16 | #include <linux/kernel.h> |
17 | #include <linux/errno.h> |
18 | #include <linux/skbuff.h> |
19 | #include <linux/vmalloc.h> |
20 | #include <linux/rtnetlink.h> |
21 | #include <linux/reciprocal_div.h> |
22 | #include <linux/rbtree.h> |
23 | |
24 | #include <net/gso.h> |
25 | #include <net/netlink.h> |
26 | #include <net/pkt_sched.h> |
27 | #include <net/inet_ecn.h> |
28 | |
29 | #define VERSION "1.3" |
30 | |
31 | /* Network Emulation Queuing algorithm. |
32 | ==================================== |
33 | |
	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD
37 | |
38 | ---------------------------------------------------------------- |
39 | |
40 | This started out as a simple way to delay outgoing packets to |
41 | test TCP but has grown to include most of the functionality |
42 | of a full blown network emulator like NISTnet. It can delay |
43 | packets and add random jitter (and correlation). The random |
44 | distribution can be loaded from a table as well to provide |
45 | normal, Pareto, or experimental curves. Packet loss, |
46 | duplication, and reordering can also be emulated. |
47 | |
	This qdisc does not do classification; that can be handled by
	layering other disciplines.  It does not need to do bandwidth
	control either, since that can be handled by using a token
	bucket or other rate control.
52 | |
53 | Correlated Loss Generator models |
54 | |
	Added generation of correlated loss according to two models: a
	4-state Markov chain (the "GI" model) and the "Gilbert-Elliot" model.
57 | |
58 | References: |
59 | [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG |
60 | [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general |
61 | and intuitive loss model for packet networks and its implementation |
62 | in the Netem module in the Linux kernel", available in [1] |
63 | |
	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
65 | Fabio Ludovici <fabio.ludovici at yahoo.it> |
66 | */ |
67 | |
68 | struct disttable { |
69 | u32 size; |
70 | s16 table[] __counted_by(size); |
71 | }; |
72 | |
73 | struct netem_sched_data { |
74 | /* internal t(ime)fifo qdisc uses t_root and sch->limit */ |
75 | struct rb_root t_root; |
76 | |
77 | /* a linear queue; reduces rbtree rebalancing when jitter is low */ |
78 | struct sk_buff *t_head; |
79 | struct sk_buff *t_tail; |
80 | |
81 | /* optional qdisc for classful handling (NULL at netem init) */ |
82 | struct Qdisc *qdisc; |
83 | |
84 | struct qdisc_watchdog watchdog; |
85 | |
86 | s64 latency; |
87 | s64 jitter; |
88 | |
89 | u32 loss; |
90 | u32 ecn; |
91 | u32 limit; |
92 | u32 counter; |
93 | u32 gap; |
94 | u32 duplicate; |
95 | u32 reorder; |
96 | u32 corrupt; |
97 | u64 rate; |
98 | s32 packet_overhead; |
99 | u32 cell_size; |
100 | struct reciprocal_value cell_size_reciprocal; |
101 | s32 cell_overhead; |
102 | |
103 | struct crndstate { |
104 | u32 last; |
105 | u32 rho; |
106 | } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; |
107 | |
108 | struct prng { |
109 | u64 seed; |
110 | struct rnd_state prng_state; |
111 | } prng; |
112 | |
113 | struct disttable *delay_dist; |
114 | |
115 | enum { |
116 | CLG_RANDOM, |
117 | CLG_4_STATES, |
118 | CLG_GILB_ELL, |
119 | } loss_model; |
120 | |
121 | enum { |
122 | TX_IN_GAP_PERIOD = 1, |
123 | TX_IN_BURST_PERIOD, |
124 | LOST_IN_GAP_PERIOD, |
125 | LOST_IN_BURST_PERIOD, |
126 | } _4_state_model; |
127 | |
128 | enum { |
129 | GOOD_STATE = 1, |
130 | BAD_STATE, |
131 | } GE_state_model; |
132 | |
133 | /* Correlated Loss Generation models */ |
134 | struct clgstate { |
135 | /* state of the Markov chain */ |
136 | u8 state; |
137 | |
138 | /* 4-states and Gilbert-Elliot models */ |
139 | u32 a1; /* p13 for 4-states or p for GE */ |
140 | u32 a2; /* p31 for 4-states or r for GE */ |
141 | u32 a3; /* p32 for 4-states or h for GE */ |
142 | u32 a4; /* p14 for 4-states or 1-k for GE */ |
143 | u32 a5; /* p23 used only in 4-states */ |
144 | } clg; |
145 | |
146 | struct tc_netem_slot slot_config; |
147 | struct slotstate { |
148 | u64 slot_next; |
149 | s32 packets_left; |
150 | s32 bytes_left; |
151 | } slot; |
152 | |
153 | struct disttable *slot_dist; |
154 | }; |
155 | |
156 | /* Time stamp put into socket buffer control block |
157 | * Only valid when skbs are in our internal t(ime)fifo queue. |
158 | * |
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
162 | */ |
163 | struct netem_skb_cb { |
164 | u64 time_to_send; |
165 | }; |
166 | |
167 | static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) |
168 | { |
169 | /* we assume we can use skb next/prev/tstamp as storage for rb_node */ |
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
171 | return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; |
172 | } |
173 | |
174 | /* init_crandom - initialize correlated random number generator |
175 | * Use entropy source for initial seed. |
176 | */ |
177 | static void init_crandom(struct crndstate *state, unsigned long rho) |
178 | { |
179 | state->rho = rho; |
180 | state->last = get_random_u32(); |
181 | } |
182 | |
183 | /* get_crandom - correlated random number generator |
184 | * Next number depends on last value. |
185 | * rho is scaled to avoid floating point. |
186 | */ |
187 | static u32 get_crandom(struct crndstate *state, struct prng *p) |
188 | { |
189 | u64 value, rho; |
190 | unsigned long answer; |
191 | struct rnd_state *s = &p->prng_state; |
192 | |
	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32_state(s);

	value = prandom_u32_state(s);
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
199 | state->last = answer; |
200 | return answer; |
201 | } |
202 | |
203 | /* loss_4state - 4-state model loss generator |
204 | * Generates losses according to the 4-state Markov chain adopted in |
205 | * the GI (General and Intuitive) loss model. |
206 | */ |
207 | static bool loss_4state(struct netem_sched_data *q) |
208 | { |
209 | struct clgstate *clg = &q->clg; |
	u32 rnd = prandom_u32_state(&q->prng.prng_state);
211 | |
212 | /* |
	 * Compare rnd with the transition probabilities out of the
	 * current state, then decide the next state and whether the
	 * next packet is transmitted or lost.
216 | * The four states correspond to: |
217 | * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period |
218 | * LOST_IN_GAP_PERIOD => isolated losses within a gap period |
219 | * LOST_IN_BURST_PERIOD => lost packets within a burst period |
220 | * TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period |
221 | */ |
222 | switch (clg->state) { |
223 | case TX_IN_GAP_PERIOD: |
224 | if (rnd < clg->a4) { |
225 | clg->state = LOST_IN_GAP_PERIOD; |
226 | return true; |
227 | } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { |
228 | clg->state = LOST_IN_BURST_PERIOD; |
229 | return true; |
230 | } else if (clg->a1 + clg->a4 < rnd) { |
231 | clg->state = TX_IN_GAP_PERIOD; |
232 | } |
233 | |
234 | break; |
235 | case TX_IN_BURST_PERIOD: |
236 | if (rnd < clg->a5) { |
237 | clg->state = LOST_IN_BURST_PERIOD; |
238 | return true; |
239 | } else { |
240 | clg->state = TX_IN_BURST_PERIOD; |
241 | } |
242 | |
243 | break; |
244 | case LOST_IN_BURST_PERIOD: |
245 | if (rnd < clg->a3) |
246 | clg->state = TX_IN_BURST_PERIOD; |
247 | else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { |
248 | clg->state = TX_IN_GAP_PERIOD; |
249 | } else if (clg->a2 + clg->a3 < rnd) { |
250 | clg->state = LOST_IN_BURST_PERIOD; |
251 | return true; |
252 | } |
253 | break; |
254 | case LOST_IN_GAP_PERIOD: |
255 | clg->state = TX_IN_GAP_PERIOD; |
256 | break; |
257 | } |
258 | |
259 | return false; |
260 | } |
261 | |
262 | /* loss_gilb_ell - Gilbert-Elliot model loss generator |
263 | * Generates losses according to the Gilbert-Elliot loss model or |
264 | * its special cases (Gilbert or Simple Gilbert) |
265 | * |
266 | * Makes a comparison between random number and the transition |
267 | * probabilities outgoing from the current state, then decides the |
268 | * next state. A second random number is extracted and the comparison |
269 | * with the loss probability of the current state decides if the next |
270 | * packet will be transmitted or lost. |
271 | */ |
272 | static bool loss_gilb_ell(struct netem_sched_data *q) |
273 | { |
274 | struct clgstate *clg = &q->clg; |
275 | struct rnd_state *s = &q->prng.prng_state; |
276 | |
277 | switch (clg->state) { |
278 | case GOOD_STATE: |
		if (prandom_u32_state(s) < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32_state(s) < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32_state(s) < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32_state(s) > clg->a3)
288 | return true; |
289 | } |
290 | |
291 | return false; |
292 | } |
293 | |
294 | static bool loss_event(struct netem_sched_data *q) |
295 | { |
296 | switch (q->loss_model) { |
297 | case CLG_RANDOM: |
298 | /* Random packet drop 0 => none, ~0 => all */ |
		return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
300 | |
301 | case CLG_4_STATES: |
		/* 4-state loss model algorithm (also used for the GI model).
		 * Asks the 4-state Markov loss generator whether the next
		 * packet should be dropped.
		 */
307 | return loss_4state(q); |
308 | |
309 | case CLG_GILB_ELL: |
		/* Gilbert-Elliot loss model algorithm.
		 * Asks the Gilbert-Elliot loss generator whether the next
		 * packet should be dropped.
		 */
315 | return loss_gilb_ell(q); |
316 | } |
317 | |
318 | return false; /* not reached */ |
319 | } |
320 | |
321 | |
322 | /* tabledist - return a pseudo-randomly distributed value with mean mu and |
323 | * std deviation sigma. Uses table lookup to approximate the desired |
324 | * distribution, and a uniformly-distributed pseudo-random source. |
325 | */ |
326 | static s64 tabledist(s64 mu, s32 sigma, |
327 | struct crndstate *state, |
328 | struct prng *prng, |
329 | const struct disttable *dist) |
330 | { |
331 | s64 x; |
332 | long t; |
333 | u32 rnd; |
334 | |
335 | if (sigma == 0) |
336 | return mu; |
337 | |
	rnd = get_crandom(state, prng);
339 | |
340 | /* default uniform distribution */ |
341 | if (dist == NULL) |
342 | return ((rnd % (2 * (u32)sigma)) + mu) - sigma; |
343 | |
344 | t = dist->table[rnd % dist->size]; |
345 | x = (sigma % NETEM_DIST_SCALE) * t; |
346 | if (x >= 0) |
347 | x += NETEM_DIST_SCALE/2; |
348 | else |
349 | x -= NETEM_DIST_SCALE/2; |
350 | |
351 | return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; |
352 | } |
353 | |
354 | static u64 packet_time_ns(u64 len, const struct netem_sched_data *q) |
355 | { |
356 | len += q->packet_overhead; |
357 | |
358 | if (q->cell_size) { |
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
360 | |
361 | if (len > cells * q->cell_size) /* extra cell needed for remainder */ |
362 | cells++; |
363 | len = cells * (q->cell_size + q->cell_overhead); |
364 | } |
365 | |
	return div64_u64(len * NSEC_PER_SEC, q->rate);
367 | } |
368 | |
369 | static void tfifo_reset(struct Qdisc *sch) |
370 | { |
371 | struct netem_sched_data *q = qdisc_priv(sch); |
372 | struct rb_node *p = rb_first(&q->t_root); |
373 | |
374 | while (p) { |
375 | struct sk_buff *skb = rb_to_skb(p); |
376 | |
377 | p = rb_next(p); |
378 | rb_erase(&skb->rbnode, &q->t_root); |
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
383 | q->t_head = NULL; |
384 | q->t_tail = NULL; |
385 | } |
386 | |
387 | static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) |
388 | { |
389 | struct netem_sched_data *q = qdisc_priv(sch); |
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
393 | if (q->t_tail) |
394 | q->t_tail->next = nskb; |
395 | else |
396 | q->t_head = nskb; |
397 | q->t_tail = nskb; |
398 | } else { |
399 | struct rb_node **p = &q->t_root.rb_node, *parent = NULL; |
400 | |
401 | while (*p) { |
402 | struct sk_buff *skb; |
403 | |
404 | parent = *p; |
405 | skb = rb_to_skb(parent); |
406 | if (tnext >= netem_skb_cb(skb)->time_to_send) |
407 | p = &parent->rb_right; |
408 | else |
409 | p = &parent->rb_left; |
410 | } |
		rb_link_node(&nskb->rbnode, parent, p);
412 | rb_insert_color(&nskb->rbnode, &q->t_root); |
413 | } |
414 | sch->q.qlen++; |
415 | } |
416 | |
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we segment it, return the first
 * packet to be corrupted, and re-enqueue the remaining frames
 */
421 | static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, |
422 | struct sk_buff **to_free) |
423 | { |
424 | struct sk_buff *segs; |
425 | netdev_features_t features = netif_skb_features(skb); |
426 | |
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
430 | qdisc_drop(skb, sch, to_free); |
431 | return NULL; |
432 | } |
433 | consume_skb(skb); |
434 | return segs; |
435 | } |
436 | |
437 | /* |
438 | * Insert one skb into qdisc. |
439 | * Note: parent depends on return value to account for queue length. |
440 | * NET_XMIT_DROP: queue length didn't change. |
441 | * NET_XMIT_SUCCESS: one skb was queued. |
442 | */ |
443 | static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
444 | struct sk_buff **to_free) |
445 | { |
446 | struct netem_sched_data *q = qdisc_priv(sch); |
447 | /* We don't fill cb now as skb_unshare() may invalidate it */ |
448 | struct netem_skb_cb *cb; |
449 | struct sk_buff *skb2; |
450 | struct sk_buff *segs = NULL; |
451 | unsigned int prev_len = qdisc_pkt_len(skb); |
452 | int count = 1; |
453 | int rc = NET_XMIT_SUCCESS; |
454 | int rc_drop = NET_XMIT_DROP; |
455 | |
456 | /* Do not fool qdisc_drop_all() */ |
457 | skb->prev = NULL; |
458 | |
459 | /* Random duplication */ |
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
461 | ++count; |
462 | |
463 | /* Drop packet? */ |
464 | if (loss_event(q)) { |
465 | if (q->ecn && INET_ECN_set_ce(skb)) |
466 | qdisc_qstats_drop(sch); /* mark packet */ |
467 | else |
468 | --count; |
469 | } |
470 | if (count == 0) { |
471 | qdisc_qstats_drop(sch); |
472 | __qdisc_drop(skb, to_free); |
473 | return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
474 | } |
475 | |
476 | /* If a delay is expected, orphan the skb. (orphaning usually takes |
477 | * place at TX completion time, so _before_ the link transit delay) |
478 | */ |
479 | if (q->latency || q->jitter || q->rate) |
480 | skb_orphan_partial(skb); |
481 | |
482 | /* |
483 | * If we need to duplicate packet, then re-insert at top of the |
484 | * qdisc tree, since parent queuer expects that only one |
485 | * skb will be queued. |
486 | */ |
487 | if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { |
		struct Qdisc *rootq = qdisc_root_bh(sch);
489 | u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ |
490 | |
491 | q->duplicate = 0; |
492 | rootq->enqueue(skb2, rootq, to_free); |
493 | q->duplicate = dupsave; |
494 | rc_drop = NET_XMIT_SUCCESS; |
495 | } |
496 | |
497 | /* |
498 | * Randomized packet corruption. |
499 | * Make copy if needed since we are modifying |
500 | * If packet is going to be hardware checksummed, then |
501 | * do it now in software before we mangle it. |
502 | */ |
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
504 | if (skb_is_gso(skb)) { |
505 | skb = netem_segment(skb, sch, to_free); |
506 | if (!skb) |
507 | return rc_drop; |
508 | segs = skb->next; |
509 | skb_mark_not_on_list(skb); |
510 | qdisc_skb_cb(skb)->pkt_len = skb->len; |
511 | } |
512 | |
513 | skb = skb_unshare(skb, GFP_ATOMIC); |
514 | if (unlikely(!skb)) { |
515 | qdisc_qstats_drop(sch); |
516 | goto finish_segs; |
517 | } |
518 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
519 | skb_checksum_help(skb)) { |
520 | qdisc_drop(skb, sch, to_free); |
521 | skb = NULL; |
522 | goto finish_segs; |
523 | } |
524 | |
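		/* Flip one random bit in a random byte of the linear header */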
		skb->data[get_random_u32_below(skb_headlen(skb))] ^=
			1<<get_random_u32_below(8);
527 | } |
528 | |
529 | if (unlikely(sch->q.qlen >= sch->limit)) { |
530 | /* re-link segs, so that qdisc_drop_all() frees them all */ |
531 | skb->next = segs; |
532 | qdisc_drop_all(skb, sch, to_free); |
533 | return rc_drop; |
534 | } |
535 | |
536 | qdisc_qstats_backlog_inc(sch, skb); |
537 | |
538 | cb = netem_skb_cb(skb); |
539 | if (q->gap == 0 || /* not doing reordering */ |
540 | q->counter < q->gap - 1 || /* inside last reordering gap */ |
	    q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
542 | u64 now; |
543 | s64 delay; |
544 | |
		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, &q->prng, q->delay_dist);
547 | |
548 | now = ktime_get_ns(); |
549 | |
550 | if (q->rate) { |
551 | struct netem_skb_cb *last = NULL; |
552 | |
553 | if (sch->q.tail) |
			last = netem_skb_cb(sch->q.tail);
555 | if (q->t_root.rb_node) { |
556 | struct sk_buff *t_skb; |
557 | struct netem_skb_cb *t_last; |
558 | |
559 | t_skb = skb_rb_last(&q->t_root); |
				t_last = netem_skb_cb(t_skb);
561 | if (!last || |
562 | t_last->time_to_send > last->time_to_send) |
563 | last = t_last; |
564 | } |
565 | if (q->t_tail) { |
			struct netem_skb_cb *t_last =
				netem_skb_cb(q->t_tail);
568 | |
569 | if (!last || |
570 | t_last->time_to_send > last->time_to_send) |
571 | last = t_last; |
572 | } |
573 | |
574 | if (last) { |
575 | /* |
576 | * Last packet in queue is reference point (now), |
577 | * calculate this time bonus and subtract |
578 | * from delay. |
579 | */ |
580 | delay -= last->time_to_send - now; |
581 | delay = max_t(s64, 0, delay); |
582 | now = last->time_to_send; |
583 | } |
584 | |
			delay += packet_time_ns(qdisc_pkt_len(skb), q);
586 | } |
587 | |
588 | cb->time_to_send = now + delay; |
589 | ++q->counter; |
		tfifo_enqueue(skb, sch);
591 | } else { |
592 | /* |
593 | * Do re-ordering by putting one out of N packets at the front |
594 | * of the queue. |
595 | */ |
596 | cb->time_to_send = ktime_get_ns(); |
597 | q->counter = 0; |
598 | |
		__qdisc_enqueue_head(skb, &sch->q);
600 | sch->qstats.requeues++; |
601 | } |
602 | |
603 | finish_segs: |
604 | if (segs) { |
605 | unsigned int len, last_len; |
606 | int nb; |
607 | |
608 | len = skb ? skb->len : 0; |
609 | nb = skb ? 1 : 0; |
610 | |
611 | while (segs) { |
612 | skb2 = segs->next; |
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
617 | if (rc != NET_XMIT_SUCCESS) { |
618 | if (net_xmit_drop_count(rc)) |
619 | qdisc_qstats_drop(sch); |
620 | } else { |
621 | nb++; |
622 | len += last_len; |
623 | } |
624 | segs = skb2; |
625 | } |
626 | /* Parent qdiscs accounted for 1 skb of size @prev_len */ |
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
628 | } else if (!skb) { |
629 | return NET_XMIT_DROP; |
630 | } |
631 | return NET_XMIT_SUCCESS; |
632 | } |
633 | |
/* Delay the next round by scheduling a new future slot, refilling its
 * byte and packet budgets from the configured limits.
 */
637 | |
638 | static void get_slot_next(struct netem_sched_data *q, u64 now) |
639 | { |
640 | s64 next_delay; |
641 | |
642 | if (!q->slot_dist) |
643 | next_delay = q->slot_config.min_delay + |
644 | (get_random_u32() * |
645 | (q->slot_config.max_delay - |
646 | q->slot_config.min_delay) >> 32); |
647 | else |
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, &q->prng, q->slot_dist);
651 | |
652 | q->slot.slot_next = now + next_delay; |
653 | q->slot.packets_left = q->slot_config.max_packets; |
654 | q->slot.bytes_left = q->slot_config.max_bytes; |
655 | } |
656 | |
657 | static struct sk_buff *netem_peek(struct netem_sched_data *q) |
658 | { |
659 | struct sk_buff *skb = skb_rb_first(&q->t_root); |
660 | u64 t1, t2; |
661 | |
662 | if (!skb) |
663 | return q->t_head; |
664 | if (!q->t_head) |
665 | return skb; |
666 | |
667 | t1 = netem_skb_cb(skb)->time_to_send; |
	t2 = netem_skb_cb(q->t_head)->time_to_send;
669 | if (t1 < t2) |
670 | return skb; |
671 | return q->t_head; |
672 | } |
673 | |
674 | static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb) |
675 | { |
676 | if (skb == q->t_head) { |
677 | q->t_head = skb->next; |
678 | if (!q->t_head) |
679 | q->t_tail = NULL; |
680 | } else { |
681 | rb_erase(&skb->rbnode, &q->t_root); |
682 | } |
683 | } |
684 | |
685 | static struct sk_buff *netem_dequeue(struct Qdisc *sch) |
686 | { |
687 | struct netem_sched_data *q = qdisc_priv(sch); |
688 | struct sk_buff *skb; |
689 | |
690 | tfifo_dequeue: |
	skb = __qdisc_dequeue_head(&sch->q);
692 | if (skb) { |
693 | qdisc_qstats_backlog_dec(sch, skb); |
694 | deliver: |
695 | qdisc_bstats_update(sch, skb); |
696 | return skb; |
697 | } |
698 | skb = netem_peek(q); |
699 | if (skb) { |
700 | u64 time_to_send; |
701 | u64 now = ktime_get_ns(); |
702 | |
		/* is there more time remaining? */
704 | time_to_send = netem_skb_cb(skb)->time_to_send; |
705 | if (q->slot.slot_next && q->slot.slot_next < time_to_send) |
706 | get_slot_next(q, now); |
707 | |
708 | if (time_to_send <= now && q->slot.slot_next <= now) { |
709 | netem_erase_head(q, skb); |
710 | sch->q.qlen--; |
711 | qdisc_qstats_backlog_dec(sch, skb); |
712 | skb->next = NULL; |
713 | skb->prev = NULL; |
714 | /* skb->dev shares skb->rbnode area, |
715 | * we need to restore its value. |
716 | */ |
			skb->dev = qdisc_dev(sch);
718 | |
719 | if (q->slot.slot_next) { |
720 | q->slot.packets_left--; |
721 | q->slot.bytes_left -= qdisc_pkt_len(skb); |
722 | if (q->slot.packets_left <= 0 || |
723 | q->slot.bytes_left <= 0) |
724 | get_slot_next(q, now); |
725 | } |
726 | |
727 | if (q->qdisc) { |
728 | unsigned int pkt_len = qdisc_pkt_len(skb); |
729 | struct sk_buff *to_free = NULL; |
730 | int err; |
731 | |
				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
734 | if (err != NET_XMIT_SUCCESS && |
735 | net_xmit_drop_count(err)) { |
736 | qdisc_qstats_drop(sch); |
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
739 | } |
740 | goto tfifo_dequeue; |
741 | } |
742 | goto deliver; |
743 | } |
744 | |
745 | if (q->qdisc) { |
746 | skb = q->qdisc->ops->dequeue(q->qdisc); |
747 | if (skb) |
748 | goto deliver; |
749 | } |
750 | |
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
754 | } |
755 | |
756 | if (q->qdisc) { |
757 | skb = q->qdisc->ops->dequeue(q->qdisc); |
758 | if (skb) |
759 | goto deliver; |
760 | } |
761 | return NULL; |
762 | } |
763 | |
764 | static void netem_reset(struct Qdisc *sch) |
765 | { |
766 | struct netem_sched_data *q = qdisc_priv(sch); |
767 | |
768 | qdisc_reset_queue(sch); |
769 | tfifo_reset(sch); |
770 | if (q->qdisc) |
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
773 | } |
774 | |
775 | static void dist_free(struct disttable *d) |
776 | { |
	kvfree(d);
778 | } |
779 | |
780 | /* |
781 | * Distribution data is a variable size payload containing |
782 | * signed 16 bit values. |
783 | */ |
784 | |
785 | static int get_dist_table(struct disttable **tbl, const struct nlattr *attr) |
786 | { |
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
789 | struct disttable *d; |
790 | int i; |
791 | |
792 | if (!n || n > NETEM_DIST_MAX) |
793 | return -EINVAL; |
794 | |
795 | d = kvmalloc(struct_size(d, table, n), GFP_KERNEL); |
796 | if (!d) |
797 | return -ENOMEM; |
798 | |
799 | d->size = n; |
800 | for (i = 0; i < n; i++) |
801 | d->table[i] = data[i]; |
802 | |
803 | *tbl = d; |
804 | return 0; |
805 | } |
806 | |
807 | static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) |
808 | { |
	const struct tc_netem_slot *c = nla_data(attr);
810 | |
811 | q->slot_config = *c; |
812 | if (q->slot_config.max_packets == 0) |
813 | q->slot_config.max_packets = INT_MAX; |
814 | if (q->slot_config.max_bytes == 0) |
815 | q->slot_config.max_bytes = INT_MAX; |
816 | |
817 | /* capping dist_jitter to the range acceptable by tabledist() */ |
818 | q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter)); |
819 | |
820 | q->slot.packets_left = q->slot_config.max_packets; |
821 | q->slot.bytes_left = q->slot_config.max_bytes; |
822 | if (q->slot_config.min_delay | q->slot_config.max_delay | |
823 | q->slot_config.dist_jitter) |
824 | q->slot.slot_next = ktime_get_ns(); |
825 | else |
826 | q->slot.slot_next = 0; |
827 | } |
828 | |
829 | static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) |
830 | { |
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
836 | } |
837 | |
838 | static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) |
839 | { |
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
844 | } |
845 | |
846 | static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) |
847 | { |
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
852 | } |
853 | |
854 | static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) |
855 | { |
	const struct tc_netem_rate *r = nla_data(attr);
857 | |
858 | q->rate = r->rate; |
859 | q->packet_overhead = r->packet_overhead; |
860 | q->cell_size = r->cell_size; |
861 | q->cell_overhead = r->cell_overhead; |
862 | if (q->cell_size) |
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
864 | else |
865 | q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; |
866 | } |
867 | |
868 | static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) |
869 | { |
870 | const struct nlattr *la; |
871 | int rem; |
872 | |
873 | nla_for_each_nested(la, attr, rem) { |
		u16 type = nla_type(la);
875 | |
876 | switch (type) { |
877 | case NETEM_LOSS_GI: { |
			const struct tc_netem_gimodel *gi = nla_data(la);
879 | |
			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
882 | return -EINVAL; |
883 | } |
884 | |
885 | q->loss_model = CLG_4_STATES; |
886 | |
887 | q->clg.state = TX_IN_GAP_PERIOD; |
888 | q->clg.a1 = gi->p13; |
889 | q->clg.a2 = gi->p31; |
890 | q->clg.a3 = gi->p32; |
891 | q->clg.a4 = gi->p14; |
892 | q->clg.a5 = gi->p23; |
893 | break; |
894 | } |
895 | |
896 | case NETEM_LOSS_GE: { |
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
901 | return -EINVAL; |
902 | } |
903 | |
904 | q->loss_model = CLG_GILB_ELL; |
905 | q->clg.state = GOOD_STATE; |
906 | q->clg.a1 = ge->p; |
907 | q->clg.a2 = ge->r; |
908 | q->clg.a3 = ge->h; |
909 | q->clg.a4 = ge->k1; |
910 | break; |
911 | } |
912 | |
913 | default: |
914 | pr_info("netem: unknown loss type %u\n" , type); |
915 | return -EINVAL; |
916 | } |
917 | } |
918 | |
919 | return 0; |
920 | } |
921 | |
922 | static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { |
923 | [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, |
924 | [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, |
925 | [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, |
926 | [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, |
927 | [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, |
928 | [TCA_NETEM_ECN] = { .type = NLA_U32 }, |
929 | [TCA_NETEM_RATE64] = { .type = NLA_U64 }, |
930 | [TCA_NETEM_LATENCY64] = { .type = NLA_S64 }, |
931 | [TCA_NETEM_JITTER64] = { .type = NLA_S64 }, |
932 | [TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) }, |
933 | [TCA_NETEM_PRNG_SEED] = { .type = NLA_U64 }, |
934 | }; |
935 | |
936 | static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, |
937 | const struct nla_policy *policy, int len) |
938 | { |
939 | int nested_len = nla_len(nla) - NLA_ALIGN(len); |
940 | |
941 | if (nested_len < 0) { |
942 | pr_info("netem: invalid attributes len %d\n" , nested_len); |
943 | return -EINVAL; |
944 | } |
945 | |
	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);
950 | |
951 | memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); |
952 | return 0; |
953 | } |
954 | |
955 | /* Parse netlink message to set options */ |
956 | static int netem_change(struct Qdisc *sch, struct nlattr *opt, |
957 | struct netlink_ext_ack *extack) |
958 | { |
959 | struct netem_sched_data *q = qdisc_priv(sch); |
960 | struct nlattr *tb[TCA_NETEM_MAX + 1]; |
961 | struct disttable *delay_dist = NULL; |
962 | struct disttable *slot_dist = NULL; |
963 | struct tc_netem_qopt *qopt; |
964 | struct clgstate old_clg; |
965 | int old_loss_model = CLG_RANDOM; |
966 | int ret; |
967 | |
	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
970 | if (ret < 0) |
971 | return ret; |
972 | |
973 | if (tb[TCA_NETEM_DELAY_DIST]) { |
		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
975 | if (ret) |
976 | goto table_free; |
977 | } |
978 | |
979 | if (tb[TCA_NETEM_SLOT_DIST]) { |
		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
981 | if (ret) |
982 | goto table_free; |
983 | } |
984 | |
	sch_tree_lock(sch);
986 | /* backup q->clg and q->loss_model */ |
987 | old_clg = q->clg; |
988 | old_loss_model = q->loss_model; |
989 | |
990 | if (tb[TCA_NETEM_LOSS]) { |
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
992 | if (ret) { |
993 | q->loss_model = old_loss_model; |
994 | q->clg = old_clg; |
995 | goto unlock; |
996 | } |
997 | } else { |
998 | q->loss_model = CLG_RANDOM; |
999 | } |
1000 | |
1001 | if (delay_dist) |
1002 | swap(q->delay_dist, delay_dist); |
1003 | if (slot_dist) |
1004 | swap(q->slot_dist, slot_dist); |
1005 | sch->limit = qopt->limit; |
1006 | |
1007 | q->latency = PSCHED_TICKS2NS(qopt->latency); |
1008 | q->jitter = PSCHED_TICKS2NS(qopt->jitter); |
1009 | q->limit = qopt->limit; |
1010 | q->gap = qopt->gap; |
1011 | q->counter = 0; |
1012 | q->loss = qopt->loss; |
1013 | q->duplicate = qopt->duplicate; |
1014 | |
1015 | /* for compatibility with earlier versions. |
1016 | * if gap is set, need to assume 100% probability |
1017 | */ |
1018 | if (q->gap) |
1019 | q->reorder = ~0; |
1020 | |
	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);
1032 | |
1033 | if (tb[TCA_NETEM_RATE64]) |
1034 | q->rate = max_t(u64, q->rate, |
1035 | nla_get_u64(tb[TCA_NETEM_RATE64])); |
1036 | |
	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);
1048 | |
1049 | /* capping jitter to the range acceptable by tabledist() */ |
1050 | q->jitter = min_t(s64, abs(q->jitter), INT_MAX); |
1051 | |
	if (tb[TCA_NETEM_PRNG_SEED])
		q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
	else
		q->prng.seed = get_random_u64();
	prandom_seed_state(&q->prng.prng_state, q->prng.seed);
1057 | |
1058 | unlock: |
	sch_tree_unlock(sch);
1060 | |
1061 | table_free: |
	dist_free(delay_dist);
	dist_free(slot_dist);
1064 | return ret; |
1065 | } |
1066 | |
1067 | static int netem_init(struct Qdisc *sch, struct nlattr *opt, |
1068 | struct netlink_ext_ack *extack) |
1069 | { |
1070 | struct netem_sched_data *q = qdisc_priv(sch); |
1071 | int ret; |
1072 | |
	qdisc_watchdog_init(&q->watchdog, sch);
1074 | |
1075 | if (!opt) |
1076 | return -EINVAL; |
1077 | |
1078 | q->loss_model = CLG_RANDOM; |
1079 | ret = netem_change(sch, opt, extack); |
1080 | if (ret) |
1081 | pr_info("netem: change failed\n" ); |
1082 | return ret; |
1083 | } |
1084 | |
1085 | static void netem_destroy(struct Qdisc *sch) |
1086 | { |
1087 | struct netem_sched_data *q = qdisc_priv(sch); |
1088 | |
	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
1094 | } |
1095 | |
1096 | static int dump_loss_model(const struct netem_sched_data *q, |
1097 | struct sk_buff *skb) |
1098 | { |
1099 | struct nlattr *nest; |
1100 | |
	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
1102 | if (nest == NULL) |
1103 | goto nla_put_failure; |
1104 | |
1105 | switch (q->loss_model) { |
1106 | case CLG_RANDOM: |
1107 | /* legacy loss model */ |
		nla_nest_cancel(skb, nest);
1109 | return 0; /* no data */ |
1110 | |
1111 | case CLG_4_STATES: { |
1112 | struct tc_netem_gimodel gi = { |
1113 | .p13 = q->clg.a1, |
1114 | .p31 = q->clg.a2, |
1115 | .p32 = q->clg.a3, |
1116 | .p14 = q->clg.a4, |
1117 | .p23 = q->clg.a5, |
1118 | }; |
1119 | |
		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
1121 | goto nla_put_failure; |
1122 | break; |
1123 | } |
1124 | case CLG_GILB_ELL: { |
1125 | struct tc_netem_gemodel ge = { |
1126 | .p = q->clg.a1, |
1127 | .r = q->clg.a2, |
1128 | .h = q->clg.a3, |
1129 | .k1 = q->clg.a4, |
1130 | }; |
1131 | |
		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
1133 | goto nla_put_failure; |
1134 | break; |
1135 | } |
1136 | } |
1137 | |
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
1143 | return -1; |
1144 | } |
1145 | |
1146 | static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) |
1147 | { |
1148 | const struct netem_sched_data *q = qdisc_priv(sch); |
1149 | struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb); |
1150 | struct tc_netem_qopt qopt; |
1151 | struct tc_netem_corr cor; |
1152 | struct tc_netem_reorder reorder; |
1153 | struct tc_netem_corrupt corrupt; |
1154 | struct tc_netem_rate rate; |
1155 | struct tc_netem_slot slot; |
1156 | |
1157 | qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency), |
1158 | UINT_MAX); |
1159 | qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter), |
1160 | UINT_MAX); |
1161 | qopt.limit = q->limit; |
1162 | qopt.loss = q->loss; |
1163 | qopt.gap = q->gap; |
1164 | qopt.duplicate = q->duplicate; |
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
1172 | goto nla_put_failure; |
1173 | |
1174 | cor.delay_corr = q->delay_cor.rho; |
1175 | cor.loss_corr = q->loss_cor.rho; |
1176 | cor.dup_corr = q->dup_cor.rho; |
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
1178 | goto nla_put_failure; |
1179 | |
1180 | reorder.probability = q->reorder; |
1181 | reorder.correlation = q->reorder_cor.rho; |
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
1183 | goto nla_put_failure; |
1184 | |
1185 | corrupt.probability = q->corrupt; |
1186 | corrupt.correlation = q->corrupt_cor.rho; |
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
1188 | goto nla_put_failure; |
1189 | |
1190 | if (q->rate >= (1ULL << 32)) { |
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
1193 | goto nla_put_failure; |
1194 | rate.rate = ~0U; |
1195 | } else { |
1196 | rate.rate = q->rate; |
1197 | } |
1198 | rate.packet_overhead = q->packet_overhead; |
1199 | rate.cell_size = q->cell_size; |
1200 | rate.cell_overhead = q->cell_overhead; |
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
1202 | goto nla_put_failure; |
1203 | |
	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
1205 | goto nla_put_failure; |
1206 | |
1207 | if (dump_loss_model(q, skb) != 0) |
1208 | goto nla_put_failure; |
1209 | |
1210 | if (q->slot_config.min_delay | q->slot_config.max_delay | |
1211 | q->slot_config.dist_jitter) { |
1212 | slot = q->slot_config; |
1213 | if (slot.max_packets == INT_MAX) |
1214 | slot.max_packets = 0; |
1215 | if (slot.max_bytes == INT_MAX) |
1216 | slot.max_bytes = 0; |
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
1218 | goto nla_put_failure; |
1219 | } |
1220 | |
	if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
			      TCA_NETEM_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
1229 | return -1; |
1230 | } |
1231 | |
1232 | static int netem_dump_class(struct Qdisc *sch, unsigned long cl, |
1233 | struct sk_buff *skb, struct tcmsg *tcm) |
1234 | { |
1235 | struct netem_sched_data *q = qdisc_priv(sch); |
1236 | |
1237 | if (cl != 1 || !q->qdisc) /* only one class */ |
1238 | return -ENOENT; |
1239 | |
1240 | tcm->tcm_handle |= TC_H_MIN(1); |
1241 | tcm->tcm_info = q->qdisc->handle; |
1242 | |
1243 | return 0; |
1244 | } |
1245 | |
1246 | static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, |
1247 | struct Qdisc **old, struct netlink_ext_ack *extack) |
1248 | { |
1249 | struct netem_sched_data *q = qdisc_priv(sch); |
1250 | |
	*old = qdisc_replace(sch, new, &q->qdisc);
1252 | return 0; |
1253 | } |
1254 | |
1255 | static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) |
1256 | { |
1257 | struct netem_sched_data *q = qdisc_priv(sch); |
1258 | return q->qdisc; |
1259 | } |
1260 | |
1261 | static unsigned long netem_find(struct Qdisc *sch, u32 classid) |
1262 | { |
1263 | return 1; |
1264 | } |
1265 | |
1266 | static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
1267 | { |
1268 | if (!walker->stop) { |
		if (!tc_qdisc_stats_dump(sch, 1, walker))
1270 | return; |
1271 | } |
1272 | } |
1273 | |
1274 | static const struct Qdisc_class_ops netem_class_ops = { |
1275 | .graft = netem_graft, |
1276 | .leaf = netem_leaf, |
1277 | .find = netem_find, |
1278 | .walk = netem_walk, |
1279 | .dump = netem_dump_class, |
1280 | }; |
1281 | |
1282 | static struct Qdisc_ops netem_qdisc_ops __read_mostly = { |
	.id = "netem",
1284 | .cl_ops = &netem_class_ops, |
1285 | .priv_size = sizeof(struct netem_sched_data), |
1286 | .enqueue = netem_enqueue, |
1287 | .dequeue = netem_dequeue, |
1288 | .peek = qdisc_peek_dequeued, |
1289 | .init = netem_init, |
1290 | .reset = netem_reset, |
1291 | .destroy = netem_destroy, |
1292 | .change = netem_change, |
1293 | .dump = netem_dump, |
1294 | .owner = THIS_MODULE, |
1295 | }; |
1296 | |
1297 | |
1298 | static int __init netem_module_init(void) |
1299 | { |
1300 | pr_info("netem: version " VERSION "\n" ); |
1301 | return register_qdisc(qops: &netem_qdisc_ops); |
1302 | } |
1303 | static void __exit netem_module_exit(void) |
1304 | { |
	unregister_qdisc(&netem_qdisc_ops);
1306 | } |
1307 | module_init(netem_module_init) |
1308 | module_exit(netem_module_exit) |
1309 | MODULE_LICENSE("GPL" ); |
1310 | |