/*
 * TCP HYBLA
 *
 * TCP-Hybla congestion control algorithm, based on:
 *   C. Caini, R. Firrincieli, "TCP Hybla: a TCP enhancement
 *   for heterogeneous networks",
 *   International Journal of Satellite Communications and Networking,
 *   September 2004
 *   Daniele Lacamera
 *   root at danielinux.net
 */

#include <linux/module.h>
#include <net/tcp.h>

/* TCP Hybla structure. */
struct hybla {
        bool hybla_en;          /* Hybla scaling enabled (true only in TCP_CA_Open) */
        u32  snd_cwnd_cents;    /* Accumulates cwnd increments < 1 segment, in <<7 fixed point */
        u32  rho;               /* Rho parameter, integer part */
        u32  rho2;              /* Rho * Rho, integer part */
        u32  rho_3ls;           /* Rho parameter, <<3 */
        u32  rho2_7ls;          /* Rho^2, <<7 */
        u32  minrtt_us;         /* Minimum smoothed round trip time value seen */
};

/* Hybla reference round trip time (default = 1/40 sec = 25 ms), in ms */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");
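
/*
 * Usage sketch (illustrative, not part of the algorithm): the module
 * parameter above and the standard congestion-control knobs are the
 * usual way to enable Hybla from a shell:
 *
 *   # modprobe tcp_hybla rtt0=25
 *   # sysctl -w net.ipv4.tcp_congestion_control=hybla
 *
 * rtt0 can also be changed at runtime through
 * /sys/module/tcp_hybla/parameters/rtt0 (permissions 0644 above).
 */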

/*
 * Refresh the Hybla parameters: rho is the ratio between the current
 * smoothed RTT and the reference RTT rtt0, kept in fixed point
 * (rho_3ls = rho << 3, rho2_7ls = rho^2 << 7) and clamped so rho >= 1.
 */
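/*
 * Worked example (illustrative numbers, not from the paper): with
 * rtt0 = 25 ms and a smoothed RTT of 200 ms, tcp_sk(sk)->srtt_us holds
 * 8 * 200000 = 1600000, so rho_3ls = 1600000 / 25000 = 64, rho = 8,
 * rho2_7ls = (64 * 64) << 1 = 8192 = (rho^2) << 7, and rho2 = 64.
 */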
static inline void hybla_recalc_param(struct sock *sk)
{
        struct hybla *ca = inet_csk_ca(sk);

        ca->rho_3ls = max_t(u32,
                            tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),
                            8U);
        ca->rho = ca->rho_3ls >> 3;
        ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
        ca->rho2 = ca->rho2_7ls >> 7;
}

static void hybla_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct hybla *ca = inet_csk_ca(sk);

        ca->rho = 0;
        ca->rho2 = 0;
        ca->rho_3ls = 0;
        ca->rho2_7ls = 0;
        ca->snd_cwnd_cents = 0;
        ca->hybla_en = true;
        tp->snd_cwnd = 2;
        tp->snd_cwnd_clamp = 65535;

        /* 1st Rho measurement based on initial srtt */
        hybla_recalc_param(sk);

        /* set minimum rtt as this is the 1st ever seen */
        ca->minrtt_us = tp->srtt_us;
        tp->snd_cwnd = ca->rho;
}

static void hybla_state(struct sock *sk, u8 ca_state)
{
        struct hybla *ca = inet_csk_ca(sk);

        ca->hybla_en = (ca_state == TCP_CA_Open);
}

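/*
 * Background note (explanatory, derived from the table below): each
 * entry is floor(2^(i/8) * 128) for i = 0..7, i.e. 2^fract in <<7 fixed
 * point, where fract is the 3-bit fractional part of rho. For example,
 * 2^(4/8) ~= 1.414 and 1.414 * 128 ~= 181, the fifth entry.
 */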
static inline u32 hybla_fraction(u32 odds)
{
        static const u32 fractions[] = {
                128, 139, 152, 165, 181, 197, 215, 234,
        };

        return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128;
}

/* TCP Hybla main routine.
 * This is the algorithm behavior:
 *   o Recalculate the Hybla parameters if min_rtt has changed
 *   o Give cwnd a new value based on the model proposed
 *   o Remember increments smaller than 1 segment (see the note below)
 */
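/*
 * Fixed-point note (explanatory): increments below are computed <<7,
 * i.e. in units of 1/128 of a segment. The integer part (increment >> 7)
 * is applied to snd_cwnd immediately, while the remainder is accumulated
 * in snd_cwnd_cents; every time 128 "cents" build up, cwnd grows by one
 * more segment.
 */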
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct hybla *ca = inet_csk_ca(sk);
        u32 increment, odd, rho_fractions;
        int is_slowstart = 0;

        /* Recalculate rho only if this srtt is the lowest */
        if (tp->srtt_us < ca->minrtt_us) {
                hybla_recalc_param(sk);
                ca->minrtt_us = tp->srtt_us;
        }

        if (!tcp_is_cwnd_limited(sk))
                return;

        if (!ca->hybla_en) {
                tcp_reno_cong_avoid(sk, ack, acked);
                return;
        }

        if (ca->rho == 0)
                hybla_recalc_param(sk);

        rho_fractions = ca->rho_3ls - (ca->rho << 3);

        if (tcp_in_slow_start(tp)) {
                /*
                 * slow start
                 *      INC = 2^RHO - 1
                 * This is done by splitting the rho parameter
                 * into 2 parts: an integer part and a fractional part.
                 * Increment<<7 is estimated by doing:
                 *             [2^(int + fract)] << 7
                 * which is equal to:
                 *             (2^int) * [(2^fract) << 7]
                 * 2^int is computed directly as 1 << int,
                 * while hybla_fraction() is used to
                 * compute 2^fract as a <<7 value.
                 */
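                /*
                 * Worked example (illustrative, not from the original
                 * code): with rho = 2 and rho_fractions = 0,
                 * increment = (1 << 2) * 128 - 128 = 384, so cwnd grows
                 * by increment >> 7 = 3 = 2^2 - 1 each time this
                 * routine runs, as the formula above expects.
                 */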
                is_slowstart = 1;
                increment = ((1 << min(ca->rho, 16U)) *
                        hybla_fraction(rho_fractions)) - 128;
        } else {
                /*
                 * congestion avoidance
                 *      INC = RHO^2 / W
                 * The increment is estimated as (rho^2 << 7) / window,
                 * so it is already <<7 and its fractional part can be
                 * accumulated directly.
                 */
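                /*
                 * Worked example (illustrative): with rho = 2,
                 * rho2_7ls = (2^2) << 7 = 512; at cwnd = 10 this gives
                 * increment = 51, i.e. about 0.4 of a segment, which is
                 * collected in snd_cwnd_cents until it reaches 128.
                 */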
                increment = ca->rho2_7ls / tp->snd_cwnd;
                if (increment < 128)
                        tp->snd_cwnd_cnt++;
        }

        odd = increment % 128;
        tp->snd_cwnd += increment >> 7;
        ca->snd_cwnd_cents += odd;

        /* Whenever the accumulated fraction reaches 128 (one full
         * segment), increase cwnd by 1.
         */
        while (ca->snd_cwnd_cents >= 128) {
                tp->snd_cwnd++;
                ca->snd_cwnd_cents -= 128;
                tp->snd_cwnd_cnt = 0;
        }
        /* Force a one-segment increase when cwnd has not been
         * incremented for a full window's worth of ACKs.
         */
        if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                tp->snd_cwnd++;
                tp->snd_cwnd_cnt = 0;
        }
        /* Clamp the slow-start cwnd down to the ssthresh value. */
        if (is_slowstart)
                tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);

        tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
}

static struct tcp_congestion_ops tcp_hybla __read_mostly = {
        .init           = hybla_init,
        .ssthresh       = tcp_reno_ssthresh,
        .undo_cwnd      = tcp_reno_undo_cwnd,
        .cong_avoid     = hybla_cong_avoid,
        .set_state      = hybla_state,

        .owner          = THIS_MODULE,
        .name           = "hybla",
};

static int __init hybla_register(void)
{
        BUILD_BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_hybla);
}

static void __exit hybla_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_hybla);
}

module_init(hybla_register);
module_exit(hybla_unregister);

MODULE_AUTHOR("Daniele Lacamera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Hybla");
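
/*
 * Userspace usage sketch (illustrative, not part of this module): once
 * the module is loaded, Hybla can also be selected per socket with the
 * standard TCP_CONGESTION socket option, e.g.:
 *
 *   #include <netinet/in.h>
 *   #include <netinet/tcp.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   int fd = socket(AF_INET, SOCK_STREAM, 0);
 *   setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "hybla",
 *              strlen("hybla"));
 */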