// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

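/* Per-link driver state, stored both as netdev private data and as the
 * MHI device drvdata so the transfer callbacks can reach the netdev.
 */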
struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct sk_buff *skbagg_head;
	struct sk_buff *skbagg_tail;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
	int msg_enable;
	unsigned int mru;
};

struct mhi_device_info {
	const char *netname;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

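/* Queue the skb on the MHI uplink (TX) channel. The buffer is released and
 * the TX stats are accounted in mhi_net_ul_callback() once the transfer
 * completes; the queue is stopped here when the channel runs out of
 * descriptors.
 */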
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}

static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};

static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

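/* Chain a received fragment onto the pending aggregation skb using the
 * frag_list mechanism, and return the (possibly new) aggregation head.
 */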
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}

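/* Downlink (RX) completion callback, invoked by the MHI core for each
 * consumed RX buffer. Completed packets are pushed to the network stack;
 * -EOVERFLOW fragments are aggregated until the final transfer arrives.
 */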
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet can not fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		__netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

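/* Uplink (TX) completion callback: free the transmitted skb, update the TX
 * stats and wake the queue once descriptors are available again.
 */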
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

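/* Refill the downlink channel with freshly allocated skbs, sized to the
 * device MRU (or the netdev MTU if no MRU is set).
 */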
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

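/* Bind the netdev to the MHI device: start the MHI channels, size the RX
 * queue from the number of available descriptors and register the netdev.
 */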
static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev;
	int err;

	mhi_netdev = netdev_priv(ndev);

	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	err = register_netdev(ndev);
	if (err)
		return err;

	return 0;
}

static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	kfree_skb(mhi_netdev->skbagg_head);

	free_netdev(ndev);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}

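/* Allocate and set up a netdev for the matched MHI channel pair, using the
 * interface name template from the device-specific mhi_device_info.
 */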
static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	err = mhi_net_newlink(mhi_dev, ndev);
	if (err) {
		free_netdev(ndev);
		return err;
	}

	return 0;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	mhi_net_dellink(mhi_dev, mhi_netdev->ndev);
}

static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");