// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
 *                    David Jander
 * Copyright (C) 2014-2021, 2023 Pengutronix,
 *               Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

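/* Returns true if mailbox @a comes before or is equal to mailbox @b,
 * taking the scan direction (struct can_rx_offload::inc) into
 * account.
 */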
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

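/* Advance the mailbox index @val by one in the scan direction:
 * post-increment if struct can_rx_offload::inc is true,
 * post-decrement otherwise.
 */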
static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

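/* NAPI poll handler: hand over up to @quota skbs from the offload
 * queue to the networking stack, updating the RX statistics for all
 * but error frames. If the queue was refilled by a concurrent
 * interrupt before NAPI completed, NAPI is re-scheduled.
 */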
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		if (!(cf->can_id & CAN_ERR_FLAG)) {
			stats->rx_packets++;
			if (!(cf->can_id & CAN_RTR_FLAG))
				stats->rx_bytes += cf->len;
		}
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_schedule(&offload->napi);
	}

	return work_done;
}

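/* Insert @new into @head, which is kept sorted in ascending @compare
 * order, by walking the queue from the tail. Like the other
 * __skb_queue_*() helpers this does no locking, the caller must
 * serialize access to the queue.
 */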
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of the mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox's content is discarded by reading it into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	struct can_rx_offload_cb *cb;
	bool drop = false;
	u32 timestamp;

	/* If queue is full drop frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate
	 * error value.
	 */
	if (IS_ERR(skb)) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}

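/**
 * can_rx_offload_irq_offload_timestamp() - Offload all pending mailboxes
 * @offload: pointer to rx_offload context
 * @pending: bitmask of mailboxes with pending CAN frames
 *
 * Walk the mailboxes from struct can_rx_offload::mb_first to struct
 * can_rx_offload::mb_last in the configured scan direction, read each
 * pending mailbox and insert the resulting skb into the IRQ queue,
 * sorted by timestamp.
 *
 * Return: Number of CAN frames read and queued.
 */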
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	unsigned int i;
	int received = 0;

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&offload->skb_irq_queue, skb,
				     can_rx_offload_compare);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

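/**
 * can_rx_offload_irq_offload_fifo() - Offload CAN frames from the RX FIFO
 * @offload: pointer to rx_offload context
 *
 * Read CAN frames from the device's RX FIFO (mailbox 0) until it runs
 * empty and append them to the IRQ queue in order of reception. Read
 * errors are accounted in the netdev statistics by
 * can_rx_offload_offload_one() and the affected frame is skipped.
 *
 * Return: Number of CAN frames read and queued.
 */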
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		__skb_queue_tail(&offload->skb_irq_queue, skb);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

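/**
 * can_rx_offload_queue_timestamp() - Queue a skb sorted by timestamp
 * @offload: pointer to rx_offload context
 * @skb: skb to queue
 * @timestamp: timestamp to sort the skb by
 *
 * Insert @skb into the IRQ queue, sorted by @timestamp. If the main
 * skb queue already exceeds its maximum length, the skb is freed
 * instead.
 *
 * Return: 0 on success, -ENOBUFS if the queue is full.
 */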
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
				   struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	__skb_queue_add_sort(&offload->skb_irq_queue, skb,
			     can_rx_offload_compare);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);

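/**
 * can_rx_offload_get_echo_skb_queue_timestamp() - Queue a TX echo skb, sorted by timestamp
 * @offload: pointer to rx_offload context
 * @idx: echo skb index of the completed TX frame
 * @timestamp: timestamp of the TX completion
 * @frame_len_ptr: optional pointer to return the frame length
 *
 * Fetch the echo skb of the completed TX frame @idx and insert it
 * into the IRQ queue, sorted by @timestamp. On queueing errors the
 * RX error and TX FIFO error statistics are incremented.
 *
 * Return: Length of the echoed CAN frame, 0 if no echo skb was found.
 */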
unsigned int
can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
					    unsigned int idx, u32 timestamp,
					    unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp);

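/**
 * can_rx_offload_queue_tail() - Queue a skb at the tail of the IRQ queue
 * @offload: pointer to rx_offload context
 * @skb: skb to queue
 *
 * Append @skb to the IRQ queue, preserving FIFO order. If the main
 * skb queue already exceeds its maximum length, the skb is freed
 * instead.
 *
 * Return: 0 on success, -ENOBUFS if the queue is full.
 */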
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	__skb_queue_tail(&offload->skb_irq_queue, skb);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

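/**
 * can_rx_offload_get_echo_skb_queue_tail() - Queue a TX echo skb in FIFO order
 * @offload: pointer to rx_offload context
 * @idx: echo skb index of the completed TX frame
 * @frame_len_ptr: optional pointer to return the frame length
 *
 * Fetch the echo skb of the completed TX frame @idx and append it to
 * the tail of the IRQ queue. On queueing errors the RX error and TX
 * FIFO error statistics are incremented.
 *
 * Return: Length of the echoed CAN frame, 0 if no echo skb was found.
 */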
unsigned int
can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
				       unsigned int idx,
				       unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_tail(offload, skb);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail);

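/**
 * can_rx_offload_irq_finish() - Hand the offloaded skbs over to NAPI
 * @offload: pointer to rx_offload context
 *
 * Splice the IRQ queue onto the tail of the main skb queue and
 * schedule NAPI. To be called at the end of the (hard) interrupt
 * handler, after all pending mailboxes have been offloaded.
 */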
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);

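/**
 * can_rx_offload_threaded_irq_finish() - Hand the offloaded skbs over to NAPI
 * @offload: pointer to rx_offload context
 *
 * Same as can_rx_offload_irq_finish(), but intended for threaded IRQ
 * handlers: napi_schedule() is wrapped in local_bh_disable() /
 * local_bh_enable() so that the NAPI softirq can run once bottom
 * halves are re-enabled.
 */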
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	local_bh_disable();
	napi_schedule(&offload->napi);
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);
	__skb_queue_head_init(&offload->skb_irq_queue);

	netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
			      weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

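/**
 * can_rx_offload_add_timestamp() - Set up rx_offload in timestamp mode
 * @dev: pointer to the net_device
 * @offload: pointer to rx_offload context
 *
 * Set up offloading of the mailbox range from struct
 * can_rx_offload::mb_first to struct can_rx_offload::mb_last, with
 * received frames sorted by timestamp. The scan direction follows
 * from the order of mb_first and mb_last; the NAPI weight is the size
 * of the mailbox range.
 *
 * Return: 0 on success, -EINVAL if the mailbox range is out of bounds
 * or no struct can_rx_offload::mailbox_read callback is given.
 */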
int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

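/**
 * can_rx_offload_add_fifo() - Set up rx_offload in FIFO mode
 * @dev: pointer to the net_device
 * @offload: pointer to rx_offload context
 * @weight: NAPI poll weight
 *
 * Set up offloading of a hardware RX FIFO that is drained via the
 * struct can_rx_offload::mailbox_read callback.
 *
 * Return: 0 on success, -EINVAL if no mailbox_read callback is given.
 */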
int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

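/**
 * can_rx_offload_add_manual() - Set up rx_offload in manual mode
 * @dev: pointer to the net_device
 * @offload: pointer to rx_offload context
 * @weight: NAPI poll weight
 *
 * Set up rx_offload for drivers that queue skbs themselves via
 * can_rx_offload_queue_tail() or can_rx_offload_queue_timestamp().
 * Consequently, no struct can_rx_offload::mailbox_read callback must
 * be given.
 *
 * Return: 0 on success, -EINVAL if a mailbox_read callback is given.
 */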
int can_rx_offload_add_manual(struct net_device *dev,
			      struct can_rx_offload *offload,
			      unsigned int weight)
{
	if (offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);

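/**
 * can_rx_offload_enable() - Enable NAPI processing
 * @offload: pointer to rx_offload context
 */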
void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

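/**
 * can_rx_offload_del() - Tear down the rx_offload context
 * @offload: pointer to rx_offload context
 *
 * Remove the NAPI instance and free all skbs still held in the
 * queues.
 */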
void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
	__skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);