1 | /* Copyright 2009 - 2016 Freescale Semiconductor, Inc. |
2 | * |
3 | * Redistribution and use in source and binary forms, with or without |
4 | * modification, are permitted provided that the following conditions are met: |
5 | * * Redistributions of source code must retain the above copyright |
6 | * notice, this list of conditions and the following disclaimer. |
7 | * * Redistributions in binary form must reproduce the above copyright |
8 | * notice, this list of conditions and the following disclaimer in the |
9 | * documentation and/or other materials provided with the distribution. |
10 | * * Neither the name of Freescale Semiconductor nor the |
11 | * names of its contributors may be used to endorse or promote products |
12 | * derived from this software without specific prior written permission. |
13 | * |
14 | * ALTERNATIVELY, this software may be distributed under the terms of the |
15 | * GNU General Public License ("GPL") as published by the Free Software |
16 | * Foundation, either version 2 of that License or (at your option) any |
17 | * later version. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
20 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
22 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
23 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
25 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
28 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 | */ |
30 | |
31 | #include "qman_test.h" |
32 | |
33 | #include <linux/dma-mapping.h> |
34 | #include <linux/delay.h> |
35 | |
36 | /* |
37 | * Algorithm: |
38 | * |
39 | * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates |
40 | * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The |
41 | * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will |
42 | * shuttle a "hot potato" frame around them such that every forwarding action |
43 | * moves it from one cpu to another. (The use of more than one handler per cpu |
44 | * is to allow enough handlers/FQs to truly test the significance of caching - |
 * i.e. when cache-expiries are occurring.)
46 | * |
47 | * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the |
48 | * first and last words of the frame data will undergo a transformation step on |
49 | * each forwarding action. To achieve this, each handler will be assigned a |
50 | * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is |
51 | * received by a handler, the mixer of the expected sender is XOR'd into all |
52 | * words of the entire frame, which is then validated against the original |
53 | * values. Then, before forwarding, the entire frame is XOR'd with the mixer of |
54 | * the current handler. Apart from validating that the frame is taking the |
55 | * expected path, this also provides some quasi-realistic overheads to each |
56 | * forwarding action - dereferencing *all* the frame data, computation, and |
57 | * conditional branching. There is a "special" handler designated to act as the |
 * instigator of the test by creating and enqueuing the "hot potato" frame, and
59 | * to determine when the test has completed by counting HP_LOOPS iterations. |
60 | * |
61 | * Init phases: |
62 | * |
 * 1. prepare each cpu's 'hp_cpu' struct using on_all_cpus() and link them
64 | * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU |
65 | * handlers and link-list them (but do no other handler setup). |
66 | * |
67 | * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each |
68 | * hp_cpu's 'iterator' to point to its first handler. With each loop, |
69 | * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler |
70 | * and advance the iterator for the next loop. This includes a final fixup, |
71 | * which connects the last handler to the first (and which is why phase 2 |
72 | * and 3 are separate). |
73 | * |
74 | * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each |
75 | * hp_cpu's 'iterator' to point to its first handler. With each loop, |
76 | * initialise FQ objects and advance the iterator for the next loop. |
77 | * Moreover, do this initialisation on the cpu it applies to so that Rx FQ |
78 | * initialisation targets the correct cpu. |
79 | */ |
80 | |
81 | /* |
82 | * helper to run something on all cpus (can't use on_each_cpu(), as that invokes |
83 | * the fn from irq context, which is too restrictive). |
84 | */ |
85 | struct bstrap { |
86 | int (*fn)(void); |
87 | atomic_t started; |
88 | }; |
89 | static int bstrap_fn(void *bs) |
90 | { |
91 | struct bstrap *bstrap = bs; |
92 | int err; |
93 | |
atomic_inc(&bstrap->started);
95 | err = bstrap->fn(); |
96 | if (err) |
97 | return err; |
98 | while (!kthread_should_stop()) |
msleep(20);
100 | return 0; |
101 | } |
102 | static int on_all_cpus(int (*fn)(void)) |
103 | { |
104 | int cpu; |
105 | |
106 | for_each_cpu(cpu, cpu_online_mask) { |
107 | struct bstrap bstrap = { |
108 | .fn = fn, |
109 | .started = ATOMIC_INIT(0) |
110 | }; |
struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
"hotpotato%d", cpu);
113 | int ret; |
114 | |
if (IS_ERR(k))
116 | return -ENOMEM; |
117 | kthread_bind(k, cpu); |
wake_up_process(k);
119 | /* |
120 | * If we call kthread_stop() before the "wake up" has had an |
121 | * effect, then the thread may exit with -EINTR without ever |
122 | * running the function. So poll until it's started before |
123 | * requesting it to stop. |
124 | */ |
while (!atomic_read(&bstrap.started))
msleep(20);
127 | ret = kthread_stop(k); |
128 | if (ret) |
129 | return ret; |
130 | } |
131 | return 0; |
132 | } |
133 | |
134 | struct hp_handler { |
135 | |
136 | /* The following data is stashed when 'rx' is dequeued; */ |
137 | /* -------------- */ |
138 | /* The Rx FQ, dequeues of which will stash the entire hp_handler */ |
139 | struct qman_fq rx; |
140 | /* The Tx FQ we should forward to */ |
141 | struct qman_fq tx; |
142 | /* The value we XOR post-dequeue, prior to validating */ |
143 | u32 rx_mixer; |
144 | /* The value we XOR pre-enqueue, after validating */ |
145 | u32 tx_mixer; |
146 | /* what the hotpotato address should be on dequeue */ |
147 | dma_addr_t addr; |
148 | u32 *frame_ptr; |
149 | |
150 | /* The following data isn't (necessarily) stashed on dequeue; */ |
151 | /* -------------- */ |
152 | u32 fqid_rx, fqid_tx; |
153 | /* list node for linking us into 'hp_cpu' */ |
154 | struct list_head node; |
155 | /* Just to check ... */ |
156 | unsigned int processor_id; |
157 | } ____cacheline_aligned; |
158 | |
159 | struct hp_cpu { |
160 | /* identify the cpu we run on; */ |
161 | unsigned int processor_id; |
162 | /* root node for the per-cpu list of handlers */ |
163 | struct list_head handlers; |
164 | /* list node for linking us into 'hp_cpu_list' */ |
165 | struct list_head node; |
166 | /* |
167 | * when repeatedly scanning 'hp_list', each time linking the n'th |
168 | * handlers together, this is used as per-cpu iterator state |
169 | */ |
170 | struct hp_handler *iterator; |
171 | }; |
172 | |
173 | /* Each cpu has one of these */ |
174 | static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); |
175 | |
176 | /* links together the hp_cpu structs, in first-come first-serve order. */ |
177 | static LIST_HEAD(hp_cpu_list); |
178 | static DEFINE_SPINLOCK(hp_lock); |
179 | |
180 | static unsigned int hp_cpu_list_length; |
181 | |
182 | /* the "special" handler, that starts and terminates the test. */ |
183 | static struct hp_handler *special_handler; |
184 | static int loop_counter; |
185 | |
186 | /* handlers are allocated out of this, so they're properly aligned. */ |
187 | static struct kmem_cache *hp_handler_slab; |
188 | |
189 | /* this is the frame data */ |
190 | static void *__frame_ptr; |
191 | static u32 *frame_ptr; |
192 | static dma_addr_t frame_dma; |
193 | |
194 | /* needed for dma_map*() */ |
195 | static const struct qm_portal_config *pcfg; |
196 | |
197 | /* the main function waits on this */ |
198 | static DECLARE_WAIT_QUEUE_HEAD(queue); |
199 | |
200 | #define HP_PER_CPU 2 |
201 | #define HP_LOOPS 8 |
/* 80 words (320 bytes), like a small ethernet frame, spilling across several cachelines */
203 | #define HP_NUM_WORDS 80 |
204 | /* First word of the LFSR-based frame data */ |
205 | #define HP_FIRST_WORD 0xabbaf00d |
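
/*
 * do_lfsr() below steps a 32-bit Galois LFSR: shift right by one and, when
 * the bit shifted out is set, XOR in the tap mask 0xd0000001 (the taps of
 * the maximal-length polynomial x^32 + x^31 + x^29 + x + 1). For example,
 * HP_FIRST_WORD == 0xabbaf00d has bit 0 set, so the next value is
 * (0xabbaf00d >> 1) ^ 0xd0000001 == 0x85dd7807. The sequence depends only
 * on the seed, which is how allocate_frame_data() and process_frame_data()
 * independently regenerate the same expected words.
 */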
206 | |
207 | static inline u32 do_lfsr(u32 prev) |
208 | { |
209 | return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); |
210 | } |
211 | |
212 | static int allocate_frame_data(void) |
213 | { |
214 | u32 lfsr = HP_FIRST_WORD; |
215 | int loop; |
216 | |
217 | if (!qman_dma_portal) { |
pr_crit("portal not available\n");
219 | return -EIO; |
220 | } |
221 | |
pcfg = qman_get_qm_portal_config(qman_dma_portal);
223 | |
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
225 | if (!__frame_ptr) |
226 | return -ENOMEM; |
227 | |
228 | frame_ptr = PTR_ALIGN(__frame_ptr, 64); |
229 | for (loop = 0; loop < HP_NUM_WORDS; loop++) { |
230 | frame_ptr[loop] = lfsr; |
lfsr = do_lfsr(lfsr);
232 | } |
233 | |
234 | frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS, |
235 | DMA_BIDIRECTIONAL); |
if (dma_mapping_error(pcfg->dev, frame_dma)) {
pr_crit("dma mapping failure\n");
kfree(__frame_ptr);
239 | return -EIO; |
240 | } |
241 | |
242 | return 0; |
243 | } |
244 | |
245 | static void deallocate_frame_data(void) |
246 | { |
247 | dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS, |
248 | DMA_BIDIRECTIONAL); |
kfree(__frame_ptr);
250 | } |
251 | |
252 | static inline int process_frame_data(struct hp_handler *handler, |
253 | const struct qm_fd *fd) |
254 | { |
255 | u32 *p = handler->frame_ptr; |
256 | u32 lfsr = HP_FIRST_WORD; |
257 | int loop; |
258 | |
259 | if (qm_fd_addr_get64(fd) != handler->addr) { |
pr_crit("bad frame address, [%llX != %llX]\n",
261 | qm_fd_addr_get64(fd), handler->addr); |
262 | return -EIO; |
263 | } |
264 | for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { |
265 | *p ^= handler->rx_mixer; |
266 | if (*p != lfsr) { |
pr_crit("corrupt frame data");
268 | return -EIO; |
269 | } |
270 | *p ^= handler->tx_mixer; |
lfsr = do_lfsr(lfsr);
272 | } |
273 | return 0; |
274 | } |
275 | |
276 | static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, |
277 | struct qman_fq *fq, |
278 | const struct qm_dqrr_entry *dqrr, |
279 | bool sched_napi) |
280 | { |
281 | struct hp_handler *handler = (struct hp_handler *)fq; |
282 | |
if (process_frame_data(handler, &dqrr->fd)) {
284 | WARN_ON(1); |
285 | goto skip; |
286 | } |
if (qman_enqueue(&handler->tx, &dqrr->fd)) {
pr_crit("qman_enqueue() failed");
289 | WARN_ON(1); |
290 | } |
291 | skip: |
292 | return qman_cb_dqrr_consume; |
293 | } |
294 | |
295 | static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, |
296 | struct qman_fq *fq, |
297 | const struct qm_dqrr_entry *dqrr, |
298 | bool sched_napi) |
299 | { |
300 | struct hp_handler *handler = (struct hp_handler *)fq; |
301 | |
process_frame_data(handler, &dqrr->fd);
303 | if (++loop_counter < HP_LOOPS) { |
if (qman_enqueue(&handler->tx, &dqrr->fd)) {
pr_crit("qman_enqueue() failed");
306 | WARN_ON(1); |
307 | goto skip; |
308 | } |
309 | } else { |
pr_info("Received final (%dth) frame\n", loop_counter);
311 | wake_up(&queue); |
312 | } |
313 | skip: |
314 | return qman_cb_dqrr_consume; |
315 | } |
316 | |
317 | static int create_per_cpu_handlers(void) |
318 | { |
319 | struct hp_handler *handler; |
320 | int loop; |
321 | struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); |
322 | |
323 | hp_cpu->processor_id = smp_processor_id(); |
spin_lock(&hp_lock);
list_add_tail(&hp_cpu->node, &hp_cpu_list);
hp_cpu_list_length++;
spin_unlock(&hp_lock);
INIT_LIST_HEAD(&hp_cpu->handlers);
329 | for (loop = 0; loop < HP_PER_CPU; loop++) { |
handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
331 | if (!handler) { |
pr_crit("kmem_cache_alloc() failed");
333 | WARN_ON(1); |
334 | return -EIO; |
335 | } |
336 | handler->processor_id = hp_cpu->processor_id; |
337 | handler->addr = frame_dma; |
338 | handler->frame_ptr = frame_ptr; |
list_add_tail(&handler->node, &hp_cpu->handlers);
340 | } |
341 | return 0; |
342 | } |
343 | |
344 | static int destroy_per_cpu_handlers(void) |
345 | { |
346 | struct list_head *loop, *tmp; |
347 | struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus); |
348 | |
spin_lock(&hp_lock);
list_del(&hp_cpu->node);
spin_unlock(&hp_lock);
352 | list_for_each_safe(loop, tmp, &hp_cpu->handlers) { |
353 | u32 flags = 0; |
354 | struct hp_handler *handler = list_entry(loop, struct hp_handler, |
355 | node); |
if (qman_retire_fq(&handler->rx, &flags) ||
(flags & QMAN_FQ_STATE_BLOCKOOS)) {
pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
359 | WARN_ON(1); |
360 | return -EIO; |
361 | } |
if (qman_oos_fq(&handler->rx)) {
pr_crit("qman_oos_fq(rx) failed");
364 | WARN_ON(1); |
365 | return -EIO; |
366 | } |
qman_destroy_fq(&handler->rx);
qman_destroy_fq(&handler->tx);
qman_release_fqid(handler->fqid_rx);
list_del(&handler->node);
kmem_cache_free(hp_handler_slab, handler);
372 | } |
373 | return 0; |
374 | } |
375 | |
376 | static inline u8 num_cachelines(u32 offset) |
377 | { |
378 | u8 res = (offset + (L1_CACHE_BYTES - 1)) |
379 | / (L1_CACHE_BYTES); |
380 | if (res > 3) |
381 | return 3; |
382 | return res; |
383 | } |
384 | #define STASH_DATA_CL \ |
385 | num_cachelines(HP_NUM_WORDS * 4) |
386 | #define STASH_CTX_CL \ |
387 | num_cachelines(offsetof(struct hp_handler, fqid_rx)) |
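
/*
 * Example (illustrative): with 64-byte cachelines, HP_NUM_WORDS * 4 == 320
 * bytes would need 5 lines, so STASH_DATA_CL is clamped to 3, the largest
 * value the FQD's 2-bit stashing-size fields can encode. STASH_CTX_CL
 * covers the hp_handler fields up to (but not including) fqid_rx, matching
 * the "stashed when 'rx' is dequeued" comment in the struct definition.
 */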
388 | |
389 | static int init_handler(void *h) |
390 | { |
391 | struct qm_mcc_initfq opts; |
392 | struct hp_handler *handler = h; |
393 | int err; |
394 | |
395 | if (handler->processor_id != smp_processor_id()) { |
396 | err = -EIO; |
397 | goto failed; |
398 | } |
399 | /* Set up rx */ |
400 | memset(&handler->rx, 0, sizeof(handler->rx)); |
401 | if (handler == special_handler) |
402 | handler->rx.cb.dqrr = special_dqrr; |
403 | else |
404 | handler->rx.cb.dqrr = normal_dqrr; |
err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
406 | if (err) { |
pr_crit("qman_create_fq(rx) failed");
408 | goto failed; |
409 | } |
410 | memset(&opts, 0, sizeof(opts)); |
411 | opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | |
412 | QM_INITFQ_WE_CONTEXTA); |
413 | opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING); |
qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
QMAN_INITFQ_FLAG_LOCAL, &opts);
417 | if (err) { |
pr_crit("qman_init_fq(rx) failed");
419 | goto failed; |
420 | } |
421 | /* Set up tx */ |
422 | memset(&handler->tx, 0, sizeof(handler->tx)); |
err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
&handler->tx);
425 | if (err) { |
pr_crit("qman_create_fq(tx) failed");
427 | goto failed; |
428 | } |
429 | |
430 | return 0; |
431 | failed: |
432 | return err; |
433 | } |
434 | |
435 | static void init_handler_cb(void *h) |
436 | { |
437 | if (init_handler(h)) |
438 | WARN_ON(1); |
439 | } |
440 | |
441 | static int init_phase2(void) |
442 | { |
443 | int loop; |
444 | u32 fqid = 0; |
445 | u32 lfsr = 0xdeadbeef; |
446 | struct hp_cpu *hp_cpu; |
447 | struct hp_handler *handler; |
448 | |
449 | for (loop = 0; loop < HP_PER_CPU; loop++) { |
450 | list_for_each_entry(hp_cpu, &hp_cpu_list, node) { |
451 | int err; |
452 | |
453 | if (!loop) |
454 | hp_cpu->iterator = list_first_entry( |
455 | &hp_cpu->handlers, |
456 | struct hp_handler, node); |
457 | else |
458 | hp_cpu->iterator = list_entry( |
459 | hp_cpu->iterator->node.next, |
460 | struct hp_handler, node); |
461 | /* Rx FQID is the previous handler's Tx FQID */ |
462 | hp_cpu->iterator->fqid_rx = fqid; |
463 | /* Allocate new FQID for Tx */ |
464 | err = qman_alloc_fqid(&fqid); |
465 | if (err) { |
pr_crit("qman_alloc_fqid() failed");
467 | return err; |
468 | } |
469 | hp_cpu->iterator->fqid_tx = fqid; |
470 | /* Rx mixer is the previous handler's Tx mixer */ |
471 | hp_cpu->iterator->rx_mixer = lfsr; |
472 | /* Get new mixer for Tx */ |
lfsr = do_lfsr(lfsr);
474 | hp_cpu->iterator->tx_mixer = lfsr; |
475 | } |
476 | } |
477 | /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ |
478 | hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); |
479 | handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); |
480 | if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef) |
481 | return 1; |
482 | handler->fqid_rx = fqid; |
483 | handler->rx_mixer = lfsr; |
484 | /* and tag it as our "special" handler */ |
485 | special_handler = handler; |
486 | return 0; |
487 | } |
488 | |
489 | static int init_phase3(void) |
490 | { |
491 | int loop, err; |
492 | struct hp_cpu *hp_cpu; |
493 | |
494 | for (loop = 0; loop < HP_PER_CPU; loop++) { |
495 | list_for_each_entry(hp_cpu, &hp_cpu_list, node) { |
496 | if (!loop) |
497 | hp_cpu->iterator = list_first_entry( |
498 | &hp_cpu->handlers, |
499 | struct hp_handler, node); |
500 | else |
501 | hp_cpu->iterator = list_entry( |
502 | hp_cpu->iterator->node.next, |
503 | struct hp_handler, node); |
504 | preempt_disable(); |
505 | if (hp_cpu->processor_id == smp_processor_id()) { |
err = init_handler(hp_cpu->iterator);
507 | if (err) |
508 | return err; |
509 | } else { |
smp_call_function_single(hp_cpu->processor_id,
init_handler_cb, hp_cpu->iterator, 1);
512 | } |
513 | preempt_enable(); |
514 | } |
515 | } |
516 | return 0; |
517 | } |
518 | |
519 | static int send_first_frame(void *ignore) |
520 | { |
521 | u32 *p = special_handler->frame_ptr; |
522 | u32 lfsr = HP_FIRST_WORD; |
523 | int loop, err; |
524 | struct qm_fd fd; |
525 | |
526 | if (special_handler->processor_id != smp_processor_id()) { |
527 | err = -EIO; |
528 | goto failed; |
529 | } |
530 | memset(&fd, 0, sizeof(fd)); |
qm_fd_addr_set64(&fd, special_handler->addr);
532 | qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4); |
533 | for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { |
534 | if (*p != lfsr) { |
535 | err = -EIO; |
pr_crit("corrupt frame data");
537 | goto failed; |
538 | } |
539 | *p ^= special_handler->tx_mixer; |
lfsr = do_lfsr(lfsr);
541 | } |
pr_info("Sending first frame\n");
err = qman_enqueue(&special_handler->tx, &fd);
544 | if (err) { |
pr_crit("qman_enqueue() failed");
546 | goto failed; |
547 | } |
548 | |
549 | return 0; |
550 | failed: |
551 | return err; |
552 | } |
553 | |
554 | static void send_first_frame_cb(void *ignore) |
555 | { |
556 | if (send_first_frame(NULL)) |
557 | WARN_ON(1); |
558 | } |
559 | |
560 | int qman_test_stash(void) |
561 | { |
562 | int err; |
563 | |
564 | if (cpumask_weight(cpu_online_mask) < 2) { |
pr_info("%s(): skip - only 1 CPU\n", __func__);
566 | return 0; |
567 | } |
568 | |
pr_info("%s(): Starting\n", __func__);
570 | |
571 | hp_cpu_list_length = 0; |
572 | loop_counter = 0; |
hp_handler_slab = kmem_cache_create("hp_handler_slab",
sizeof(struct hp_handler), L1_CACHE_BYTES,
575 | SLAB_HWCACHE_ALIGN, NULL); |
576 | if (!hp_handler_slab) { |
577 | err = -EIO; |
pr_crit("kmem_cache_create() failed");
579 | goto failed; |
580 | } |
581 | |
582 | err = allocate_frame_data(); |
583 | if (err) |
584 | goto failed; |
585 | |
586 | /* Init phase 1 */ |
pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
if (on_all_cpus(create_per_cpu_handlers)) {
589 | err = -EIO; |
pr_crit("on_all_cpus() failed");
591 | goto failed; |
592 | } |
pr_info("Number of cpus: %d, total of %d handlers\n",
594 | hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); |
595 | |
596 | err = init_phase2(); |
597 | if (err) |
598 | goto failed; |
599 | |
600 | err = init_phase3(); |
601 | if (err) |
602 | goto failed; |
603 | |
604 | preempt_disable(); |
605 | if (special_handler->processor_id == smp_processor_id()) { |
606 | err = send_first_frame(NULL); |
607 | if (err) |
608 | goto failed; |
609 | } else { |
smp_call_function_single(special_handler->processor_id,
send_first_frame_cb, NULL, 1);
612 | } |
613 | preempt_enable(); |
614 | |
615 | wait_event(queue, loop_counter == HP_LOOPS); |
616 | deallocate_frame_data(); |
if (on_all_cpus(destroy_per_cpu_handlers)) {
618 | err = -EIO; |
pr_crit("on_all_cpus() failed");
620 | goto failed; |
621 | } |
kmem_cache_destroy(hp_handler_slab);
pr_info("%s(): Finished\n", __func__);
624 | |
625 | return 0; |
626 | failed: |
627 | WARN_ON(1); |
628 | return err; |
629 | } |
630 | |