1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Intel(R) Trace Hub Memory Storage Unit |
4 | * |
5 | * Copyright (C) 2014-2015 Intel Corporation. |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9 | |
10 | #include <linux/types.h> |
11 | #include <linux/module.h> |
12 | #include <linux/device.h> |
13 | #include <linux/uaccess.h> |
14 | #include <linux/sizes.h> |
15 | #include <linux/printk.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/mm.h> |
18 | #include <linux/fs.h> |
19 | #include <linux/io.h> |
20 | #include <linux/workqueue.h> |
21 | #include <linux/dma-mapping.h> |
22 | |
23 | #ifdef CONFIG_X86 |
24 | #include <asm/set_memory.h> |
25 | #endif |
26 | |
27 | #include <linux/intel_th.h> |
28 | #include "intel_th.h" |
29 | #include "msu.h" |
30 | |
31 | #define msc_dev(x) (&(x)->thdev->dev) |
32 | |
33 | /* |
34 | * Lockout state transitions: |
35 | * READY -> INUSE -+-> LOCKED -+-> READY -> etc. |
36 | * \-----------/ |
37 | * WIN_READY: window can be used by HW |
38 | * WIN_INUSE: window is in use |
39 | * WIN_LOCKED: window is filled up and is being processed by the buffer |
40 | * handling code |
41 | * |
42 | * All state transitions happen automatically, except for the LOCKED->READY, |
43 | * which needs to be signalled by the buffer code by calling |
44 | * intel_th_msc_window_unlock(). |
45 | * |
46 | * When the interrupt handler has to switch to the next window, it checks |
47 | * whether it's READY, and if it is, it performs the switch and tracing |
48 | * continues. If it's LOCKED, it stops the trace. |
49 | */ |
50 | enum lockout_state { |
51 | WIN_READY = 0, |
52 | WIN_INUSE, |
53 | WIN_LOCKED |
54 | }; |
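
/*
 * In terms of the code below: msc_configure() and the interrupt handler
 * take a window READY -> INUSE; the interrupt handler (when the window
 * fills up) or msc_disable() takes it INUSE -> LOCKED and hands it to
 * the buffer sink via ->ready(); the sink eventually calls
 * intel_th_msc_window_unlock() to put it back to READY. Lockout
 * tracking only applies when an MSU buffer sink (msc::mbuf) is in use.
 */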
55 | |
56 | /** |
57 | * struct msc_window - multiblock mode window descriptor |
58 | * @entry: window list linkage (msc::win_list) |
59 | * @pgoff: page offset into the buffer that this window starts at |
 * @lockout: lockout state, see comment above
61 | * @lo_lock: lockout state serialization |
62 | * @nr_blocks: number of blocks (pages) in this window |
63 | * @nr_segs: number of segments in this window (<= @nr_blocks) |
64 | * @_sgt: array of block descriptors |
65 | * @sgt: array of block descriptors |
66 | */ |
67 | struct msc_window { |
68 | struct list_head entry; |
69 | unsigned long pgoff; |
70 | enum lockout_state lockout; |
71 | spinlock_t lo_lock; |
72 | unsigned int nr_blocks; |
73 | unsigned int nr_segs; |
74 | struct msc *msc; |
75 | struct sg_table _sgt; |
76 | struct sg_table *sgt; |
77 | }; |
78 | |
79 | /** |
80 | * struct msc_iter - iterator for msc buffer |
81 | * @entry: msc::iter_list linkage |
82 | * @msc: pointer to the MSC device |
83 | * @start_win: oldest window |
84 | * @win: current window |
85 | * @offset: current logical offset into the buffer |
86 | * @start_block: oldest block in the window |
87 | * @block: block number in the window |
88 | * @block_off: offset into current block |
89 | * @wrap_count: block wrapping handling |
90 | * @eof: end of buffer reached |
91 | */ |
92 | struct msc_iter { |
93 | struct list_head entry; |
94 | struct msc *msc; |
95 | struct msc_window *start_win; |
96 | struct msc_window *win; |
97 | unsigned long offset; |
98 | struct scatterlist *start_block; |
99 | struct scatterlist *block; |
100 | unsigned int block_off; |
101 | unsigned int wrap_count; |
102 | unsigned int eof; |
103 | }; |
104 | |
105 | /** |
106 | * struct msc - MSC device representation |
107 | * @reg_base: register window base address |
108 | * @thdev: intel_th_device pointer |
109 | * @mbuf: MSU buffer, if assigned |
 * @mbuf_priv: MSU buffer's private data, if @mbuf
111 | * @win_list: list of windows in multiblock mode |
112 | * @single_sgt: single mode buffer |
113 | * @cur_win: current window |
114 | * @nr_pages: total number of pages allocated for this buffer |
115 | * @single_sz: amount of data in single mode |
116 | * @single_wrap: single mode wrap occurred |
117 | * @base: buffer's base pointer |
118 | * @base_addr: buffer's base address |
119 | * @user_count: number of users of the buffer |
120 | * @mmap_count: number of mappings |
 * @buf_mutex: mutex to serialize access to buffer-related bits
 * @enabled: MSC is enabled
124 | * @wrap: wrapping is enabled |
125 | * @mode: MSC operating mode |
126 | * @burst_len: write burst length |
127 | * @index: number of this MSC in the MSU |
128 | */ |
129 | struct msc { |
130 | void __iomem *reg_base; |
131 | void __iomem *msu_base; |
132 | struct intel_th_device *thdev; |
133 | |
134 | const struct msu_buffer *mbuf; |
135 | void *mbuf_priv; |
136 | |
137 | struct work_struct work; |
138 | struct list_head win_list; |
139 | struct sg_table single_sgt; |
140 | struct msc_window *cur_win; |
141 | struct msc_window *switch_on_unlock; |
142 | unsigned long nr_pages; |
143 | unsigned long single_sz; |
144 | unsigned int single_wrap : 1; |
145 | void *base; |
146 | dma_addr_t base_addr; |
147 | u32 orig_addr; |
148 | u32 orig_sz; |
149 | |
150 | /* <0: no buffer, 0: no users, >0: active users */ |
151 | atomic_t user_count; |
152 | |
153 | atomic_t mmap_count; |
154 | struct mutex buf_mutex; |
155 | |
156 | struct list_head iter_list; |
157 | |
158 | bool stop_on_full; |
159 | |
160 | /* config */ |
161 | unsigned int enabled : 1, |
162 | wrap : 1, |
163 | do_irq : 1, |
164 | multi_is_broken : 1; |
165 | unsigned int mode; |
166 | unsigned int burst_len; |
167 | unsigned int index; |
168 | }; |
169 | |
170 | static LIST_HEAD(msu_buffer_list); |
171 | static DEFINE_MUTEX(msu_buffer_mutex); |
172 | |
173 | /** |
174 | * struct msu_buffer_entry - internal MSU buffer bookkeeping |
175 | * @entry: link to msu_buffer_list |
176 | * @mbuf: MSU buffer object |
177 | * @owner: module that provides this MSU buffer |
178 | */ |
179 | struct msu_buffer_entry { |
180 | struct list_head entry; |
181 | const struct msu_buffer *mbuf; |
182 | struct module *owner; |
183 | }; |
184 | |
185 | static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name) |
186 | { |
187 | struct msu_buffer_entry *mbe; |
188 | |
189 | lockdep_assert_held(&msu_buffer_mutex); |
190 | |
191 | list_for_each_entry(mbe, &msu_buffer_list, entry) { |
192 | if (!strcmp(mbe->mbuf->name, name)) |
193 | return mbe; |
194 | } |
195 | |
196 | return NULL; |
197 | } |
198 | |
199 | static const struct msu_buffer * |
200 | msu_buffer_get(const char *name) |
201 | { |
202 | struct msu_buffer_entry *mbe; |
203 | |
204 | mutex_lock(&msu_buffer_mutex); |
205 | mbe = __msu_buffer_entry_find(name); |
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);
209 | |
210 | return mbe ? mbe->mbuf : NULL; |
211 | } |
212 | |
213 | static void msu_buffer_put(const struct msu_buffer *mbuf) |
214 | { |
215 | struct msu_buffer_entry *mbe; |
216 | |
217 | mutex_lock(&msu_buffer_mutex); |
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
222 | } |
223 | |
224 | int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, |
225 | struct module *owner) |
226 | { |
227 | struct msu_buffer_entry *mbe; |
228 | int ret = 0; |
229 | |
	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
231 | if (!mbe) |
232 | return -ENOMEM; |
233 | |
234 | mutex_lock(&msu_buffer_mutex); |
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
238 | goto unlock; |
239 | } |
240 | |
241 | mbe->mbuf = mbuf; |
242 | mbe->owner = owner; |
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);
246 | |
247 | return ret; |
248 | } |
249 | EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register); |
250 | |
251 | void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf) |
252 | { |
253 | struct msu_buffer_entry *mbe; |
254 | |
255 | mutex_lock(&msu_buffer_mutex); |
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
262 | } |
263 | EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister); |
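
/*
 * A minimal sketch of how an external buffer sink would plug in here
 * (illustrative only; the callback names below are the struct msu_buffer
 * members this file actually invokes -- assign/unassign,
 * alloc_window/free_window, ready, activate/deactivate -- see
 * include/linux/intel_th.h for their exact signatures):
 *
 *	static const struct msu_buffer my_sink = {
 *		.name     = "my_sink",
 *		.assign   = my_sink_assign,
 *		.unassign = my_sink_unassign,
 *		.ready    = my_sink_ready,
 *	};
 *
 * registered with intel_th_msu_buffer_register(&my_sink, THIS_MODULE)
 * from module init and removed with
 * intel_th_msu_buffer_unregister(&my_sink) on exit.
 */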
264 | |
265 | static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) |
266 | { |
267 | /* header hasn't been written */ |
268 | if (!bdesc->valid_dw) |
269 | return true; |
270 | |
271 | /* valid_dw includes the header */ |
272 | if (!msc_data_sz(bdesc)) |
273 | return true; |
274 | |
275 | return false; |
276 | } |
277 | |
278 | static inline struct scatterlist *msc_win_base_sg(struct msc_window *win) |
279 | { |
280 | return win->sgt->sgl; |
281 | } |
282 | |
283 | static inline struct msc_block_desc *msc_win_base(struct msc_window *win) |
284 | { |
	return sg_virt(msc_win_base_sg(win));
286 | } |
287 | |
288 | static inline dma_addr_t msc_win_base_dma(struct msc_window *win) |
289 | { |
290 | return sg_dma_address(msc_win_base_sg(win)); |
291 | } |
292 | |
293 | static inline unsigned long |
294 | msc_win_base_pfn(struct msc_window *win) |
295 | { |
296 | return PFN_DOWN(msc_win_base_dma(win)); |
297 | } |
298 | |
299 | /** |
300 | * msc_is_last_win() - check if a window is the last one for a given MSC |
301 | * @win: window |
302 | * Return: true if @win is the last window in MSC's multiblock buffer |
303 | */ |
304 | static inline bool msc_is_last_win(struct msc_window *win) |
305 | { |
306 | return win->entry.next == &win->msc->win_list; |
307 | } |
308 | |
309 | /** |
310 | * msc_next_window() - return next window in the multiblock buffer |
311 | * @win: current window |
312 | * |
313 | * Return: window following the current one |
314 | */ |
315 | static struct msc_window *msc_next_window(struct msc_window *win) |
316 | { |
317 | if (msc_is_last_win(win)) |
318 | return list_first_entry(&win->msc->win_list, struct msc_window, |
319 | entry); |
320 | |
321 | return list_next_entry(win, entry); |
322 | } |
323 | |
324 | static size_t msc_win_total_sz(struct msc_window *win) |
325 | { |
326 | struct scatterlist *sg; |
327 | unsigned int blk; |
328 | size_t size = 0; |
329 | |
330 | for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { |
331 | struct msc_block_desc *bdesc = sg_virt(sg); |
332 | |
333 | if (msc_block_wrapped(bdesc)) |
334 | return (size_t)win->nr_blocks << PAGE_SHIFT; |
335 | |
336 | size += msc_total_sz(bdesc); |
337 | if (msc_block_last_written(bdesc)) |
338 | break; |
339 | } |
340 | |
341 | return size; |
342 | } |
343 | |
344 | /** |
345 | * msc_find_window() - find a window matching a given sg_table |
346 | * @msc: MSC device |
347 | * @sgt: SG table of the window |
348 | * @nonempty: skip over empty windows |
349 | * |
350 | * Return: MSC window structure pointer or NULL if the window |
351 | * could not be found. |
352 | */ |
353 | static struct msc_window * |
354 | msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty) |
355 | { |
356 | struct msc_window *win; |
357 | unsigned int found = 0; |
358 | |
	if (list_empty(&msc->win_list))
360 | return NULL; |
361 | |
362 | /* |
363 | * we might need a radix tree for this, depending on how |
364 | * many windows a typical user would allocate; ideally it's |
365 | * something like 2, in which case we're good |
366 | */ |
367 | list_for_each_entry(win, &msc->win_list, entry) { |
368 | if (win->sgt == sgt) |
369 | found++; |
370 | |
371 | /* skip the empty ones */ |
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
373 | continue; |
374 | |
375 | if (found) |
376 | return win; |
377 | } |
378 | |
379 | return NULL; |
380 | } |
381 | |
382 | /** |
383 | * msc_oldest_window() - locate the window with oldest data |
384 | * @msc: MSC device |
385 | * |
386 | * This should only be used in multiblock mode. Caller should hold the |
387 | * msc::user_count reference. |
388 | * |
389 | * Return: the oldest window with valid data |
390 | */ |
391 | static struct msc_window *msc_oldest_window(struct msc *msc) |
392 | { |
393 | struct msc_window *win; |
394 | |
	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
399 | if (win) |
400 | return win; |
401 | |
402 | return list_first_entry(&msc->win_list, struct msc_window, entry); |
403 | } |
404 | |
405 | /** |
406 | * msc_win_oldest_sg() - locate the oldest block in a given window |
407 | * @win: window to look at |
408 | * |
409 | * Return: index of the block with the oldest data |
410 | */ |
411 | static struct scatterlist *msc_win_oldest_sg(struct msc_window *win) |
412 | { |
413 | unsigned int blk; |
414 | struct scatterlist *sg; |
415 | struct msc_block_desc *bdesc = msc_win_base(win); |
416 | |
417 | /* without wrapping, first block is the oldest */ |
418 | if (!msc_block_wrapped(bdesc)) |
419 | return msc_win_base_sg(win); |
420 | |
421 | /* |
422 | * with wrapping, last written block contains both the newest and the |
423 | * oldest data for this window. |
424 | */ |
425 | for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { |
426 | struct msc_block_desc *bdesc = sg_virt(sg); |
427 | |
428 | if (msc_block_last_written(bdesc)) |
429 | return sg; |
430 | } |
431 | |
432 | return msc_win_base_sg(win); |
433 | } |
434 | |
435 | static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter) |
436 | { |
	return sg_virt(iter->block);
438 | } |
439 | |
440 | static struct msc_iter *msc_iter_install(struct msc *msc) |
441 | { |
442 | struct msc_iter *iter; |
443 | |
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);
447 | |
448 | mutex_lock(&msc->buf_mutex); |
449 | |
450 | /* |
451 | * Reading and tracing are mutually exclusive; if msc is |
452 | * enabled, open() will fail; otherwise existing readers |
453 | * will prevent enabling the msc and the rest of fops don't |
454 | * need to worry about it. |
455 | */ |
456 | if (msc->enabled) { |
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
459 | goto unlock; |
460 | } |
461 | |
462 | iter->msc = msc; |
463 | |
	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);
467 | |
468 | return iter; |
469 | } |
470 | |
471 | static void msc_iter_remove(struct msc_iter *iter, struct msc *msc) |
472 | { |
473 | mutex_lock(&msc->buf_mutex); |
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
478 | } |
479 | |
480 | static void msc_iter_block_start(struct msc_iter *iter) |
481 | { |
482 | if (iter->start_block) |
483 | return; |
484 | |
	iter->start_block = msc_win_oldest_sg(iter->win);
486 | iter->block = iter->start_block; |
487 | iter->wrap_count = 0; |
488 | |
489 | /* |
490 | * start with the block with oldest data; if data has wrapped |
491 | * in this window, it should be in this block |
492 | */ |
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
494 | iter->wrap_count = 2; |
495 | |
496 | } |
497 | |
498 | static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc) |
499 | { |
500 | /* already started, nothing to do */ |
501 | if (iter->start_win) |
502 | return 0; |
503 | |
504 | iter->start_win = msc_oldest_window(msc); |
505 | if (!iter->start_win) |
506 | return -EINVAL; |
507 | |
508 | iter->win = iter->start_win; |
509 | iter->start_block = NULL; |
510 | |
511 | msc_iter_block_start(iter); |
512 | |
513 | return 0; |
514 | } |
515 | |
516 | static int msc_iter_win_advance(struct msc_iter *iter) |
517 | { |
	iter->win = msc_next_window(iter->win);
519 | iter->start_block = NULL; |
520 | |
521 | if (iter->win == iter->start_win) { |
522 | iter->eof++; |
523 | return 1; |
524 | } |
525 | |
526 | msc_iter_block_start(iter); |
527 | |
528 | return 0; |
529 | } |
530 | |
531 | static int msc_iter_block_advance(struct msc_iter *iter) |
532 | { |
533 | iter->block_off = 0; |
534 | |
535 | /* wrapping */ |
536 | if (iter->wrap_count && iter->block == iter->start_block) { |
537 | iter->wrap_count--; |
538 | if (!iter->wrap_count) |
539 | /* copied newest data from the wrapped block */ |
540 | return msc_iter_win_advance(iter); |
541 | } |
542 | |
543 | /* no wrapping, check for last written block */ |
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
545 | /* copied newest data for the window */ |
546 | return msc_iter_win_advance(iter); |
547 | |
548 | /* block advance */ |
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
551 | else |
552 | iter->block = sg_next(iter->block); |
553 | |
554 | /* no wrapping, sanity check in case there is no last written block */ |
555 | if (!iter->wrap_count && iter->block == iter->start_block) |
556 | return msc_iter_win_advance(iter); |
557 | |
558 | return 0; |
559 | } |
560 | |
561 | /** |
562 | * msc_buffer_iterate() - go through multiblock buffer's data |
563 | * @iter: iterator structure |
564 | * @size: amount of data to scan |
565 | * @data: callback's private data |
 * @fn: iterator callback; returns the number of bytes it could not copy
567 | * |
568 | * This will start at the window which will be written to next (containing |
569 | * the oldest data) and work its way to the current window, calling @fn |
570 | * for each chunk of data as it goes. |
571 | * |
572 | * Caller should have msc::user_count reference to make sure the buffer |
573 | * doesn't disappear from under us. |
574 | * |
575 | * Return: amount of data actually scanned. |
576 | */ |
577 | static ssize_t |
578 | msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data, |
579 | unsigned long (*fn)(void *, void *, size_t)) |
580 | { |
581 | struct msc *msc = iter->msc; |
582 | size_t len = size; |
583 | unsigned int advance; |
584 | |
585 | if (iter->eof) |
586 | return 0; |
587 | |
588 | /* start with the oldest window */ |
589 | if (msc_iter_win_start(iter, msc)) |
590 | return 0; |
591 | |
592 | do { |
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
594 | void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC; |
595 | size_t tocopy = data_bytes, copied = 0; |
596 | size_t remaining = 0; |
597 | |
598 | advance = 1; |
599 | |
600 | /* |
601 | * If block wrapping happened, we need to visit the last block |
602 | * twice, because it contains both the oldest and the newest |
603 | * data in this window. |
604 | * |
605 | * First time (wrap_count==2), in the very beginning, to collect |
606 | * the oldest data, which is in the range |
607 | * (data_bytes..DATA_IN_PAGE). |
608 | * |
609 | * Second time (wrap_count==1), it's just like any other block, |
610 | * containing data in the range of [MSC_BDESC..data_bytes]. |
611 | */ |
612 | if (iter->block == iter->start_block && iter->wrap_count == 2) { |
613 | tocopy = DATA_IN_PAGE - data_bytes; |
614 | src += data_bytes; |
615 | } |
616 | |
617 | if (!tocopy) |
618 | goto next_block; |
619 | |
620 | tocopy -= iter->block_off; |
621 | src += iter->block_off; |
622 | |
623 | if (len < tocopy) { |
624 | tocopy = len; |
625 | advance = 0; |
626 | } |
627 | |
628 | remaining = fn(data, src, tocopy); |
629 | |
630 | if (remaining) |
631 | advance = 0; |
632 | |
633 | copied = tocopy - remaining; |
634 | len -= copied; |
635 | iter->block_off += copied; |
636 | iter->offset += copied; |
637 | |
638 | if (!advance) |
639 | break; |
640 | |
641 | next_block: |
642 | if (msc_iter_block_advance(iter)) |
643 | break; |
644 | |
645 | } while (len); |
646 | |
647 | return size - len; |
648 | } |
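
/*
 * Note that @fn follows copy_to_user() conventions: it returns the
 * number of bytes it could *not* consume, so 0 means the whole chunk was
 * accepted and iteration may advance to the next block.
 * msc_win_to_user() below is the read(2) path's implementation of it.
 */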
649 | |
650 | /** |
651 | * msc_buffer_clear_hw_header() - clear hw header for multiblock |
652 | * @msc: MSC device |
653 | */ |
static void msc_buffer_clear_hw_header(struct msc *msc)
655 | { |
656 | struct msc_window *win; |
657 | struct scatterlist *sg; |
658 | |
659 | list_for_each_entry(win, &msc->win_list, entry) { |
660 | unsigned int blk; |
661 | |
662 | for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { |
663 | struct msc_block_desc *bdesc = sg_virt(sg); |
664 | |
665 | memset_startat(bdesc, 0, hw_tag); |
666 | } |
667 | } |
668 | } |
669 | |
670 | static int intel_th_msu_init(struct msc *msc) |
671 | { |
672 | u32 mintctl, msusts; |
673 | |
674 | if (!msc->do_irq) |
675 | return 0; |
676 | |
677 | if (!msc->mbuf) |
678 | return 0; |
679 | |
680 | mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); |
681 | mintctl |= msc->index ? M1BLIE : M0BLIE; |
682 | iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); |
683 | if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) { |
684 | dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n" ); |
685 | msc->do_irq = 0; |
686 | return 0; |
687 | } |
688 | |
689 | msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); |
690 | iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); |
691 | |
692 | return 0; |
693 | } |
694 | |
695 | static void intel_th_msu_deinit(struct msc *msc) |
696 | { |
697 | u32 mintctl; |
698 | |
699 | if (!msc->do_irq) |
700 | return; |
701 | |
702 | mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); |
703 | mintctl &= msc->index ? ~M1BLIE : ~M0BLIE; |
704 | iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); |
705 | } |
706 | |
707 | static int msc_win_set_lockout(struct msc_window *win, |
708 | enum lockout_state expect, |
709 | enum lockout_state new) |
710 | { |
711 | enum lockout_state old; |
712 | unsigned long flags; |
713 | int ret = 0; |
714 | |
715 | if (!win->msc->mbuf) |
716 | return 0; |
717 | |
718 | spin_lock_irqsave(&win->lo_lock, flags); |
719 | old = win->lockout; |
720 | |
721 | if (old != expect) { |
722 | ret = -EINVAL; |
723 | goto unlock; |
724 | } |
725 | |
726 | win->lockout = new; |
727 | |
728 | if (old == expect && new == WIN_LOCKED) |
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);
735 | |
736 | if (ret) { |
737 | if (expect == WIN_READY && old == WIN_LOCKED) |
738 | return -EBUSY; |
739 | |
740 | /* from intel_th_msc_window_unlock(), don't warn if not locked */ |
741 | if (expect == WIN_LOCKED && old == new) |
742 | return 0; |
743 | |
744 | dev_warn_ratelimited(msc_dev(win->msc), |
745 | "expected lockout state %d, got %d\n" , |
746 | expect, old); |
747 | } |
748 | |
749 | return ret; |
750 | } |
751 | /** |
752 | * msc_configure() - set up MSC hardware |
753 | * @msc: the MSC device to configure |
754 | * |
755 | * Program storage mode, wrapping, burst length and trace buffer address |
756 | * into a given MSC. Then, enable tracing and set msc::enabled. |
757 | * The latter is serialized on msc::buf_mutex, so make sure to hold it. |
758 | */ |
759 | static int msc_configure(struct msc *msc) |
760 | { |
761 | u32 reg; |
762 | |
763 | lockdep_assert_held(&msc->buf_mutex); |
764 | |
765 | if (msc->mode > MSC_MODE_MULTI) |
766 | return -EINVAL; |
767 | |
768 | if (msc->mode == MSC_MODE_MULTI) { |
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
770 | return -EBUSY; |
771 | |
772 | msc_buffer_clear_hw_header(msc); |
773 | } |
774 | |
775 | msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR); |
776 | msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE); |
777 | |
778 | reg = msc->base_addr >> PAGE_SHIFT; |
779 | iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR); |
780 | |
781 | if (msc->mode == MSC_MODE_SINGLE) { |
782 | reg = msc->nr_pages; |
783 | iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE); |
784 | } |
785 | |
786 | reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); |
787 | reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD); |
788 | |
789 | reg |= MSC_EN; |
790 | reg |= msc->mode << __ffs(MSC_MODE); |
791 | reg |= msc->burst_len << __ffs(MSC_LEN); |
792 | |
793 | if (msc->wrap) |
794 | reg |= MSC_WRAPEN; |
795 | |
796 | iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); |
797 | |
798 | intel_th_msu_init(msc); |
799 | |
800 | msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; |
	intel_th_trace_enable(msc->thdev);
802 | msc->enabled = 1; |
803 | |
804 | if (msc->mbuf && msc->mbuf->activate) |
805 | msc->mbuf->activate(msc->mbuf_priv); |
806 | |
807 | return 0; |
808 | } |
809 | |
810 | /** |
811 | * msc_disable() - disable MSC hardware |
812 | * @msc: MSC device to disable |
813 | * |
814 | * If @msc is enabled, disable tracing on the switch and then disable MSC |
815 | * storage. Caller must hold msc::buf_mutex. |
816 | */ |
817 | static void msc_disable(struct msc *msc) |
818 | { |
819 | struct msc_window *win = msc->cur_win; |
820 | u32 reg; |
821 | |
822 | lockdep_assert_held(&msc->buf_mutex); |
823 | |
824 | if (msc->mode == MSC_MODE_MULTI) |
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
826 | |
827 | if (msc->mbuf && msc->mbuf->deactivate) |
828 | msc->mbuf->deactivate(msc->mbuf_priv); |
829 | intel_th_msu_deinit(msc); |
	intel_th_trace_disable(msc->thdev);
831 | |
832 | if (msc->mode == MSC_MODE_SINGLE) { |
833 | reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); |
834 | msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); |
835 | |
836 | reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); |
837 | msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1); |
838 | dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n" , |
839 | reg, msc->single_sz, msc->single_wrap); |
840 | } |
841 | |
842 | reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); |
843 | reg &= ~MSC_EN; |
844 | iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); |
845 | |
846 | if (msc->mbuf && msc->mbuf->ready) |
847 | msc->mbuf->ready(msc->mbuf_priv, win->sgt, |
848 | msc_win_total_sz(win)); |
849 | |
850 | msc->enabled = 0; |
851 | |
852 | iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR); |
853 | iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE); |
854 | |
855 | dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n" , |
856 | ioread32(msc->reg_base + REG_MSU_MSC0NWSA)); |
857 | |
858 | reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); |
859 | dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n" , reg); |
860 | |
861 | reg = ioread32(msc->reg_base + REG_MSU_MSUSTS); |
862 | reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; |
863 | iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS); |
864 | } |
865 | |
866 | static int intel_th_msc_activate(struct intel_th_device *thdev) |
867 | { |
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
872 | return -ENODEV; |
873 | |
874 | mutex_lock(&msc->buf_mutex); |
875 | |
876 | /* if there are readers, refuse */ |
	if (list_empty(&msc->iter_list))
878 | ret = msc_configure(msc); |
879 | |
	mutex_unlock(&msc->buf_mutex);
881 | |
882 | if (ret) |
		atomic_dec(&msc->user_count);
884 | |
885 | return ret; |
886 | } |
887 | |
888 | static void intel_th_msc_deactivate(struct intel_th_device *thdev) |
889 | { |
	struct msc *msc = dev_get_drvdata(&thdev->dev);
891 | |
892 | mutex_lock(&msc->buf_mutex); |
893 | if (msc->enabled) { |
894 | msc_disable(msc); |
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
898 | } |
899 | |
900 | /** |
901 | * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode |
902 | * @msc: MSC device |
903 | * @size: allocation size in bytes |
904 | * |
905 | * This modifies msc::base, which requires msc::buf_mutex to serialize, so the |
906 | * caller is expected to hold it. |
907 | * |
908 | * Return: 0 on success, -errno otherwise. |
909 | */ |
910 | static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size) |
911 | { |
912 | unsigned long nr_pages = size >> PAGE_SHIFT; |
913 | unsigned int order = get_order(size); |
914 | struct page *page; |
915 | int ret; |
916 | |
917 | if (!size) |
918 | return 0; |
919 | |
920 | ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL); |
921 | if (ret) |
922 | goto err_out; |
923 | |
924 | ret = -ENOMEM; |
925 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order); |
926 | if (!page) |
927 | goto err_free_sgt; |
928 | |
929 | split_page(page, order); |
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
931 | |
932 | ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1, |
933 | DMA_FROM_DEVICE); |
934 | if (ret < 0) |
935 | goto err_free_pages; |
936 | |
937 | msc->nr_pages = nr_pages; |
938 | msc->base = page_address(page); |
939 | msc->base_addr = sg_dma_address(msc->single_sgt.sgl); |
940 | |
941 | return 0; |
942 | |
943 | err_free_pages: |
944 | __free_pages(page, order); |
945 | |
946 | err_free_sgt: |
947 | sg_free_table(&msc->single_sgt); |
948 | |
949 | err_out: |
950 | return ret; |
951 | } |
952 | |
953 | /** |
954 | * msc_buffer_contig_free() - free a contiguous buffer |
955 | * @msc: MSC configured in SINGLE mode |
956 | */ |
957 | static void msc_buffer_contig_free(struct msc *msc) |
958 | { |
959 | unsigned long off; |
960 | |
961 | dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, |
962 | 1, DMA_FROM_DEVICE); |
963 | sg_free_table(&msc->single_sgt); |
964 | |
965 | for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { |
966 | struct page *page = virt_to_page(msc->base + off); |
967 | |
968 | page->mapping = NULL; |
969 | __free_page(page); |
970 | } |
971 | |
972 | msc->nr_pages = 0; |
973 | } |
974 | |
975 | /** |
976 | * msc_buffer_contig_get_page() - find a page at a given offset |
977 | * @msc: MSC configured in SINGLE mode |
978 | * @pgoff: page offset |
979 | * |
980 | * Return: page, if @pgoff is within the range, NULL otherwise. |
981 | */ |
982 | static struct page *msc_buffer_contig_get_page(struct msc *msc, |
983 | unsigned long pgoff) |
984 | { |
985 | if (pgoff >= msc->nr_pages) |
986 | return NULL; |
987 | |
988 | return virt_to_page(msc->base + (pgoff << PAGE_SHIFT)); |
989 | } |
990 | |
991 | static int __msc_buffer_win_alloc(struct msc_window *win, |
992 | unsigned int nr_segs) |
993 | { |
994 | struct scatterlist *sg_ptr; |
995 | void *block; |
996 | int i, ret; |
997 | |
998 | ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL); |
999 | if (ret) |
1000 | return -ENOMEM; |
1001 | |
1002 | for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) { |
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
1010 | } |
1011 | |
1012 | return nr_segs; |
1013 | |
1014 | err_nomem: |
1015 | for_each_sg(win->sgt->sgl, sg_ptr, i, ret) |
1016 | dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, |
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));
1018 | |
1019 | sg_free_table(win->sgt); |
1020 | |
1021 | return -ENOMEM; |
1022 | } |
1023 | |
1024 | #ifdef CONFIG_X86 |
1025 | static void msc_buffer_set_uc(struct msc *msc) |
1026 | { |
1027 | struct scatterlist *sg_ptr; |
1028 | struct msc_window *win; |
1029 | int i; |
1030 | |
1031 | if (msc->mode == MSC_MODE_SINGLE) { |
		set_memory_uc((unsigned long)msc->base, msc->nr_pages);
1033 | return; |
1034 | } |
1035 | |
1036 | list_for_each_entry(win, &msc->win_list, entry) { |
1037 | for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) { |
1038 | /* Set the page as uncached */ |
			set_memory_uc((unsigned long)sg_virt(sg_ptr),
1040 | PFN_DOWN(sg_ptr->length)); |
1041 | } |
1042 | } |
1043 | } |
1044 | |
1045 | static void msc_buffer_set_wb(struct msc *msc) |
1046 | { |
1047 | struct scatterlist *sg_ptr; |
1048 | struct msc_window *win; |
1049 | int i; |
1050 | |
1051 | if (msc->mode == MSC_MODE_SINGLE) { |
		set_memory_wb((unsigned long)msc->base, msc->nr_pages);
1053 | return; |
1054 | } |
1055 | |
1056 | list_for_each_entry(win, &msc->win_list, entry) { |
1057 | for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) { |
1058 | /* Reset the page to write-back */ |
			set_memory_wb((unsigned long)sg_virt(sg_ptr),
1060 | PFN_DOWN(sg_ptr->length)); |
1061 | } |
1062 | } |
1063 | } |
1064 | #else /* !X86 */ |
1065 | static inline void |
1066 | msc_buffer_set_uc(struct msc *msc) {} |
1067 | static inline void msc_buffer_set_wb(struct msc *msc) {} |
1068 | #endif /* CONFIG_X86 */ |
1069 | |
1070 | static struct page *msc_sg_page(struct scatterlist *sg) |
1071 | { |
1072 | void *addr = sg_virt(sg); |
1073 | |
	if (is_vmalloc_addr(addr))
1075 | return vmalloc_to_page(addr); |
1076 | |
1077 | return sg_page(sg); |
1078 | } |
1079 | |
1080 | /** |
1081 | * msc_buffer_win_alloc() - alloc a window for a multiblock mode |
1082 | * @msc: MSC device |
1083 | * @nr_blocks: number of pages in this window |
1084 | * |
1085 | * This modifies msc::win_list and msc::base, which requires msc::buf_mutex |
1086 | * to serialize, so the caller is expected to hold it. |
1087 | * |
1088 | * Return: 0 on success, -errno otherwise. |
1089 | */ |
1090 | static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) |
1091 | { |
1092 | struct msc_window *win; |
1093 | int ret = -ENOMEM; |
1094 | |
1095 | if (!nr_blocks) |
1096 | return 0; |
1097 | |
	win = kzalloc(sizeof(*win), GFP_KERNEL);
1099 | if (!win) |
1100 | return -ENOMEM; |
1101 | |
1102 | win->msc = msc; |
1103 | win->sgt = &win->_sgt; |
1104 | win->lockout = WIN_READY; |
1105 | spin_lock_init(&win->lo_lock); |
1106 | |
	if (!list_empty(&msc->win_list)) {
1108 | struct msc_window *prev = list_last_entry(&msc->win_list, |
1109 | struct msc_window, |
1110 | entry); |
1111 | |
1112 | win->pgoff = prev->pgoff + prev->nr_blocks; |
1113 | } |
1114 | |
1115 | if (msc->mbuf && msc->mbuf->alloc_window) |
1116 | ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt, |
1117 | nr_blocks << PAGE_SHIFT); |
1118 | else |
		ret = __msc_buffer_win_alloc(win, nr_blocks);
1120 | |
1121 | if (ret <= 0) |
1122 | goto err_nomem; |
1123 | |
1124 | win->nr_segs = ret; |
1125 | win->nr_blocks = nr_blocks; |
1126 | |
	if (list_empty(&msc->win_list)) {
1128 | msc->base = msc_win_base(win); |
1129 | msc->base_addr = msc_win_base_dma(win); |
1130 | msc->cur_win = win; |
1131 | } |
1132 | |
	list_add_tail(&win->entry, &msc->win_list);
1134 | msc->nr_pages += nr_blocks; |
1135 | |
1136 | return 0; |
1137 | |
1138 | err_nomem: |
	kfree(win);
1140 | |
1141 | return ret; |
1142 | } |
1143 | |
1144 | static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) |
1145 | { |
1146 | struct scatterlist *sg; |
1147 | int i; |
1148 | |
1149 | for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) { |
1150 | struct page *page = msc_sg_page(sg); |
1151 | |
1152 | page->mapping = NULL; |
1153 | dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, |
				  sg_virt(sg), sg_dma_address(sg));
1155 | } |
1156 | sg_free_table(win->sgt); |
1157 | } |
1158 | |
1159 | /** |
1160 | * msc_buffer_win_free() - free a window from MSC's window list |
1161 | * @msc: MSC device |
1162 | * @win: window to free |
1163 | * |
1164 | * This modifies msc::win_list and msc::base, which requires msc::buf_mutex |
1165 | * to serialize, so the caller is expected to hold it. |
1166 | */ |
1167 | static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) |
1168 | { |
1169 | msc->nr_pages -= win->nr_blocks; |
1170 | |
	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
1173 | msc->base = NULL; |
1174 | msc->base_addr = 0; |
1175 | } |
1176 | |
1177 | if (msc->mbuf && msc->mbuf->free_window) |
1178 | msc->mbuf->free_window(msc->mbuf_priv, win->sgt); |
1179 | else |
1180 | __msc_buffer_win_free(msc, win); |
1181 | |
	kfree(win);
1183 | } |
1184 | |
1185 | /** |
1186 | * msc_buffer_relink() - set up block descriptors for multiblock mode |
1187 | * @msc: MSC device |
1188 | * |
1189 | * This traverses msc::win_list, which requires msc::buf_mutex to serialize, |
1190 | * so the caller is expected to hold it. |
1191 | */ |
1192 | static void msc_buffer_relink(struct msc *msc) |
1193 | { |
1194 | struct msc_window *win, *next_win; |
1195 | |
	/* call with msc::buf_mutex locked */
1197 | list_for_each_entry(win, &msc->win_list, entry) { |
1198 | struct scatterlist *sg; |
1199 | unsigned int blk; |
1200 | u32 sw_tag = 0; |
1201 | |
1202 | /* |
1203 | * Last window's next_win should point to the first window |
1204 | * and MSC_SW_TAG_LASTWIN should be set. |
1205 | */ |
1206 | if (msc_is_last_win(win)) { |
1207 | sw_tag |= MSC_SW_TAG_LASTWIN; |
1208 | next_win = list_first_entry(&msc->win_list, |
1209 | struct msc_window, entry); |
1210 | } else { |
1211 | next_win = list_next_entry(win, entry); |
1212 | } |
1213 | |
1214 | for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { |
1215 | struct msc_block_desc *bdesc = sg_virt(sg); |
1216 | |
1217 | memset(bdesc, 0, sizeof(*bdesc)); |
1218 | |
			bdesc->next_win = msc_win_base_pfn(next_win);
1220 | |
1221 | /* |
1222 | * Similarly to last window, last block should point |
1223 | * to the first one. |
1224 | */ |
1225 | if (blk == win->nr_segs - 1) { |
1226 | sw_tag |= MSC_SW_TAG_LASTBLK; |
1227 | bdesc->next_blk = msc_win_base_pfn(win); |
1228 | } else { |
1229 | dma_addr_t addr = sg_dma_address(sg_next(sg)); |
1230 | |
1231 | bdesc->next_blk = PFN_DOWN(addr); |
1232 | } |
1233 | |
1234 | bdesc->sw_tag = sw_tag; |
1235 | bdesc->block_sz = sg->length / 64; |
1236 | } |
1237 | } |
1238 | |
1239 | /* |
1240 | * Make the above writes globally visible before tracing is |
1241 | * enabled to make sure hardware sees them coherently. |
1242 | */ |
1243 | wmb(); |
1244 | } |
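
/*
 * After msc_buffer_relink(), every block descriptor carries PFN links to
 * the next block and to the base of the next window, with
 * MSC_SW_TAG_LASTBLK/MSC_SW_TAG_LASTWIN marking where the chain wraps
 * back to the start, so the hardware can walk the whole multiblock
 * buffer as one circular list.
 */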
1245 | |
1246 | static void msc_buffer_multi_free(struct msc *msc) |
1247 | { |
1248 | struct msc_window *win, *iter; |
1249 | |
1250 | list_for_each_entry_safe(win, iter, &msc->win_list, entry) |
1251 | msc_buffer_win_free(msc, win); |
1252 | } |
1253 | |
1254 | static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages, |
1255 | unsigned int nr_wins) |
1256 | { |
1257 | int ret, i; |
1258 | |
1259 | for (i = 0; i < nr_wins; i++) { |
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
1261 | if (ret) { |
1262 | msc_buffer_multi_free(msc); |
1263 | return ret; |
1264 | } |
1265 | } |
1266 | |
1267 | msc_buffer_relink(msc); |
1268 | |
1269 | return 0; |
1270 | } |
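
/*
 * For example (illustrative values only): nr_pages = { 64, 64 } with
 * nr_wins == 2 builds two windows of 64 pages each -- 256KiB per window
 * with 4KiB pages -- which msc_buffer_relink() then links together.
 */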
1271 | |
1272 | /** |
1273 | * msc_buffer_free() - free buffers for MSC |
1274 | * @msc: MSC device |
1275 | * |
1276 | * Free MSC's storage buffers. |
1277 | * |
1278 | * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to |
1279 | * serialize, so the caller is expected to hold it. |
1280 | */ |
1281 | static void msc_buffer_free(struct msc *msc) |
1282 | { |
1283 | msc_buffer_set_wb(msc); |
1284 | |
1285 | if (msc->mode == MSC_MODE_SINGLE) |
1286 | msc_buffer_contig_free(msc); |
1287 | else if (msc->mode == MSC_MODE_MULTI) |
1288 | msc_buffer_multi_free(msc); |
1289 | } |
1290 | |
1291 | /** |
1292 | * msc_buffer_alloc() - allocate a buffer for MSC |
1293 | * @msc: MSC device |
 * @nr_pages: array of @nr_wins window sizes, in pages
1295 | * |
1296 | * Allocate a storage buffer for MSC, depending on the msc::mode, it will be |
1297 | * either done via msc_buffer_contig_alloc() for SINGLE operation mode or |
1298 | * msc_buffer_win_alloc() for multiblock operation. The latter allocates one |
1299 | * window per invocation, so in multiblock mode this can be called multiple |
1300 | * times for the same MSC to allocate multiple windows. |
1301 | * |
1302 | * This modifies msc::win_list and msc::base, which requires msc::buf_mutex |
1303 | * to serialize, so the caller is expected to hold it. |
1304 | * |
1305 | * Return: 0 on success, -errno otherwise. |
1306 | */ |
1307 | static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages, |
1308 | unsigned int nr_wins) |
1309 | { |
1310 | int ret; |
1311 | |
1312 | /* -1: buffer not allocated */ |
	if (atomic_read(&msc->user_count) != -1)
1314 | return -EBUSY; |
1315 | |
1316 | if (msc->mode == MSC_MODE_SINGLE) { |
1317 | if (nr_wins != 1) |
1318 | return -EINVAL; |
1319 | |
		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
1321 | } else if (msc->mode == MSC_MODE_MULTI) { |
1322 | ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins); |
1323 | } else { |
1324 | ret = -EINVAL; |
1325 | } |
1326 | |
1327 | if (!ret) { |
1328 | msc_buffer_set_uc(msc); |
1329 | |
1330 | /* allocation should be visible before the counter goes to 0 */ |
1331 | smp_mb__before_atomic(); |
1332 | |
1333 | if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1)) |
1334 | return -EINVAL; |
1335 | } |
1336 | |
1337 | return ret; |
1338 | } |
1339 | |
1340 | /** |
1341 | * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use |
1342 | * @msc: MSC device |
1343 | * |
1344 | * This will free MSC buffer unless it is in use or there is no allocated |
1345 | * buffer. |
1346 | * Caller needs to hold msc::buf_mutex. |
1347 | * |
1348 | * Return: 0 on successful deallocation or if there was no buffer to |
1349 | * deallocate, -EBUSY if there are active users. |
1350 | */ |
1351 | static int msc_buffer_unlocked_free_unless_used(struct msc *msc) |
1352 | { |
1353 | int count, ret = 0; |
1354 | |
	count = atomic_cmpxchg(&msc->user_count, 0, -1);
1356 | |
1357 | /* > 0: buffer is allocated and has users */ |
1358 | if (count > 0) |
1359 | ret = -EBUSY; |
1360 | /* 0: buffer is allocated, no users */ |
1361 | else if (!count) |
1362 | msc_buffer_free(msc); |
1363 | /* < 0: no buffer, nothing to do */ |
1364 | |
1365 | return ret; |
1366 | } |
1367 | |
1368 | /** |
1369 | * msc_buffer_free_unless_used() - free a buffer unless it's in use |
1370 | * @msc: MSC device |
1371 | * |
1372 | * This is a locked version of msc_buffer_unlocked_free_unless_used(). |
1373 | */ |
1374 | static int msc_buffer_free_unless_used(struct msc *msc) |
1375 | { |
1376 | int ret; |
1377 | |
1378 | mutex_lock(&msc->buf_mutex); |
1379 | ret = msc_buffer_unlocked_free_unless_used(msc); |
	mutex_unlock(&msc->buf_mutex);
1381 | |
1382 | return ret; |
1383 | } |
1384 | |
1385 | /** |
1386 | * msc_buffer_get_page() - get MSC buffer page at a given offset |
1387 | * @msc: MSC device |
1388 | * @pgoff: page offset into the storage buffer |
1389 | * |
1390 | * This traverses msc::win_list, so holding msc::buf_mutex is expected from |
1391 | * the caller. |
1392 | * |
1393 | * Return: page if @pgoff corresponds to a valid buffer page or NULL. |
1394 | */ |
1395 | static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff) |
1396 | { |
1397 | struct msc_window *win; |
1398 | struct scatterlist *sg; |
1399 | unsigned int blk; |
1400 | |
1401 | if (msc->mode == MSC_MODE_SINGLE) |
1402 | return msc_buffer_contig_get_page(msc, pgoff); |
1403 | |
1404 | list_for_each_entry(win, &msc->win_list, entry) |
1405 | if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks) |
1406 | goto found; |
1407 | |
1408 | return NULL; |
1409 | |
1410 | found: |
1411 | pgoff -= win->pgoff; |
1412 | |
1413 | for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { |
1414 | struct page *page = msc_sg_page(sg); |
1415 | size_t pgsz = PFN_DOWN(sg->length); |
1416 | |
1417 | if (pgoff < pgsz) |
1418 | return page + pgoff; |
1419 | |
1420 | pgoff -= pgsz; |
1421 | } |
1422 | |
1423 | return NULL; |
1424 | } |
1425 | |
1426 | /** |
1427 | * struct msc_win_to_user_struct - data for copy_to_user() callback |
1428 | * @buf: userspace buffer to copy data to |
1429 | * @offset: running offset |
1430 | */ |
1431 | struct msc_win_to_user_struct { |
1432 | char __user *buf; |
1433 | unsigned long offset; |
1434 | }; |
1435 | |
1436 | /** |
1437 | * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user |
1438 | * @data: callback's private data |
1439 | * @src: source buffer |
1440 | * @len: amount of data to copy from the source buffer |
1441 | */ |
1442 | static unsigned long msc_win_to_user(void *data, void *src, size_t len) |
1443 | { |
1444 | struct msc_win_to_user_struct *u = data; |
1445 | unsigned long ret; |
1446 | |
	ret = copy_to_user(u->buf + u->offset, src, len);
1448 | u->offset += len - ret; |
1449 | |
1450 | return ret; |
1451 | } |
1452 | |
1453 | |
1454 | /* |
1455 | * file operations' callbacks |
1456 | */ |
1457 | |
1458 | static int intel_th_msc_open(struct inode *inode, struct file *file) |
1459 | { |
1460 | struct intel_th_device *thdev = file->private_data; |
	struct msc *msc = dev_get_drvdata(&thdev->dev);
1462 | struct msc_iter *iter; |
1463 | |
1464 | if (!capable(CAP_SYS_RAWIO)) |
1465 | return -EPERM; |
1466 | |
1467 | iter = msc_iter_install(msc); |
	if (IS_ERR(iter))
		return PTR_ERR(iter);
1470 | |
1471 | file->private_data = iter; |
1472 | |
	return nonseekable_open(inode, file);
1474 | } |
1475 | |
1476 | static int intel_th_msc_release(struct inode *inode, struct file *file) |
1477 | { |
1478 | struct msc_iter *iter = file->private_data; |
1479 | struct msc *msc = iter->msc; |
1480 | |
1481 | msc_iter_remove(iter, msc); |
1482 | |
1483 | return 0; |
1484 | } |
1485 | |
1486 | static ssize_t |
1487 | msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) |
1488 | { |
1489 | unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len; |
1490 | unsigned long start = off, tocopy = 0; |
1491 | |
1492 | if (msc->single_wrap) { |
1493 | start += msc->single_sz; |
1494 | if (start < size) { |
1495 | tocopy = min(rem, size - start); |
			if (copy_to_user(buf, msc->base + start, tocopy))
1497 | return -EFAULT; |
1498 | |
1499 | buf += tocopy; |
1500 | rem -= tocopy; |
1501 | start += tocopy; |
1502 | } |
1503 | |
1504 | start &= size - 1; |
1505 | if (rem) { |
1506 | tocopy = min(rem, msc->single_sz - start); |
			if (copy_to_user(buf, msc->base + start, tocopy))
1508 | return -EFAULT; |
1509 | |
1510 | rem -= tocopy; |
1511 | } |
1512 | |
1513 | return len - rem; |
1514 | } |
1515 | |
	if (copy_to_user(buf, msc->base + start, rem))
1517 | return -EFAULT; |
1518 | |
1519 | return len; |
1520 | } |
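
/*
 * To illustrate the wrapped case above: with the buffer size being
 * nr_pages << PAGE_SHIFT and the write pointer at single_sz, the oldest
 * data lives in [single_sz..size) and the newest in [0..single_sz),
 * which is the order in which the two copy_to_user() calls hand it out.
 */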
1521 | |
1522 | static ssize_t intel_th_msc_read(struct file *file, char __user *buf, |
1523 | size_t len, loff_t *ppos) |
1524 | { |
1525 | struct msc_iter *iter = file->private_data; |
1526 | struct msc *msc = iter->msc; |
1527 | size_t size; |
1528 | loff_t off = *ppos; |
1529 | ssize_t ret = 0; |
1530 | |
	if (!atomic_inc_unless_negative(&msc->user_count))
1532 | return 0; |
1533 | |
1534 | if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap) |
1535 | size = msc->single_sz; |
1536 | else |
1537 | size = msc->nr_pages << PAGE_SHIFT; |
1538 | |
1539 | if (!size) |
1540 | goto put_count; |
1541 | |
1542 | if (off >= size) |
1543 | goto put_count; |
1544 | |
1545 | if (off + len >= size) |
1546 | len = size - off; |
1547 | |
1548 | if (msc->mode == MSC_MODE_SINGLE) { |
1549 | ret = msc_single_to_user(msc, buf, off, len); |
1550 | if (ret >= 0) |
1551 | *ppos += ret; |
1552 | } else if (msc->mode == MSC_MODE_MULTI) { |
1553 | struct msc_win_to_user_struct u = { |
1554 | .buf = buf, |
1555 | .offset = 0, |
1556 | }; |
1557 | |
		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
1559 | if (ret >= 0) |
1560 | *ppos = iter->offset; |
1561 | } else { |
1562 | ret = -EINVAL; |
1563 | } |
1564 | |
1565 | put_count: |
	atomic_dec(&msc->user_count);
1567 | |
1568 | return ret; |
1569 | } |
1570 | |
1571 | /* |
1572 | * vm operations callbacks (vm_ops) |
1573 | */ |
1574 | |
1575 | static void msc_mmap_open(struct vm_area_struct *vma) |
1576 | { |
1577 | struct msc_iter *iter = vma->vm_file->private_data; |
1578 | struct msc *msc = iter->msc; |
1579 | |
	atomic_inc(&msc->mmap_count);
1581 | } |
1582 | |
1583 | static void msc_mmap_close(struct vm_area_struct *vma) |
1584 | { |
1585 | struct msc_iter *iter = vma->vm_file->private_data; |
1586 | struct msc *msc = iter->msc; |
1587 | unsigned long pg; |
1588 | |
	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
1590 | return; |
1591 | |
1592 | /* drop page _refcounts */ |
1593 | for (pg = 0; pg < msc->nr_pages; pg++) { |
		struct page *page = msc_buffer_get_page(msc, pg);
1595 | |
1596 | if (WARN_ON_ONCE(!page)) |
1597 | continue; |
1598 | |
1599 | if (page->mapping) |
1600 | page->mapping = NULL; |
1601 | } |
1602 | |
1603 | /* last mapping -- drop user_count */ |
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
1606 | } |
1607 | |
1608 | static vm_fault_t msc_mmap_fault(struct vm_fault *vmf) |
1609 | { |
1610 | struct msc_iter *iter = vmf->vma->vm_file->private_data; |
1611 | struct msc *msc = iter->msc; |
1612 | |
	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
1614 | if (!vmf->page) |
1615 | return VM_FAULT_SIGBUS; |
1616 | |
	get_page(vmf->page);
1618 | vmf->page->mapping = vmf->vma->vm_file->f_mapping; |
1619 | vmf->page->index = vmf->pgoff; |
1620 | |
1621 | return 0; |
1622 | } |
1623 | |
1624 | static const struct vm_operations_struct msc_mmap_ops = { |
1625 | .open = msc_mmap_open, |
1626 | .close = msc_mmap_close, |
1627 | .fault = msc_mmap_fault, |
1628 | }; |
1629 | |
1630 | static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma) |
1631 | { |
1632 | unsigned long size = vma->vm_end - vma->vm_start; |
1633 | struct msc_iter *iter = vma->vm_file->private_data; |
1634 | struct msc *msc = iter->msc; |
1635 | int ret = -EINVAL; |
1636 | |
1637 | if (!size || offset_in_page(size)) |
1638 | return -EINVAL; |
1639 | |
1640 | if (vma->vm_pgoff) |
1641 | return -EINVAL; |
1642 | |
1643 | /* grab user_count once per mmap; drop in msc_mmap_close() */ |
	if (!atomic_inc_unless_negative(&msc->user_count))
1645 | return -EINVAL; |
1646 | |
1647 | if (msc->mode != MSC_MODE_SINGLE && |
1648 | msc->mode != MSC_MODE_MULTI) |
1649 | goto out; |
1650 | |
1651 | if (size >> PAGE_SHIFT != msc->nr_pages) |
1652 | goto out; |
1653 | |
	atomic_set(&msc->mmap_count, 1);
1655 | ret = 0; |
1656 | |
1657 | out: |
1658 | if (ret) |
		atomic_dec(&msc->user_count);
1660 | |
1661 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
1662 | vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY); |
1663 | vma->vm_ops = &msc_mmap_ops; |
1664 | return ret; |
1665 | } |
1666 | |
1667 | static const struct file_operations intel_th_msc_fops = { |
1668 | .open = intel_th_msc_open, |
1669 | .release = intel_th_msc_release, |
1670 | .read = intel_th_msc_read, |
1671 | .mmap = intel_th_msc_mmap, |
1672 | .llseek = no_llseek, |
1673 | .owner = THIS_MODULE, |
1674 | }; |
1675 | |
1676 | static void intel_th_msc_wait_empty(struct intel_th_device *thdev) |
1677 | { |
	struct msc *msc = dev_get_drvdata(&thdev->dev);
1679 | unsigned long count; |
1680 | u32 reg; |
1681 | |
1682 | for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; |
1683 | count && !(reg & MSCSTS_PLE); count--) { |
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
1685 | cpu_relax(); |
1686 | } |
1687 | |
1688 | if (!count) |
1689 | dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n" ); |
1690 | } |
1691 | |
1692 | static int intel_th_msc_init(struct msc *msc) |
1693 | { |
	atomic_set(&msc->user_count, -1);
1695 | |
1696 | msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI; |
1697 | mutex_init(&msc->buf_mutex); |
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);
1700 | |
1701 | msc->burst_len = |
1702 | (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >> |
1703 | __ffs(MSC_LEN); |
1704 | |
1705 | return 0; |
1706 | } |
1707 | |
1708 | static int msc_win_switch(struct msc *msc) |
1709 | { |
1710 | struct msc_window *first; |
1711 | |
	if (list_empty(&msc->win_list))
1713 | return -EINVAL; |
1714 | |
1715 | first = list_first_entry(&msc->win_list, struct msc_window, entry); |
1716 | |
	if (msc_is_last_win(msc->cur_win))
1718 | msc->cur_win = first; |
1719 | else |
1720 | msc->cur_win = list_next_entry(msc->cur_win, entry); |
1721 | |
	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);
1724 | |
	intel_th_trace_switch(msc->thdev);
1726 | |
1727 | return 0; |
1728 | } |
1729 | |
1730 | /** |
1731 | * intel_th_msc_window_unlock - put the window back in rotation |
1732 | * @dev: MSC device to which this relates |
1733 | * @sgt: buffer's sg_table for the window, does nothing if NULL |
1734 | */ |
1735 | void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt) |
1736 | { |
1737 | struct msc *msc = dev_get_drvdata(dev); |
1738 | struct msc_window *win; |
1739 | |
1740 | if (!sgt) |
1741 | return; |
1742 | |
	win = msc_find_window(msc, sgt, false);
1744 | if (!win) |
1745 | return; |
1746 | |
	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
1748 | if (msc->switch_on_unlock == win) { |
1749 | msc->switch_on_unlock = NULL; |
1750 | msc_win_switch(msc); |
1751 | } |
1752 | } |
1753 | EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock); |
1754 | |
1755 | static void msc_work(struct work_struct *work) |
1756 | { |
1757 | struct msc *msc = container_of(work, struct msc, work); |
1758 | |
	intel_th_msc_deactivate(msc->thdev);
1760 | } |
1761 | |
1762 | static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev) |
1763 | { |
	struct msc *msc = dev_get_drvdata(&thdev->dev);
1765 | u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); |
1766 | u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; |
1767 | struct msc_window *win, *next_win; |
1768 | |
1769 | if (!msc->do_irq || !msc->mbuf) |
1770 | return IRQ_NONE; |
1771 | |
1772 | msusts &= mask; |
1773 | |
1774 | if (!msusts) |
1775 | return msc->enabled ? IRQ_HANDLED : IRQ_NONE; |
1776 | |
1777 | iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); |
1778 | |
1779 | if (!msc->enabled) |
1780 | return IRQ_NONE; |
1781 | |
1782 | /* grab the window before we do the switch */ |
1783 | win = msc->cur_win; |
1784 | if (!win) |
1785 | return IRQ_HANDLED; |
1786 | next_win = msc_next_window(win); |
1787 | if (!next_win) |
1788 | return IRQ_HANDLED; |
1789 | |
1790 | /* next window: if READY, proceed, if LOCKED, stop the trace */ |
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		if (msc->stop_on_full)
			schedule_work(&msc->work);
1794 | else |
1795 | msc->switch_on_unlock = next_win; |
1796 | |
1797 | return IRQ_HANDLED; |
1798 | } |
1799 | |
1800 | /* current window: INUSE -> LOCKED */ |
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
1802 | |
1803 | msc_win_switch(msc); |
1804 | |
1805 | if (msc->mbuf && msc->mbuf->ready) |
1806 | msc->mbuf->ready(msc->mbuf_priv, win->sgt, |
1807 | msc_win_total_sz(win)); |
1808 | |
1809 | return IRQ_HANDLED; |
1810 | } |
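
/*
 * In short: a window-full (BLAST) interrupt locks the window that just
 * filled up, switches tracing to the next READY window and hands the
 * full one to the buffer sink via ->ready(). If the next window is
 * still LOCKED, the trace is either stopped (msc::stop_on_full) or the
 * switch is deferred until that window is unlocked.
 */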
1811 | |
1812 | static const char * const msc_mode[] = { |
1813 | [MSC_MODE_SINGLE] = "single" , |
1814 | [MSC_MODE_MULTI] = "multi" , |
1815 | [MSC_MODE_EXI] = "ExI" , |
1816 | [MSC_MODE_DEBUG] = "debug" , |
1817 | }; |
1818 | |
1819 | static ssize_t |
1820 | wrap_show(struct device *dev, struct device_attribute *attr, char *buf) |
1821 | { |
1822 | struct msc *msc = dev_get_drvdata(dev); |
1823 | |
1824 | return scnprintf(buf, PAGE_SIZE, fmt: "%d\n" , msc->wrap); |
1825 | } |
1826 | |
1827 | static ssize_t |
1828 | wrap_store(struct device *dev, struct device_attribute *attr, const char *buf, |
1829 | size_t size) |
1830 | { |
1831 | struct msc *msc = dev_get_drvdata(dev); |
1832 | unsigned long val; |
1833 | int ret; |
1834 | |
	ret = kstrtoul(buf, 10, &val);
1836 | if (ret) |
1837 | return ret; |
1838 | |
1839 | msc->wrap = !!val; |
1840 | |
1841 | return size; |
1842 | } |
1843 | |
1844 | static DEVICE_ATTR_RW(wrap); |
1845 | |
1846 | static void msc_buffer_unassign(struct msc *msc) |
1847 | { |
1848 | lockdep_assert_held(&msc->buf_mutex); |
1849 | |
1850 | if (!msc->mbuf) |
1851 | return; |
1852 | |
1853 | msc->mbuf->unassign(msc->mbuf_priv); |
	msu_buffer_put(msc->mbuf);
1855 | msc->mbuf_priv = NULL; |
1856 | msc->mbuf = NULL; |
1857 | } |
1858 | |
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

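/*
 * "nr_pages" attribute: configures the buffer allocation.  In "single"
 * mode it takes one page count; in "multi" mode it takes a comma-separated
 * list of per-window page counts.  Writing requires CAP_SYS_RAWIO and
 * reallocates the buffer, which fails if the buffer is in use; reading
 * back shows the current layout.  Illustrative usage (path is an example):
 *
 *	echo 64,64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 */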
static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

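/*
 * "win_switch" attribute: writing 1 forces a window switch by software.
 * Only meaningful in "multi" mode and only while no external buffer is
 * engaged, since a sink controls switching itself.  Illustrative usage
 * (path is an example):
 *
 *	echo 1 > /sys/bus/intel_th/devices/0-msc0/win_switch
 */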
static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switches can only happen in "multi" mode.
	 * If an external buffer is engaged, it has full control
	 * over window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

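/*
 * "stop_on_full" attribute: a boolean controlling what happens when the
 * next window is still locked by the consumer at switch time.  When set,
 * the trace is stopped (see the interrupt handler above); when clear, the
 * switch is deferred until the window is unlocked.  Illustrative usage
 * (path is an example):
 *
 *	echo 1 > /sys/bus/intel_th/devices/0-msc0/stop_on_full
 */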
static ssize_t stop_on_full_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", msc->stop_on_full);
}

static ssize_t stop_on_full_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	int ret;

	ret = kstrtobool(buf, &msc->stop_on_full);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR_RW(stop_on_full);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	&dev_attr_stop_on_full.attr,
	NULL,
};

static const struct attribute_group msc_output_group = {
	.attrs = msc_output_attrs,
};

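/*
 * Probe: map the MSU register space, allocate the per-MSC state, decide
 * whether this instance handles the MSU interrupt itself (msc->do_irq),
 * record the multi_is_broken quirk from the device capabilities and point
 * reg_base at this MSC's register bank (each MSC occupies 0x100 bytes of
 * the MSU register space).
 */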
static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe		= intel_th_msc_probe,
	.remove		= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops		= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");