1// SPDX-License-Identifier: GPL-2.0
2/*
3 * trace_events_trigger - trace event triggers
4 *
5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8#include <linux/module.h>
9#include <linux/ctype.h>
10#include <linux/mutex.h>
11#include <linux/slab.h>
12#include <linux/rculist.h>
13
14#include "trace.h"
15
16static LIST_HEAD(trigger_commands);
17static DEFINE_MUTEX(trigger_cmd_mutex);
18
/*
 * Free an event_trigger_data instance, first detaching its filter and
 * waiting for all in-flight trigger invocations to drain.
 */
void trigger_data_free(struct event_trigger_data *data)
{
	/* Detach the filter first so no new invocation can pick it up. */
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/*
	 * make sure current triggers exit before free: the trigger list
	 * is walked from tracepoint handlers under rcu_read_lock_sched(),
	 * so wait for all current readers before kfree().
	 */
	tracepoint_synchronize_unregister();

	kfree(data);
}
29
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event for the event record
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command. If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked. If rec is non-NULL and the
 * trigger has a filter associated with it, rec will checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			/* No record: invoke unconditionally, skip filtering. */
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* Defer: record which trigger type must run post-commit. */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
84
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		/* Only run the triggers that were deferred earlier. */
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
111
112#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
113
114static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
115{
116 struct trace_event_file *event_file = event_file_data(m->private);
117
118 if (t == SHOW_AVAILABLE_TRIGGERS)
119 return NULL;
120
121 return seq_list_next(t, &event_file->triggers, pos);
122}
123
124static void *trigger_start(struct seq_file *m, loff_t *pos)
125{
126 struct trace_event_file *event_file;
127
128 /* ->stop() is called even if ->start() fails */
129 mutex_lock(&event_mutex);
130 event_file = event_file_data(m->private);
131 if (unlikely(!event_file))
132 return ERR_PTR(-ENODEV);
133
134 if (list_empty(&event_file->triggers))
135 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
136
137 return seq_list_start(&event_file->triggers, *pos);
138}
139
/* seq_file ->stop(): drop the mutex taken in trigger_start(). */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
144
/* seq_file ->show(): print one trigger, or the "available triggers" banner. */
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		/* Commands are list_add()ed at the head, so walk in reverse
		 * to print them in registration order. */
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}
166
/* seq_file iterator for reading a per-event "trigger" file. */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
173
/*
 * Open a per-event "trigger" file.  O_TRUNC on a writable open clears
 * all existing triggers; a readable open sets up the seq_file iterator.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	/* The event may have been removed while the path was being opened. */
	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		/*
		 * NOTE(review): trigger_commands is walked here without
		 * trigger_cmd_mutex; presumably safe because commands are
		 * only (un)registered from __init — confirm.
		 */
		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			/* Let the seq iterator reach the trace_event_file. */
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
210
211static int trigger_process_regex(struct trace_event_file *file, char *buff)
212{
213 char *command, *next = buff;
214 struct event_command *p;
215 int ret = -EINVAL;
216
217 command = strsep(&next, ": \t");
218 command = (command[0] != '!') ? command : command + 1;
219
220 mutex_lock(&trigger_cmd_mutex);
221 list_for_each_entry(p, &trigger_commands, list) {
222 if (strcmp(p->name, command) == 0) {
223 ret = p->func(p, file, buff, command, next);
224 goto out_unlock;
225 }
226 }
227 out_unlock:
228 mutex_unlock(&trigger_cmd_mutex);
229
230 return ret;
231}
232
233static ssize_t event_trigger_regex_write(struct file *file,
234 const char __user *ubuf,
235 size_t cnt, loff_t *ppos)
236{
237 struct trace_event_file *event_file;
238 ssize_t ret;
239 char *buf;
240
241 if (!cnt)
242 return 0;
243
244 if (cnt >= PAGE_SIZE)
245 return -EINVAL;
246
247 buf = memdup_user_nul(ubuf, cnt);
248 if (IS_ERR(buf))
249 return PTR_ERR(buf);
250
251 strim(buf);
252
253 mutex_lock(&event_mutex);
254 event_file = event_file_data(file);
255 if (unlikely(!event_file)) {
256 mutex_unlock(&event_mutex);
257 kfree(buf);
258 return -ENODEV;
259 }
260 ret = trigger_process_regex(event_file, buf);
261 mutex_unlock(&event_mutex);
262
263 kfree(buf);
264 if (ret < 0)
265 goto out;
266
267 *ppos += cnt;
268 ret = cnt;
269 out:
270 return ret;
271}
272
/* Release the "trigger" file: tear down seq_file state for readable opens. */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
284
/* "trigger" file ->write: thin wrapper around the regex writer. */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
291
/* "trigger" file ->open: thin wrapper around the regex open. */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}
297
/* "trigger" file ->release: thin wrapper around the regex release. */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
303
/* File operations for the per-event "trigger" control file. */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
311
312/*
313 * Currently we only register event commands from __init, so mark this
314 * __init too.
315 */
316__init int register_event_command(struct event_command *cmd)
317{
318 struct event_command *p;
319 int ret = 0;
320
321 mutex_lock(&trigger_cmd_mutex);
322 list_for_each_entry(p, &trigger_commands, list) {
323 if (strcmp(cmd->name, p->name) == 0) {
324 ret = -EBUSY;
325 goto out_unlock;
326 }
327 }
328 list_add(&cmd->list, &trigger_commands);
329 out_unlock:
330 mutex_unlock(&trigger_cmd_mutex);
331
332 return ret;
333}
334
335/*
336 * Currently we only unregister event commands from __init, so mark
337 * this __init too.
338 */
339__init int unregister_event_command(struct event_command *cmd)
340{
341 struct event_command *p, *n;
342 int ret = -ENODEV;
343
344 mutex_lock(&trigger_cmd_mutex);
345 list_for_each_entry_safe(p, n, &trigger_commands, list) {
346 if (strcmp(cmd->name, p->name) == 0) {
347 ret = 0;
348 list_del_init(&p->list);
349 goto out_unlock;
350 }
351 }
352 out_unlock:
353 mutex_unlock(&trigger_cmd_mutex);
354
355 return ret;
356}
357
/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data (the remaining count, cast to a pointer)
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves as
 * "name[:count=N| :unlimited][ if filter]".
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	/* -1 is the sentinel for "no count given". */
	if (count != -1)
		seq_printf(m, ":count=%ld", count);
	else
		seq_puts(m, ":unlimited");

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
392
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization: takes a
 * reference on @data.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	/* Non-atomic increment — presumably serialized by event_mutex; confirm. */
	data->ref++;
	return 0;
}
411
/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization: drops one
 * reference on @data and frees it when the last reference goes away.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	/* A free without a matching init is a refcounting bug. */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}
433
/*
 * Enable or disable "trigger mode" on an event.  tm_ref counts the
 * triggers set on the event; only the first enable and the last
 * disable actually flip the TRIGGER_MODE bit and the event itself.
 */
int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		/* Only the first trigger enables the event. */
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		/* Only removal of the last trigger disables it again. */
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
453
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable(). That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		/* _safe: each trigger is unlinked (and maybe freed) as we go. */
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}
482
/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	/* One conditional trigger is enough to require deferral. */
	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}
511
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 or 1) on success,
 * errno otherwise (see the caller's "# of functions enabled" comment
 * in event_trigger_callback()).
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	/* Only one trigger of a given type may be set on an event. */
	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;	/* report one trigger registered */

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/*
		 * NOTE(review): the reference taken by ->init() above is not
		 * dropped on this failure path — looks like a leak; confirm.
		 */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
558
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	/* Match by trigger type: at most one of each type is registered. */
	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'data' is only valid here if the loop broke out early. */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
591
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;	/* default: unlimited invocations */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	/* A leading '!' means "remove this trigger" rather than add it. */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		/* The count is the token before the first ':' (if any). */
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
696
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	/* The filter clause must begin with the literal token "if". */
	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	/* RCU-swap the filter; concurrent readers see old or new, never junk. */
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		/* Keep a copy of the string so the trigger can print itself. */
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
764
765static LIST_HEAD(named_triggers);
766
767/**
768 * find_named_trigger - Find the common named trigger associated with @name
769 * @name: The name of the set of named triggers to find the common data for
770 *
771 * Named triggers are sets of triggers that share a common set of
772 * trigger data. The first named trigger registered with a given name
773 * owns the common trigger data that the others subsequently
774 * registered with the same name will reference. This function
775 * returns the common trigger data associated with that first
776 * registered instance.
777 *
778 * Return: the common trigger data for the given named trigger on
779 * success, NULL otherwise.
780 */
781struct event_trigger_data *find_named_trigger(const char *name)
782{
783 struct event_trigger_data *data;
784
785 if (!name)
786 return NULL;
787
788 list_for_each_entry(data, &named_triggers, named_list) {
789 if (data->named_data)
790 continue;
791 if (strcmp(data->name, name) == 0)
792 return data;
793 }
794
795 return NULL;
796}
797
798/**
799 * is_named_trigger - determine if a given trigger is a named trigger
800 * @test: The trigger data to test
801 *
802 * Return: true if 'test' is a named trigger, false otherwise.
803 */
804bool is_named_trigger(struct event_trigger_data *test)
805{
806 struct event_trigger_data *data;
807
808 list_for_each_entry(data, &named_triggers, named_list) {
809 if (test == data)
810 return true;
811 }
812
813 return false;
814}
815
/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Duplicates @name into @data->name and links @data onto the global
 * named trigger list.
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}
833
/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 *
 * Frees the name duplicated by save_named_trigger() and unlinks @data.
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}
845
846static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
847{
848 struct event_trigger_data *test;
849
850 list_for_each_entry(test, &named_triggers, named_list) {
851 if (strcmp(test->name, data->name) == 0) {
852 if (pause) {
853 test->paused_tmp = test->paused;
854 test->paused = true;
855 } else {
856 test->paused = test->paused_tmp;
857 }
858 }
859 }
860}
861
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name. Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}
875
/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name. Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}
889
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data that will reference the common @named_data
 * @named_data: The common trigger data owned by the first-registered trigger
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data. The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference. This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
906
/* Return the common named trigger data @data references (NULL if owner). */
struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
912
/* Trigger: turn tracing on (no-op if it is already on). */
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		tracing_on();
}
922
923static void
924traceon_count_trigger(struct event_trigger_data *data, void *rec,
925 struct ring_buffer_event *event)
926{
927 if (tracing_is_on())
928 return;
929
930 if (!data->count)
931 return;
932
933 if (data->count != -1)
934 (data->count)--;
935
936 tracing_on();
937}
938
/* Trigger: turn tracing off (no-op if it is already off). */
static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (tracing_is_on())
		tracing_off();
}
948
949static void
950traceoff_count_trigger(struct event_trigger_data *data, void *rec,
951 struct ring_buffer_event *event)
952{
953 if (!tracing_is_on())
954 return;
955
956 if (!data->count)
957 return;
958
959 if (data->count != -1)
960 (data->count)--;
961
962 tracing_off();
963}
964
/* Print "traceon[:count=N|:unlimited][ if filter]" in the trigger file. */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
972
/* Print "traceoff[:count=N|:unlimited][ if filter]" in the trigger file. */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
980
/* Plain and counting ops for traceon/traceoff; selected by
 * onoff_get_trigger_ops() based on whether a count param was given. */
static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
1008
1009static struct event_trigger_ops *
1010onoff_get_trigger_ops(char *cmd, char *param)
1011{
1012 struct event_trigger_ops *ops;
1013
1014 /* we register both traceon and traceoff to this callback */
1015 if (strcmp(cmd, "traceon") == 0)
1016 ops = param ? &traceon_count_trigger_ops :
1017 &traceon_trigger_ops;
1018 else
1019 ops = param ? &traceoff_count_trigger_ops :
1020 &traceoff_trigger_ops;
1021
1022 return ops;
1023}
1024
/* "traceon" trigger command: turns tracing on when the event fires. */
static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
1034
/*
 * "traceoff" trigger command.  POST_TRIGGER defers it until after the
 * triggering event is written, so that event itself still gets traced.
 */
static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
1045
1046#ifdef CONFIG_TRACER_SNAPSHOT
1047static void
1048snapshot_trigger(struct event_trigger_data *data, void *rec,
1049 struct ring_buffer_event *event)
1050{
1051 struct trace_event_file *file = data->private_data;
1052
1053 if (file)
1054 tracing_snapshot_instance(file->tr);
1055 else
1056 tracing_snapshot();
1057}
1058
/* Counting variant of the snapshot trigger: fires at most data->count times. */
static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	/* A count of -1 means "unlimited": never decrement it. */
	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}
1071
/*
 * @reg implementation for the snapshot command: register normally,
 * then make sure the instance's snapshot buffer is allocated.  On
 * allocation failure, back out and report 0 (treated as failure by
 * event_trigger_callback()).
 */
static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}
1086
/* Print "snapshot[:count=N|:unlimited][ if filter]" in the trigger file. */
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}
1094
/* Plain and counting ops for the snapshot trigger. */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
1108
/* A count param ("snapshot:N") selects the counting variant. */
static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}
1114
/* "snapshot" trigger command: snapshots the trace buffer when the event fires. */
static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
1124
1125static __init int register_trigger_snapshot_cmd(void)
1126{
1127 int ret;
1128
1129 ret = register_event_command(&trigger_snapshot_cmd);
1130 WARN_ON(ret < 0);
1131
1132 return ret;
1133}
1134#else
1135static __init int register_trigger_snapshot_cmd(void) { return 0; }
1136#endif /* CONFIG_TRACER_SNAPSHOT */
1137
#ifdef CONFIG_STACKTRACE
/*
 * STACK_SKIP is the number of call frames to drop from the dump so it
 * starts at the traced event rather than inside the trigger machinery.
 * The ORC unwinder sees a different (shorter) call chain than the
 * frame-pointer unwinder, hence the two values.  NOTE(review): these
 * counts depend on inlining decisions in the call path — verify against
 * the frames listed below if the surrounding code changes.
 */
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif
1155
/*
 * stacktrace_trigger - "func" for the stacktrace trigger
 *
 * Dumps the current stack into the trace buffer, skipping the
 * trigger-internal frames (see STACK_SKIP above).
 */
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}
1162
1163static void
1164stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1165 struct ring_buffer_event *event)
1166{
1167 if (!data->count)
1168 return;
1169
1170 if (data->count != -1)
1171 (data->count)--;
1172
1173 stacktrace_trigger(data, rec, event);
1174}
1175
/*
 * stacktrace_trigger_print - "print" func for the stacktrace trigger
 *
 * Prints the trigger in its command form ("stacktrace[:count] [if filter]")
 * via the shared event_trigger_print() helper.  Always returns 0.
 */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
1183
/* Ops for "stacktrace" with no :count — fires on every matching event. */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
1190
/* Ops for "stacktrace:N" — fires only while the remaining count is nonzero. */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
1197
1198static struct event_trigger_ops *
1199stacktrace_get_trigger_ops(char *cmd, char *param)
1200{
1201 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1202}
1203
/*
 * The "stacktrace" event trigger command.  Flagged POST_TRIGGER so the
 * dump happens after the triggering event has been written (see the
 * event_triggers_call() documentation on deferred triggers).
 */
static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
1214
/*
 * register_trigger_stacktrace_cmd - register the "stacktrace" trigger
 * command at boot.  A failure is WARNed about but does not abort boot.
 */
static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
/* Stacktrace support not configured: registration is a successful no-op. */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */
1227
/*
 * Tear down the traceon/traceoff trigger commands; used as error-path
 * cleanup by register_trigger_traceon_traceoff_cmds().
 */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
1233
1234static void
1235event_enable_trigger(struct event_trigger_data *data, void *rec,
1236 struct ring_buffer_event *event)
1237{
1238 struct enable_trigger_data *enable_data = data->private_data;
1239
1240 if (enable_data->enable)
1241 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1242 else
1243 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1244}
1245
1246static void
1247event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1248 struct ring_buffer_event *event)
1249{
1250 struct enable_trigger_data *enable_data = data->private_data;
1251
1252 if (!data->count)
1253 return;
1254
1255 /* Skip if the event is in a state we want to switch to */
1256 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1257 return;
1258
1259 if (data->count != -1)
1260 (data->count)--;
1261
1262 event_enable_trigger(data, rec, event);
1263}
1264
/**
 * event_enable_trigger_print - "print" func for enable/disable triggers
 * @m: The seq_file to write into
 * @ops: The trigger ops (unused)
 * @data: Trigger data; private_data holds the enable_trigger_data
 *
 * Prints one line in the same form used to set the trigger:
 * "<cmd>:<system>:<event>[:count=N | :unlimited][ if <filter>]".
 *
 * Return: 0 (always succeeds).
 */
int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	/* Pick the command name back out of the hist/enable flags */
	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	/* -1 is the "no count given" sentinel */
	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
1290
/**
 * event_enable_trigger_free - "free" func for enable/disable triggers
 * @ops: The trigger ops (unused)
 * @data: Trigger data; private_data holds the enable_trigger_data
 *
 * Drops one reference on @data.  On the last reference: clears the
 * target event's soft mode, drops the module reference taken by
 * try_module_get() at registration time, and frees both the trigger
 * data and the enable data.
 */
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	/* Refcount underflow would mean a double free — bail loudly. */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
1308
/* Ops for "enable_event" with no :count. */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
1315
/* Ops for "enable_event:...:N" — counted variant. */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
1322
/*
 * Ops for "disable_event" with no :count.  Shares event_enable_trigger();
 * the enable/disable direction lives in enable_trigger_data->enable.
 */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
1329
/* Ops for "disable_event:...:N" — counted variant. */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
1336
/**
 * event_enable_trigger_func - event_command parse func for enable/disable triggers
 * @cmd_ops: The command ops (enable_event/disable_event or hist variants)
 * @file: The trace_event_file the trigger is being attached to
 * @glob: The raw trigger string; a leading '!' means "remove"
 * @cmd: The matched command name (used to derive enable vs. disable)
 * @param: Trigger parameters: "<system>:<event>[:count] [if <filter>]"
 *
 * Parses the target system/event out of @param, allocates the trigger
 * and enable data, optionally parses a count and filter, then registers
 * the trigger on @file.  While registered, a module reference is held
 * on the target event's module and the target event is put into soft
 * mode so it cannot go away under the trigger.
 *
 * Return: 0 on success, negative error code on failure.
 */
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	/* The command name encodes both direction and hist-ness */
#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;	/* -1 == unlimited until a count is parsed */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/* A leading '!' removes an existing trigger of this type instead */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		/* Remaining trigger text is the optional ":count" */
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Put the target event into soft mode for the trigger's lifetime */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * On success reg() returns the number of triggers registered,
	 * but zero means it registered nothing, which is also treated
	 * as a failure here.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	/* Drop the extra init reference taken above; reg() holds its own */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

	/* Error unwinding, in reverse order of acquisition */
 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
1482
1483int event_enable_register_trigger(char *glob,
1484 struct event_trigger_ops *ops,
1485 struct event_trigger_data *data,
1486 struct trace_event_file *file)
1487{
1488 struct enable_trigger_data *enable_data = data->private_data;
1489 struct enable_trigger_data *test_enable_data;
1490 struct event_trigger_data *test;
1491 int ret = 0;
1492
1493 list_for_each_entry_rcu(test, &file->triggers, list) {
1494 test_enable_data = test->private_data;
1495 if (test_enable_data &&
1496 (test->cmd_ops->trigger_type ==
1497 data->cmd_ops->trigger_type) &&
1498 (test_enable_data->file == enable_data->file)) {
1499 ret = -EEXIST;
1500 goto out;
1501 }
1502 }
1503
1504 if (data->ops->init) {
1505 ret = data->ops->init(data->ops, data);
1506 if (ret < 0)
1507 goto out;
1508 }
1509
1510 list_add_rcu(&data->list, &file->triggers);
1511 ret++;
1512
1513 update_cond_flag(file);
1514 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1515 list_del_rcu(&data->list);
1516 update_cond_flag(file);
1517 ret--;
1518 }
1519out:
1520 return ret;
1521}
1522
1523void event_enable_unregister_trigger(char *glob,
1524 struct event_trigger_ops *ops,
1525 struct event_trigger_data *test,
1526 struct trace_event_file *file)
1527{
1528 struct enable_trigger_data *test_enable_data = test->private_data;
1529 struct enable_trigger_data *enable_data;
1530 struct event_trigger_data *data;
1531 bool unregistered = false;
1532
1533 list_for_each_entry_rcu(data, &file->triggers, list) {
1534 enable_data = data->private_data;
1535 if (enable_data &&
1536 (data->cmd_ops->trigger_type ==
1537 test->cmd_ops->trigger_type) &&
1538 (enable_data->file == test_enable_data->file)) {
1539 unregistered = true;
1540 list_del_rcu(&data->list);
1541 trace_event_trigger_enable_disable(file, 0);
1542 update_cond_flag(file);
1543 break;
1544 }
1545 }
1546
1547 if (unregistered && data->ops->free)
1548 data->ops->free(data->ops, data);
1549}
1550
1551static struct event_trigger_ops *
1552event_enable_get_trigger_ops(char *cmd, char *param)
1553{
1554 struct event_trigger_ops *ops;
1555 bool enable;
1556
1557#ifdef CONFIG_HIST_TRIGGERS
1558 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1559 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1560#else
1561 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1562#endif
1563 if (enable)
1564 ops = param ? &event_enable_count_trigger_ops :
1565 &event_enable_trigger_ops;
1566 else
1567 ops = param ? &event_disable_count_trigger_ops :
1568 &event_disable_trigger_ops;
1569
1570 return ops;
1571}
1572
/* The "enable_event" trigger command: soft-enables another event when hit. */
static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
1582
/*
 * The "disable_event" trigger command: soft-disables another event when
 * hit.  Shares all callbacks with enable_event; the direction is derived
 * from the command name inside them.
 */
static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
1592
/*
 * Tear down the enable_event/disable_event trigger commands; used as
 * error-path cleanup by register_trigger_enable_disable_cmds().
 */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
1598
1599static __init int register_trigger_enable_disable_cmds(void)
1600{
1601 int ret;
1602
1603 ret = register_event_command(&trigger_enable_cmd);
1604 if (WARN_ON(ret < 0))
1605 return ret;
1606 ret = register_event_command(&trigger_disable_cmd);
1607 if (WARN_ON(ret < 0))
1608 unregister_trigger_enable_disable_cmds();
1609
1610 return ret;
1611}
1612
1613static __init int register_trigger_traceon_traceoff_cmds(void)
1614{
1615 int ret;
1616
1617 ret = register_event_command(&trigger_traceon_cmd);
1618 if (WARN_ON(ret < 0))
1619 return ret;
1620 ret = register_event_command(&trigger_traceoff_cmd);
1621 if (WARN_ON(ret < 0))
1622 unregister_trigger_traceon_traceoff_cmds();
1623
1624 return ret;
1625}
1626
/*
 * register_trigger_cmds - register all built-in event trigger commands
 *
 * Called once at boot.  Individual registration results are ignored
 * here because each helper already WARNs on failure and the remaining
 * commands should still be registered.  Always returns 0.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}