/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)
/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single depth
 * nesting, so both cached entries stay hot.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};
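
/*
 * Example (illustrative sketch, not part of this header): a dynamically
 * allocated object whose lock class must not outlive the key memory uses
 * a dynamically registered key. The structure "foo" is an assumption:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		struct lock_class_key key;
 *	};
 *
 *	f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	lockdep_register_key(&f->key);
 *	spin_lock_init(&f->lock);
 *	lockdep_set_class(&f->lock, &f->key);
 *	...
 *	lockdep_unregister_key(&f->key);
 *	kfree(f);
 */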

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * Generation counter, used during certain classes of graph
	 * walking to ensure that we visit each node only once:
	 */
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * Version counter, to distinguish lock classes that share the
	 * same name (reported as "name", "name#2", "name#3", ...):
	 */
	int				name_version;
	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;				/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
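
/*
 * Example (illustrative sketch, not part of this header): split a lock out
 * of the class its init helper would assign, when that default scope is too
 * broad for the object's actual nesting rules. "foo" is an assumption:
 *
 *	static struct lock_class_key foo_dev_key;
 *
 *	mutex_init(&foo->mutex);
 *	lockdep_set_class(&foo->mutex, &foo_dev_key);
 */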

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
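
/*
 * Example (illustrative sketch, not part of this header): a custom
 * synchronization primitive can report acquire/release events to lockdep
 * directly. The "foo" names and the dep_map member (set up earlier via
 * lockdep_init_map()) are assumptions:
 *
 *	void foo_lock(struct foo *f)
 *	{
 *		lock_acquire(&f->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_foo_lock(&f->raw);
 *	}
 *
 *	void foo_unlock(struct foo *f)
 *	{
 *		lock_release(&f->dep_map, 0, _RET_IP_);
 *		arch_foo_unlock(&f->raw);
 *	}
 */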

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
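
/*
 * Example (illustrative sketch, not part of this header): lockdep_is_held()
 * is commonly fed to rcu_dereference_protected() to document which lock
 * protects an update-side dereference. The "f" names are assumptions:
 *
 *	p = rcu_dereference_protected(f->ptr, lockdep_is_held(&f->lock));
 */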

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
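
/*
 * Example (illustrative sketch, not part of this header): pinning asserts
 * that a lock is not dropped over a region that relies on it staying held,
 * e.g. around a callback that must not release the caller's lock:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	do_callback_that_must_not_drop(rq);
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */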

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
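
/*
 * Example (illustrative sketch, not part of this header): document and
 * enforce a function's locking precondition. The "foo" names are
 * assumptions:
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->nr_updates++;
 *	}
 */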

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and callers should
 * rather #ifdef their call sites themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)		({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
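
/*
 * Example (illustrative sketch, not part of this header): a file-scope
 * lockdep_map for annotating a pseudo-lock, in the style of the workqueue
 * and RCU annotations. The "foo" names are assumptions:
 *
 *	static struct lock_class_key foo_key;
 *	static struct lockdep_map foo_map =
 *		STATIC_LOCKDEP_MAP_INIT("foo_map", &foo_key);
 */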

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
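
/*
 * Example (illustrative sketch, not part of this header): a lock primitive
 * wraps its trylock fastpath and blocking slowpath in LOCK_CONTENDED() so
 * that lock_stat can attribute wait time to the contended case. The "foo"
 * helpers (and a dep_map member in foo_lock_t) are assumptions:
 *
 *	void foo_lock(struct foo_lock_t *lock)
 *	{
 *		...
 *		LOCK_CONTENDED(lock, foo_trylock_slow, foo_lock_slow);
 *	}
 */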

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
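
/*
 * Example (illustrative sketch, not part of this header): taking a parent
 * and a child lock of the same class, e.g. while reparenting an object.
 * The "parent"/"child" names are assumptions:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->mutex);
 *	mutex_unlock(&parent->mutex);
 */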

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
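
/*
 * Example (illustrative sketch, not part of this header): lock_map_acquire()
 * can annotate a pseudo-lock such as "this work item is executing", the
 * pattern the workqueue code uses to catch flush-from-callback deadlocks.
 * The "foo" names are assumptions:
 *
 *	lock_map_acquire(&foo_map);
 *	run_foo_callback();
 *	lock_map_release(&foo_map);
 */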

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
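
/*
 * Example (illustrative sketch, not part of this header): a function that
 * only takes a lock on a rare path can still let the validator see the
 * dependency on every call. The "foo" names are assumptions:
 *
 *	void foo_maybe_flush(struct foo *f)
 *	{
 *		might_lock(&f->lock);
 *		if (atomic_read(&f->dirty))
 *			foo_flush(f);
 *	}
 */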

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)
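
/*
 * Example (illustrative sketch, not part of this header): assert the IRQ
 * state a helper relies on, rather than silently corrupting per-cpu data.
 * The "foo" names are assumptions:
 *
 *	static void foo_update_percpu(void)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		__this_cpu_inc(foo_count);
 *	}
 */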

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */