1/*
2 * Queued spinlock
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
15 *
16 * Authors: Waiman Long <waiman.long@hp.com>
17 */
18#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
19#define __ASM_GENERIC_QSPINLOCK_TYPES_H
20
/*
 * Including atomic.h with PARAVIRT on will cause compilation errors because
 * of recursive header file inclusion via paravirt_types.h. So don't include
 * it if PARAVIRT is on.
 */
26#ifndef CONFIG_PARAVIRT
27#include <linux/types.h>
28#include <linux/atomic.h>
29#endif
30
/*
 * The queued spinlock word is a single 32-bit atomic value.  The union
 * below overlays sub-fields on that word so the slow path can operate on
 * the locked byte, the pending byte, or the tail halfword individually.
 * The two endian variants keep each named sub-field at the same bit
 * position within 'val' (locked = bits 0-7, pending = bits 8-15,
 * tail = bits 16-31) regardless of byte order.
 */
typedef struct qspinlock {
	union {
		atomic_t val;		/* the whole 32-bit lock word */

		/*
		 * By using the whole 2nd least significant byte for the
		 * pending bit, we can allow better optimization of the lock
		 * acquisition for the pending bit holder.
		 */
#ifdef __LITTLE_ENDIAN
		struct {
			u8 locked;	/* lock byte (bits 0-7 of val) */
			u8 pending;	/* pending byte (bits 8-15 of val) */
		};
		struct {
			u16 locked_pending;	/* locked + pending as one halfword */
			u16 tail;		/* MCS queue tail (bits 16-31) */
		};
#else
		struct {
			u16 tail;		/* MCS queue tail (bits 16-31) */
			u16 locked_pending;	/* locked + pending as one halfword */
		};
		struct {
			u8 reserved[2];	/* overlays 'tail' */
			u8 pending;	/* pending byte (bits 8-15 of val) */
			u8 locked;	/* lock byte (bits 0-7 of val) */
		};
#endif
	};
} arch_spinlock_t;
62
/*
 * Initializer: an all-zero lock word means unlocked, no pending waiter
 * and an empty MCS queue tail.
 */
#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
67
/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */
/* Build a mask covering field 'type' from its _OFFSET/_BITS macros. */
#define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
/*
 * With fewer than 16K CPUs the tail cpu fits in 14 bits, leaving a whole
 * byte for 'pending' (enables byte-wide access); otherwise shrink
 * 'pending' to a single bit to make room for the larger tail cpu field.
 */
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS		8
#else
#define _Q_PENDING_BITS		1
#endif
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

/* Tail index: which of the (up to 4) per-cpu MCS nodes is in use. */
#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

/* Tail cpu: encoded as cpu number + 1 so that 0 means "queue empty". */
#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

/* Combined tail field = tail index + tail cpu. */
#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

/* Values to set/add to the lock word to take the lock / pending bit. */
#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
111
112#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
113