/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3

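/*
 * Compatibility summary implied by the rules above (UNLOCKED conflicts
 * with nothing; EXCLUSIVE is compatible only with UNLOCKED):
 *
 *		UN	SH	DF	EX
 *	UN	yes	yes	yes	yes
 *	SH	yes	yes	no	no
 *	DF	yes	no	yes	no
 *	EX	yes	no	no	no
 */
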
/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS2 sets this flag on lock requests it makes while doing journal recovery.
 * These special requests must not be blocked by the recovery the way
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED. The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node. In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_NODE_SCOPE	0x0020
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOPID		0x0200
#define GL_NOCACHE		0x0400
#define GL_NOBLOCK		0x0800

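/*
 * Example (illustrative sketch only): a caller that must not block can
 * combine LM_FLAG_TRY with the holder helpers declared below; a busy
 * lock then fails with GLR_TRYFAILED instead of sleeping:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
 *	if (error == GLR_TRYFAILED)
 *		return -EAGAIN;			(lock was busy)
 *	if (error)
 *		return error;
 *	(... access data protected by the shared lock ...)
 *	gfs2_glock_dq_uninit(&gh);
 */
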
/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 *
 * LM_OUT_ERROR
 * The lock request failed with an error.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

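/*
 * Glock hold times are in jiffies: HZ/5 is 200ms, HZ/20 is 50ms and
 * HZ/40 is 25ms, while GL_GLOCK_MIN_HOLD is a fixed 10 jiffies.
 */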
#define GL_GLOCK_MAX_HOLD	(long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD	(long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD	(long)(10)
#define GL_GLOCK_HOLD_INCR	(long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR	(long)(HZ / 40)

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};
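
/*
 * A lock module supplies one of these tables; gfs2_dlm_ops (declared near
 * the bottom of this header) is the DLM-backed implementation. A minimal
 * local-locking sketch (illustrative only; nolock_tokens would be a
 * match_table_t defined by the module):
 *
 *	static const struct lm_lockops nolock_ops = {
 *		.lm_proto_name = "lock_nolock",
 *		.lm_put_lock   = gfs2_glock_free,
 *		.lm_tokens     = &nolock_tokens,
 *	};
 */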

struct gfs2_glock_aspace {
	struct gfs2_glock glock;
	struct address_space mapping;
};

static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}

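/*
 * Illustrative use: code paths that require the caller to already hold
 * the glock can assert it, e.g.
 *
 *	GLOCK_BUG_ON(gl, !gfs2_glock_is_locked_by_me(gl));
 */
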
static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		return &gla->mapping;
	}
	return NULL;
}

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops,
		   int create, struct gfs2_glock **glp);
struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_glock_queue_put(struct gfs2_glock *gl);

void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			u16 flags, struct gfs2_holder *gh,
			unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
				    u16 flags, struct gfs2_holder *gh)
{
	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}

void gfs2_holder_reinit(unsigned int state, u16 flags,
			struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_instantiate(struct gfs2_holder *gh);
int gfs2_glock_holder_ready(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq(struct gfs2_holder *gh);
void gfs2_glock_dq_wait(struct gfs2_holder *gh);
void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags,
		      struct gfs2_holder *gh);
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
		     bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {		\
			gfs2_dump_glock(NULL, gl, true);	\
			BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } }	\
	while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } }	\
	while (0)

__printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}

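/*
 * Example (sketch, error handling elided): GL_ASYNC queues a request
 * without waiting for it to be granted, so several glocks can be
 * requested concurrently and then waited on together:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_SHARED, GL_ASYNC, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_SHARED, GL_ASYNC, &ghs[1]);
 *	gfs2_glock_nq(&ghs[0]);
 *	gfs2_glock_nq(&ghs[1]);
 *	error = gfs2_glock_async_wait(2, ghs);
 */
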
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
void gfs2_cancel_delete_work(struct gfs2_glock *gl);
void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
void gfs2_glock_free(struct gfs2_glock *gl);

int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_register_debugfs(void);
void gfs2_unregister_debugfs(void);

void glock_set_object(struct gfs2_glock *gl, void *object);
void glock_clear_object(struct gfs2_glock *gl, void *object);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
	return !list_empty(&gh->gh_list);
}

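/*
 * Typical pattern (sketch): mark a holder uninitialized up front so that
 * a shared error path can tell whether it needs to be released:
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_mark_uninitialized(&gh);
 *	(... code that may or may not acquire the glock into gh ...)
 *	if (gfs2_holder_initialized(&gh))
 *		gfs2_glock_dq_uninit(&gh);
 */
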
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

#endif /* __GLOCK_DOT_H__ */