1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * dir.c |
4 | * |
5 | * Creates, reads, walks and deletes directory-nodes |
6 | * |
7 | * Copyright (C) 2002, 2004 Oracle. All rights reserved. |
8 | * |
9 | * Portions of this code from linux/fs/ext3/dir.c |
10 | * |
11 | * Copyright (C) 1992, 1993, 1994, 1995 |
12 | * Remy Card (card@masi.ibp.fr) |
13 | * Laboratoire MASI - Institut Blaise pascal |
14 | * Universite Pierre et Marie Curie (Paris VI) |
15 | * |
16 | * from |
17 | * |
18 | * linux/fs/minix/dir.c |
19 | * |
20 | * Copyright (C) 1991, 1992 Linus Torvalds |
21 | */ |
22 | |
23 | #include <linux/fs.h> |
24 | #include <linux/types.h> |
25 | #include <linux/slab.h> |
26 | #include <linux/highmem.h> |
27 | #include <linux/quotaops.h> |
28 | #include <linux/sort.h> |
29 | #include <linux/iversion.h> |
30 | |
31 | #include <cluster/masklog.h> |
32 | |
33 | #include "ocfs2.h" |
34 | |
35 | #include "alloc.h" |
36 | #include "blockcheck.h" |
37 | #include "dir.h" |
38 | #include "dlmglue.h" |
39 | #include "extent_map.h" |
40 | #include "file.h" |
41 | #include "inode.h" |
42 | #include "journal.h" |
43 | #include "namei.h" |
44 | #include "suballoc.h" |
45 | #include "super.h" |
46 | #include "sysfile.h" |
47 | #include "uptodate.h" |
48 | #include "ocfs2_trace.h" |
49 | |
50 | #include "buffer_head_io.h" |
51 | |
52 | #define NAMEI_RA_CHUNKS 2 |
53 | #define NAMEI_RA_BLOCKS 4 |
54 | #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) |
55 | |
56 | static int ocfs2_do_extend_dir(struct super_block *sb, |
57 | handle_t *handle, |
58 | struct inode *dir, |
59 | struct buffer_head *parent_fe_bh, |
60 | struct ocfs2_alloc_context *data_ac, |
61 | struct ocfs2_alloc_context *meta_ac, |
62 | struct buffer_head **new_bh); |
63 | static int ocfs2_dir_indexed(struct inode *inode); |
64 | |
65 | /* |
66 | * These are distinct checks because future versions of the file system will |
67 | * want to have a trailing dirent structure independent of indexing. |
68 | */ |
69 | static int ocfs2_supports_dir_trailer(struct inode *dir) |
70 | { |
71 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
72 | |
73 | if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) |
74 | return 0; |
75 | |
76 | return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(inode: dir); |
77 | } |
78 | |
79 | /* |
80 | * "new' here refers to the point at which we're creating a new |
81 | * directory via "mkdir()", but also when we're expanding an inline |
82 | * directory. In either case, we don't yet have the indexing bit set |
83 | * on the directory, so the standard checks will fail in when metaecc |
84 | * is turned off. Only directory-initialization type functions should |
85 | * use this then. Everything else wants ocfs2_supports_dir_trailer() |
86 | */ |
87 | static int ocfs2_new_dir_wants_trailer(struct inode *dir) |
88 | { |
89 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
90 | |
91 | return ocfs2_meta_ecc(osb) || |
92 | ocfs2_supports_indexed_dirs(osb); |
93 | } |
94 | |
95 | static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb) |
96 | { |
97 | return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer); |
98 | } |
99 | |
/* Locate the trailer structure at the tail of buffer head _bh on _sb. */
#define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb))))
101 | |
102 | /* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make |
103 | * them more consistent? */ |
104 | struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize, |
105 | void *data) |
106 | { |
107 | char *p = data; |
108 | |
109 | p += blocksize - sizeof(struct ocfs2_dir_block_trailer); |
110 | return (struct ocfs2_dir_block_trailer *)p; |
111 | } |
112 | |
113 | /* |
114 | * XXX: This is executed once on every dirent. We should consider optimizing |
115 | * it. |
116 | */ |
117 | static int ocfs2_skip_dir_trailer(struct inode *dir, |
118 | struct ocfs2_dir_entry *de, |
119 | unsigned long offset, |
120 | unsigned long blklen) |
121 | { |
122 | unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer); |
123 | |
124 | if (!ocfs2_supports_dir_trailer(dir)) |
125 | return 0; |
126 | |
127 | if (offset != toff) |
128 | return 0; |
129 | |
130 | return 1; |
131 | } |
132 | |
/*
 * Stamp a fresh trailer at the tail of directory block @bh: signature,
 * a compat rec_len (presumably so pre-trailer dirent walkers treat the
 * trailer area as an in-use entry — field name suggests this; confirm
 * against the on-disk format docs), owning dinode, the block's own
 * number, and the free-space hint @rec_len used by the dx free list.
 */
static void ocfs2_init_dir_trailer(struct inode *inode,
				   struct buffer_head *bh, u16 rec_len)
{
	struct ocfs2_dir_block_trailer *trailer;

	trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
	strcpy(p: trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
	trailer->db_compat_rec_len =
			cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
	trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
	trailer->db_blkno = cpu_to_le64(bh->b_blocknr);
	trailer->db_free_rec_len = cpu_to_le16(rec_len);
}
146 | /* |
147 | * Link an unindexed block with a dir trailer structure into the index free |
148 | * list. This function will modify dirdata_bh, but assumes you've already |
149 | * passed it to the journal. |
150 | */ |
/*
 * Link an unindexed block with a dir trailer structure into the index free
 * list. This function will modify dirdata_bh, but assumes you've already
 * passed it to the journal.
 *
 * Classic singly-linked-list push: the block's trailer points at the
 * old list head, then dr_free_blk is updated to this block.  Only
 * dx_root_bh needs journal access here; dirdata_bh was handled by the
 * caller (see comment above).
 */
static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
				     struct buffer_head *dx_root_bh,
				     struct buffer_head *dirdata_bh)
{
	int ret;
	struct ocfs2_dx_root_block *dx_root;
	struct ocfs2_dir_block_trailer *trailer;

	ret = ocfs2_journal_access_dr(handle, ci: INODE_CACHE(inode: dir), bh: dx_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);
	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;

	/* Both values stay little-endian; no byte-swapping needed. */
	trailer->db_free_next = dx_root->dr_free_blk;
	dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);

	ocfs2_journal_dirty(handle, bh: dx_root_bh);

out:
	return ret;
}
176 | |
177 | static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res) |
178 | { |
179 | return res->dl_prev_leaf_bh == NULL; |
180 | } |
181 | |
182 | void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res) |
183 | { |
184 | brelse(bh: res->dl_dx_root_bh); |
185 | brelse(bh: res->dl_leaf_bh); |
186 | brelse(bh: res->dl_dx_leaf_bh); |
187 | brelse(bh: res->dl_prev_leaf_bh); |
188 | } |
189 | |
190 | static int ocfs2_dir_indexed(struct inode *inode) |
191 | { |
192 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL) |
193 | return 1; |
194 | return 0; |
195 | } |
196 | |
197 | static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root) |
198 | { |
199 | return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE; |
200 | } |
201 | |
202 | /* |
203 | * Hashing code adapted from ext3 |
204 | */ |
205 | #define DELTA 0x9E3779B9 |
206 | |
207 | static void TEA_transform(__u32 buf[4], __u32 const in[]) |
208 | { |
209 | __u32 sum = 0; |
210 | __u32 b0 = buf[0], b1 = buf[1]; |
211 | __u32 a = in[0], b = in[1], c = in[2], d = in[3]; |
212 | int n = 16; |
213 | |
214 | do { |
215 | sum += DELTA; |
216 | b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); |
217 | b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); |
218 | } while (--n); |
219 | |
220 | buf[0] += b0; |
221 | buf[1] += b1; |
222 | } |
223 | |
/*
 * Pack up to @num 32-bit words of @msg into @buf one byte at a time,
 * padding with a value derived from @len.  Adapted from ext3; the
 * exact packing/padding scheme feeds the on-disk name hash and must
 * not change.
 */
static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
{
	__u32 pad, val;
	int i;

	/* pad = the low length byte replicated into all four byte lanes. */
	pad = (__u32)len | ((__u32)len << 8);
	pad |= pad << 16;

	val = pad;
	if (len > num*4)
		len = num * 4;
	for (i = 0; i < len; i++) {
		if ((i % 4) == 0)
			val = pad;
		val = msg[i] + (val << 8);
		if ((i % 4) == 3) {
			/* Word complete - emit it and restart from pad. */
			*buf++ = val;
			val = pad;
			num--;
		}
	}
	/* Emit the final (possibly partial) word, then pad the rest. */
	if (--num >= 0)
		*buf++ = val;
	while (--num >= 0)
		*buf++ = pad;
}
250 | |
/*
 * Compute the (major, minor) index hash for @name into @hinfo.
 * "." and ".." hash to zero; everything else runs a per-superblock
 * seeded TEA over the name, 16 input bytes per round.
 */
static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
				   struct ocfs2_dx_hinfo *hinfo)
{
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	const char *p;
	__u32 in[8], buf[4];

	/*
	 * XXX: Is this really necessary, if the index is never looked
	 * at by readdir? Is a hash value of '0' a bad idea?
	 */
	if ((len == 1 && !strncmp("." , name, 1)) ||
	    (len == 2 && !strncmp(".." , name, 2))) {
		buf[0] = buf[1] = 0;
		goto out;
	}

#ifdef OCFS2_DEBUG_DX_DIRS
	/*
	 * This makes it very easy to debug indexing problems. We
	 * should never allow this to be selected without hand editing
	 * this file though.
	 */
	buf[0] = buf[1] = len;
	goto out;
#endif

	/* Seed from the superblock so hashes differ per filesystem. */
	memcpy(buf, osb->osb_dx_seed, sizeof(buf));

	p = name;
	while (len > 0) {
		/* str2hashbuf pads short tails, so overrun is safe here. */
		str2hashbuf(msg: p, len, buf: in, num: 4);
		TEA_transform(buf, in);
		len -= 16;
		p += 16;
	}

out:
	hinfo->major_hash = buf[0];
	hinfo->minor_hash = buf[1];
}
292 | |
293 | /* |
294 | * bh passed here can be an inode block or a dir data block, depending |
295 | * on the inode inline data flag. |
296 | */ |
/*
 * bh passed here can be an inode block or a dir data block, depending
 * on the inode inline data flag.
 *
 * Sanity-check a single dirent: rec_len must be at least the minimum
 * entry size, 4-byte aligned, large enough for name_len, and must not
 * run past the end of the block.  Returns 1 if the entry looks sane,
 * 0 (after logging) if not.
 */
static int ocfs2_check_dir_entry(struct inode * dir,
				 struct ocfs2_dir_entry * de,
				 struct buffer_head * bh,
				 unsigned long offset)
{
	const char *error_msg = NULL;
	const int rlen = le16_to_cpu(de->rec_len);

	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
		error_msg = "rec_len is smaller than minimal" ;
	else if (unlikely(rlen % 4 != 0))
		error_msg = "rec_len % 4 != 0" ;
	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
		error_msg = "rec_len is too small for name_len" ;
	else if (unlikely(
		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
		error_msg = "directory entry across blocks" ;

	if (unlikely(error_msg != NULL))
		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
		     "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n" ,
		     (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
		     offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
		     de->name_len);

	return error_msg == NULL ? 1 : 0;
}
324 | |
325 | static inline int ocfs2_match(int len, |
326 | const char * const name, |
327 | struct ocfs2_dir_entry *de) |
328 | { |
329 | if (len != de->name_len) |
330 | return 0; |
331 | if (!de->inode) |
332 | return 0; |
333 | return !memcmp(p: name, q: de->name, size: len); |
334 | } |
335 | |
336 | /* |
337 | * Returns 0 if not found, -1 on failure, and 1 on success |
338 | */ |
/*
 * Returns 0 if not found, -1 on failure, and 1 on success
 *
 * Walks the dirent chain in [first_de, first_de + bytes), matching by
 * name.  Full dirent validation is deferred until a name match, since
 * this runs on every entry; only rec_len > 0 is enforced per step to
 * guarantee forward progress on a corrupt block.
 */
static inline int ocfs2_search_dirblock(struct buffer_head *bh,
					struct inode *dir,
					const char *name, int namelen,
					unsigned long offset,
					char *first_de,
					unsigned int bytes,
					struct ocfs2_dir_entry **res_dir)
{
	struct ocfs2_dir_entry *de;
	char *dlimit, *de_buf;
	int de_len;
	int ret = 0;

	de_buf = first_de;
	dlimit = de_buf + bytes;

	while (de_buf < dlimit) {
		/* this code is executed quadratically often */
		/* do minimal checking `by hand' */

		de = (struct ocfs2_dir_entry *) de_buf;

		/* Bounds check before reading name bytes near the end. */
		if (de_buf + namelen <= dlimit &&
		    ocfs2_match(len: namelen, name, de)) {
			/* found a match - just to be sure, do a full check */
			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
				ret = -1;
				goto bail;
			}
			*res_dir = de;
			ret = 1;
			goto bail;
		}

		/* prevent looping on a bad block */
		de_len = le16_to_cpu(de->rec_len);
		if (de_len <= 0) {
			ret = -1;
			goto bail;
		}

		de_buf += de_len;
		offset += de_len;
	}

bail:
	trace_ocfs2_search_dirblock(num: ret);
	return ret;
}
388 | |
/*
 * Lookup for inline-data directories: the dirents live directly in the
 * inode block's id2.i_data area.  Returns the pinned inode bh on a hit
 * (with *res_dir set), NULL on miss or read error.
 */
static struct buffer_head *ocfs2_find_entry_id(const char *name,
					       int namelen,
					       struct inode *dir,
					       struct ocfs2_dir_entry **res_dir)
{
	int ret, found;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_inline_data *data;

	ret = ocfs2_read_inode_block(inode: dir, bh: &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	data = &di->id2.i_data;

	/* i_size bounds the inline dirent area. */
	found = ocfs2_search_dirblock(bh: di_bh, dir, name, namelen, offset: 0,
				      first_de: data->id_data, bytes: i_size_read(inode: dir), res_dir);
	if (found == 1)
		return di_bh;

	brelse(bh: di_bh);
out:
	return NULL;
}
417 | |
/*
 * Read-time validator for directory blocks: checks metadata ECC only.
 * NOTE(review): the error message says "dinode" but this is a dir
 * block checksum failure — message text predates the trailer code.
 */
static int ocfs2_validate_dir_block(struct super_block *sb,
				    struct buffer_head *bh)
{
	int rc;
	struct ocfs2_dir_block_trailer *trailer =
		ocfs2_trailer_from_bh(bh, sb);


	/*
	 * We don't validate dirents here, that's handled
	 * in-place when the code walks them.
	 */
	trace_ocfs2_validate_dir_block(num: (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 *
	 * Note that we are safe to call this even if the directory
	 * doesn't have a trailer. Filesystems without metaecc will do
	 * nothing, and filesystems with it will have one.
	 */
	rc = ocfs2_validate_meta_ecc(sb, data: bh->b_data, bc: &trailer->db_check);
	if (rc)
		mlog(ML_ERROR, "Checksum failed for dinode %llu\n" ,
		     (unsigned long long)bh->b_blocknr);

	return rc;
}
450 | |
451 | /* |
452 | * Validate a directory trailer. |
453 | * |
454 | * We check the trailer here rather than in ocfs2_validate_dir_block() |
455 | * because that function doesn't have the inode to test. |
456 | */ |
457 | static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh) |
458 | { |
459 | int rc = 0; |
460 | struct ocfs2_dir_block_trailer *trailer; |
461 | |
462 | trailer = ocfs2_trailer_from_bh(bh, dir->i_sb); |
463 | if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) { |
464 | rc = ocfs2_error(dir->i_sb, |
465 | "Invalid dirblock #%llu: signature = %.*s\n" , |
466 | (unsigned long long)bh->b_blocknr, 7, |
467 | trailer->db_signature); |
468 | goto out; |
469 | } |
470 | if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) { |
471 | rc = ocfs2_error(dir->i_sb, |
472 | "Directory block #%llu has an invalid db_blkno of %llu\n" , |
473 | (unsigned long long)bh->b_blocknr, |
474 | (unsigned long long)le64_to_cpu(trailer->db_blkno)); |
475 | goto out; |
476 | } |
477 | if (le64_to_cpu(trailer->db_parent_dinode) != |
478 | OCFS2_I(inode: dir)->ip_blkno) { |
479 | rc = ocfs2_error(dir->i_sb, |
480 | "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n" , |
481 | (unsigned long long)bh->b_blocknr, |
482 | (unsigned long long)OCFS2_I(dir)->ip_blkno, |
483 | (unsigned long long)le64_to_cpu(trailer->db_blkno)); |
484 | goto out; |
485 | } |
486 | out: |
487 | return rc; |
488 | } |
489 | |
490 | /* |
491 | * This function forces all errors to -EIO for consistency with its |
492 | * predecessor, ocfs2_bread(). We haven't audited what returning the |
493 | * real error codes would do to callers. We log the real codes with |
494 | * mlog_errno() before we squash them. |
495 | */ |
/*
 * Read virtual block @v_block of directory @inode, validating the
 * trailer unless this is only a readahead request (readahead buffers
 * are re-read and checked when actually used).  On success *bh holds
 * the buffer (newly pinned if *bh was NULL on entry).  All errors are
 * squashed to -EIO; see the comment above the function.
 */
static int ocfs2_read_dir_block(struct inode *inode, u64 v_block,
				struct buffer_head **bh, int flags)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_virt_blocks(inode, v_block, nr: 1, bhs: &tmp, flags,
				    validate: ocfs2_validate_dir_block);
	if (rc) {
		mlog_errno(rc);
		goto out;
	}

	if (!(flags & OCFS2_BH_READAHEAD) &&
	    ocfs2_supports_dir_trailer(dir: inode)) {
		rc = ocfs2_check_dir_trailer(dir: inode, bh: tmp);
		if (rc) {
			/* Only drop tmp if we pinned it ourselves. */
			if (!*bh)
				brelse(bh: tmp);
			mlog_errno(rc);
			goto out;
		}
	}

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!*bh)
		*bh = tmp;

out:
	return rc ? -EIO : 0;
}
527 | |
528 | /* |
529 | * Read the block at 'phys' which belongs to this directory |
530 | * inode. This function does no virtual->physical block translation - |
531 | * what's passed in is assumed to be a valid directory block. |
532 | */ |
/*
 * Read the block at 'phys' which belongs to this directory
 * inode. This function does no virtual->physical block translation -
 * what's passed in is assumed to be a valid directory block.
 *
 * Unlike ocfs2_read_dir_block(), real error codes are returned here.
 */
static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
				       struct buffer_head **bh)
{
	int ret;
	struct buffer_head *tmp = *bh;

	ret = ocfs2_read_block(ci: INODE_CACHE(inode: dir), off: phys, bh: &tmp,
			       validate: ocfs2_validate_dir_block);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_supports_dir_trailer(dir)) {
		ret = ocfs2_check_dir_trailer(dir, bh: tmp);
		if (ret) {
			/* Only drop tmp if we pinned it ourselves. */
			if (!*bh)
				brelse(bh: tmp);
			mlog_errno(ret);
			goto out;
		}
	}

	/* Pass a newly-pinned bh up to the caller (ret is 0 here). */
	if (!ret && !*bh)
		*bh = tmp;
out:
	return ret;
}
561 | |
/*
 * Read-time validator for a dx root block: metadata ECC first, then
 * the on-disk signature.
 */
static int ocfs2_validate_dx_root(struct super_block *sb,
				  struct buffer_head *bh)
{
	int ret;
	struct ocfs2_dx_root_block *dx_root;

	BUG_ON(!buffer_uptodate(bh));

	dx_root = (struct ocfs2_dx_root_block *) bh->b_data;

	ret = ocfs2_validate_meta_ecc(sb, data: bh->b_data, bc: &dx_root->dr_check);
	if (ret) {
		mlog(ML_ERROR,
		     "Checksum failed for dir index root block %llu\n" ,
		     (unsigned long long)bh->b_blocknr);
		return ret;
	}

	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
		ret = ocfs2_error(sb,
				  "Dir Index Root # %llu has bad signature %.*s\n" ,
				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
				  7, dx_root->dr_signature);
	}

	return ret;
}
589 | |
590 | static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di, |
591 | struct buffer_head **dx_root_bh) |
592 | { |
593 | int ret; |
594 | u64 blkno = le64_to_cpu(di->i_dx_root); |
595 | struct buffer_head *tmp = *dx_root_bh; |
596 | |
597 | ret = ocfs2_read_block(ci: INODE_CACHE(inode: dir), off: blkno, bh: &tmp, |
598 | validate: ocfs2_validate_dx_root); |
599 | |
600 | /* If ocfs2_read_block() got us a new bh, pass it up. */ |
601 | if (!ret && !*dx_root_bh) |
602 | *dx_root_bh = tmp; |
603 | |
604 | return ret; |
605 | } |
606 | |
/*
 * Read-time validator for a dx leaf block: metadata ECC first, then
 * the on-disk signature.
 */
static int ocfs2_validate_dx_leaf(struct super_block *sb,
				  struct buffer_head *bh)
{
	int ret;
	struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data;

	BUG_ON(!buffer_uptodate(bh));

	ret = ocfs2_validate_meta_ecc(sb, data: bh->b_data, bc: &dx_leaf->dl_check);
	if (ret) {
		mlog(ML_ERROR,
		     "Checksum failed for dir index leaf block %llu\n" ,
		     (unsigned long long)bh->b_blocknr);
		return ret;
	}

	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n" ,
				  7, dx_leaf->dl_signature);
	}

	return ret;
}
630 | |
631 | static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno, |
632 | struct buffer_head **dx_leaf_bh) |
633 | { |
634 | int ret; |
635 | struct buffer_head *tmp = *dx_leaf_bh; |
636 | |
637 | ret = ocfs2_read_block(ci: INODE_CACHE(inode: dir), off: blkno, bh: &tmp, |
638 | validate: ocfs2_validate_dx_leaf); |
639 | |
640 | /* If ocfs2_read_block() got us a new bh, pass it up. */ |
641 | if (!ret && !*dx_leaf_bh) |
642 | *dx_leaf_bh = tmp; |
643 | |
644 | return ret; |
645 | } |
646 | |
647 | /* |
648 | * Read a series of dx_leaf blocks. This expects all buffer_head |
649 | * pointers to be NULL on function entry. |
650 | */ |
651 | static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num, |
652 | struct buffer_head **dx_leaf_bhs) |
653 | { |
654 | int ret; |
655 | |
656 | ret = ocfs2_read_blocks(ci: INODE_CACHE(inode: dir), block: start, nr: num, bhs: dx_leaf_bhs, flags: 0, |
657 | validate: ocfs2_validate_dx_leaf); |
658 | if (ret) |
659 | mlog_errno(ret); |
660 | |
661 | return ret; |
662 | } |
663 | |
/*
 * Lookup in an extent-list (unindexed, non-inline) directory.  Scans
 * block by block starting from the cached ip_dir_start_lookup hint,
 * wrapping around, with a small readahead window (bh_use ring) issued
 * ahead of the synchronous reads.  Returns the pinned dirent block on
 * a hit (with *res_dir set) or NULL.
 */
static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
					       struct inode *dir,
					       struct ocfs2_dir_entry **res_dir)
{
	struct super_block *sb;
	struct buffer_head *bh_use[NAMEI_RA_SIZE];
	struct buffer_head *bh, *ret = NULL;
	unsigned long start, block, b;
	int ra_max = 0;		/* Number of bh's in the readahead
				   buffer, bh_use[] */
	int ra_ptr = 0;		/* Current index into readahead
				   buffer */
	int num = 0;
	int nblocks, i;

	sb = dir->i_sb;

	nblocks = i_size_read(inode: dir) >> sb->s_blocksize_bits;
	start = OCFS2_I(inode: dir)->ip_dir_start_lookup;
	if (start >= nblocks)
		start = 0;
	block = start;

restart:
	do {
		/*
		 * We deal with the read-ahead logic here.
		 */
		if (ra_ptr >= ra_max) {
			/* Refill the readahead buffer */
			ra_ptr = 0;
			b = block;
			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
				/*
				 * Terminate if we reach the end of the
				 * directory and must wrap, or if our
				 * search has finished at this block.
				 */
				if (b >= nblocks || (num && block == start)) {
					bh_use[ra_max] = NULL;
					break;
				}
				num++;

				/* Readahead errors are ignored on purpose. */
				bh = NULL;
				ocfs2_read_dir_block(inode: dir, v_block: b++, bh: &bh,
						     OCFS2_BH_READAHEAD);
				bh_use[ra_max] = bh;
			}
		}
		if ((bh = bh_use[ra_ptr++]) == NULL)
			goto next;
		/* This re-read waits on and fully validates the block. */
		if (ocfs2_read_dir_block(inode: dir, v_block: block, bh: &bh, flags: 0)) {
			/* read error, skip block & hope for the best.
			 * ocfs2_read_dir_block() has released the bh. */
			mlog(ML_ERROR, "reading directory %llu, "
				    "offset %lu\n" ,
			     (unsigned long long)OCFS2_I(dir)->ip_blkno,
			     block);
			goto next;
		}
		i = ocfs2_search_dirblock(bh, dir, name, namelen,
					  offset: block << sb->s_blocksize_bits,
					  first_de: bh->b_data, bytes: sb->s_blocksize,
					  res_dir);
		if (i == 1) {
			/* Remember the hit block to speed up future lookups. */
			OCFS2_I(inode: dir)->ip_dir_start_lookup = block;
			ret = bh;
			goto cleanup_and_exit;
		} else {
			brelse(bh);
			if (i < 0)
				goto cleanup_and_exit;
		}
	next:
		if (++block >= nblocks)
			block = 0;
	} while (block != start);

	/*
	 * If the directory has grown while we were searching, then
	 * search the last part of the directory before giving up.
	 */
	block = nblocks;
	nblocks = i_size_read(inode: dir) >> sb->s_blocksize_bits;
	if (block < nblocks) {
		start = 0;
		goto restart;
	}

cleanup_and_exit:
	/* Clean up the read-ahead blocks */
	for (; ra_ptr < ra_max; ra_ptr++)
		brelse(bh: bh_use[ra_ptr]);

	trace_ocfs2_find_entry_el(pointer: ret);
	return ret;
}
762 | |
763 | static int ocfs2_dx_dir_lookup_rec(struct inode *inode, |
764 | struct ocfs2_extent_list *el, |
765 | u32 major_hash, |
766 | u32 *ret_cpos, |
767 | u64 *ret_phys_blkno, |
768 | unsigned int *ret_clen) |
769 | { |
770 | int ret = 0, i, found; |
771 | struct buffer_head *eb_bh = NULL; |
772 | struct ocfs2_extent_block *eb; |
773 | struct ocfs2_extent_rec *rec = NULL; |
774 | |
775 | if (el->l_tree_depth) { |
776 | ret = ocfs2_find_leaf(ci: INODE_CACHE(inode), root_el: el, cpos: major_hash, |
777 | leaf_bh: &eb_bh); |
778 | if (ret) { |
779 | mlog_errno(ret); |
780 | goto out; |
781 | } |
782 | |
783 | eb = (struct ocfs2_extent_block *) eb_bh->b_data; |
784 | el = &eb->h_list; |
785 | |
786 | if (el->l_tree_depth) { |
787 | ret = ocfs2_error(inode->i_sb, |
788 | "Inode %lu has non zero tree depth in btree tree block %llu\n" , |
789 | inode->i_ino, |
790 | (unsigned long long)eb_bh->b_blocknr); |
791 | goto out; |
792 | } |
793 | } |
794 | |
795 | found = 0; |
796 | for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { |
797 | rec = &el->l_recs[i]; |
798 | |
799 | if (le32_to_cpu(rec->e_cpos) <= major_hash) { |
800 | found = 1; |
801 | break; |
802 | } |
803 | } |
804 | |
805 | if (!found) { |
806 | ret = ocfs2_error(inode->i_sb, |
807 | "Inode %lu has bad extent record (%u, %u, 0) in btree\n" , |
808 | inode->i_ino, |
809 | le32_to_cpu(rec->e_cpos), |
810 | ocfs2_rec_clusters(el, rec)); |
811 | goto out; |
812 | } |
813 | |
814 | if (ret_phys_blkno) |
815 | *ret_phys_blkno = le64_to_cpu(rec->e_blkno); |
816 | if (ret_cpos) |
817 | *ret_cpos = le32_to_cpu(rec->e_cpos); |
818 | if (ret_clen) |
819 | *ret_clen = le16_to_cpu(rec->e_leaf_clusters); |
820 | |
821 | out: |
822 | brelse(bh: eb_bh); |
823 | return ret; |
824 | } |
825 | |
826 | /* |
827 | * Returns the block index, from the start of the cluster which this |
828 | * hash belongs too. |
829 | */ |
830 | static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb, |
831 | u32 minor_hash) |
832 | { |
833 | return minor_hash & osb->osb_dx_mask; |
834 | } |
835 | |
836 | static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb, |
837 | struct ocfs2_dx_hinfo *hinfo) |
838 | { |
839 | return __ocfs2_dx_dir_hash_idx(osb, minor_hash: hinfo->minor_hash); |
840 | } |
841 | |
/*
 * Translate a name hash into the physical dx leaf block that should
 * hold its entry.  The major hash selects the cluster via the extent
 * list; the minor hash selects the block within that cluster.
 */
static int ocfs2_dx_dir_lookup(struct inode *inode,
			       struct ocfs2_extent_list *el,
			       struct ocfs2_dx_hinfo *hinfo,
			       u32 *ret_cpos,
			       u64 *ret_phys_blkno)
{
	int ret = 0;
	unsigned int cend, clen;
	u32 cpos;
	u64 blkno;
	u32 name_hash = hinfo->major_hash;

	ret = ocfs2_dx_dir_lookup_rec(inode, el, major_hash: name_hash, ret_cpos: &cpos, ret_phys_blkno: &blkno,
				      ret_clen: &clen);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Clamp to the extent: pick the cluster covering name_hash. */
	cend = cpos + clen;
	if (name_hash >= cend) {
		/* We want the last cluster */
		blkno += ocfs2_clusters_to_blocks(sb: inode->i_sb, clusters: clen - 1);
		cpos += clen - 1;
	} else {
		blkno += ocfs2_clusters_to_blocks(sb: inode->i_sb,
						  clusters: name_hash - cpos);
		cpos = name_hash;
	}

	/*
	 * We now have the cluster which should hold our entry. To
	 * find the exact block from the start of the cluster to
	 * search, we take the lower bits of the hash.
	 */
	blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo);

	if (ret_phys_blkno)
		*ret_phys_blkno = blkno;
	if (ret_cpos)
		*ret_cpos = cpos;

out:

	return ret;
}
888 | |
/*
 * Core indexed lookup: hash the name, find the dx entry list (inline
 * in the root, or in a leaf block located via the extent list), then
 * for each hash match read the unindexed dirent block and confirm the
 * name.  On success the result structure owns the pinned buffers;
 * returns -ENOENT when no entry matches.
 */
static int ocfs2_dx_dir_search(const char *name, int namelen,
			       struct inode *dir,
			       struct ocfs2_dx_root_block *dx_root,
			       struct ocfs2_dir_lookup_result *res)
{
	int ret, i, found;
	u64 phys;
	struct buffer_head *dx_leaf_bh = NULL;
	struct ocfs2_dx_leaf *dx_leaf;
	struct ocfs2_dx_entry *dx_entry = NULL;
	struct buffer_head *dir_ent_bh = NULL;
	struct ocfs2_dir_entry *dir_ent = NULL;
	struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo;
	struct ocfs2_extent_list *dr_el;
	struct ocfs2_dx_entry_list *entry_list;

	ocfs2_dx_dir_name_hash(dir, name, len: namelen, hinfo: &res->dl_hinfo);

	if (ocfs2_dx_root_inline(dx_root)) {
		/* Inline root: entries live in the root block itself. */
		entry_list = &dx_root->dr_entries;
		goto search;
	}

	dr_el = &dx_root->dr_list;

	ret = ocfs2_dx_dir_lookup(inode: dir, el: dr_el, hinfo, NULL, ret_phys_blkno: &phys);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_dx_dir_search(ino: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno,
				  namelen, name, major_hash: hinfo->major_hash,
				  minor_hash: hinfo->minor_hash, blkno: (unsigned long long)phys);

	ret = ocfs2_read_dx_leaf(dir, blkno: phys, dx_leaf_bh: &dx_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;

	trace_ocfs2_dx_dir_search_leaf_info(
			le16_to_cpu(dx_leaf->dl_list.de_num_used),
			le16_to_cpu(dx_leaf->dl_list.de_count));

	entry_list = &dx_leaf->dl_list;

search:
	/*
	 * Empty leaf is legal, so no need to check for that.
	 */
	found = 0;
	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
		dx_entry = &entry_list->de_entries[i];

		/* Hash collisions are possible; both halves must match. */
		if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash)
		    || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash))
			continue;

		/*
		 * Search unindexed leaf block now. We're not
		 * guaranteed to find anything.
		 */
		ret = ocfs2_read_dir_block_direct(dir,
					  le64_to_cpu(dx_entry->dx_dirent_blk),
					  bh: &dir_ent_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * XXX: We should check the unindexed block here,
		 * before using it.
		 */

		found = ocfs2_search_dirblock(bh: dir_ent_bh, dir, name, namelen,
					      offset: 0, first_de: dir_ent_bh->b_data,
					      bytes: dir->i_sb->s_blocksize, res_dir: &dir_ent);
		if (found == 1)
			break;

		if (found == -1) {
			/* This means we found a bad directory entry. */
			ret = -EIO;
			mlog_errno(ret);
			goto out;
		}

		/* Not in this dirent block; drop it and keep scanning. */
		brelse(bh: dir_ent_bh);
		dir_ent_bh = NULL;
	}

	if (found <= 0) {
		ret = -ENOENT;
		goto out;
	}

	/* Ownership of the pinned buffers moves to the result struct. */
	res->dl_leaf_bh = dir_ent_bh;
	res->dl_entry = dir_ent;
	res->dl_dx_leaf_bh = dx_leaf_bh;
	res->dl_dx_entry = dx_entry;

	ret = 0;
out:
	if (ret) {
		brelse(bh: dx_leaf_bh);
		brelse(bh: dir_ent_bh);
	}
	return ret;
}
1002 | |
/*
 * Lookup entry point for indexed directories: read the dinode to find
 * the dx root, then search it.  On success the dx root bh is handed to
 * the lookup result; on any failure all buffers are released here.
 */
static int ocfs2_find_entry_dx(const char *name, int namelen,
			       struct inode *dir,
			       struct ocfs2_dir_lookup_result *lookup)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	struct buffer_head *dx_root_bh = NULL;
	struct ocfs2_dx_root_block *dx_root;

	ret = ocfs2_read_inode_block(inode: dir, bh: &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	ret = ocfs2_read_dx_root(dir, di, dx_root_bh: &dx_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;

	ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, res: lookup);
	if (ret) {
		/* -ENOENT is an ordinary miss, not worth logging. */
		if (ret != -ENOENT)
			mlog_errno(ret);
		goto out;
	}

	/* NULL it out so the brelse below doesn't drop our reference. */
	lookup->dl_dx_root_bh = dx_root_bh;
	dx_root_bh = NULL;
out:
	brelse(bh: di_bh);
	brelse(bh: dx_root_bh);
	return ret;
}
1042 | |
1043 | /* |
1044 | * Try to find an entry of the provided name within 'dir'. |
1045 | * |
1046 | * If nothing was found, -ENOENT is returned. Otherwise, zero is |
1047 | * returned and the struct 'res' will contain information useful to |
1048 | * other directory manipulation functions. |
1049 | * |
1050 | * Caller can NOT assume anything about the contents of the |
1051 | * buffer_heads - they are passed back only so that it can be passed |
1052 | * into any one of the manipulation functions (add entry, delete |
1053 | * entry, etc). As an example, bh in the extent directory case is a |
1054 | * data block, in the inline-data case it actually points to an inode, |
1055 | * in the indexed directory case, multiple buffers are involved. |
1056 | */ |
1057 | int ocfs2_find_entry(const char *name, int namelen, |
1058 | struct inode *dir, struct ocfs2_dir_lookup_result *lookup) |
1059 | { |
1060 | struct buffer_head *bh; |
1061 | struct ocfs2_dir_entry *res_dir = NULL; |
1062 | |
1063 | if (ocfs2_dir_indexed(inode: dir)) |
1064 | return ocfs2_find_entry_dx(name, namelen, dir, lookup); |
1065 | |
1066 | /* |
1067 | * The unindexed dir code only uses part of the lookup |
1068 | * structure, so there's no reason to push it down further |
1069 | * than this. |
1070 | */ |
1071 | if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) |
1072 | bh = ocfs2_find_entry_id(name, namelen, dir, res_dir: &res_dir); |
1073 | else |
1074 | bh = ocfs2_find_entry_el(name, namelen, dir, res_dir: &res_dir); |
1075 | |
1076 | if (bh == NULL) |
1077 | return -ENOENT; |
1078 | |
1079 | lookup->dl_leaf_bh = bh; |
1080 | lookup->dl_entry = res_dir; |
1081 | return 0; |
1082 | } |
1083 | |
1084 | /* |
1085 | * Update inode number and type of a previously found directory entry. |
1086 | */ |
int ocfs2_update_entry(struct inode *dir, handle_t *handle,
		       struct ocfs2_dir_lookup_result *res,
		       struct inode *new_entry_inode)
{
	int ret;
	ocfs2_journal_access_func access = ocfs2_journal_access_db;
	struct ocfs2_dir_entry *de = res->dl_entry;
	struct buffer_head *de_bh = res->dl_leaf_bh;

	/*
	 * The same code works fine for both inline-data and extent
	 * based directories, so no need to split this up. The only
	 * difference is the journal_access function.
	 */

	if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		access = ocfs2_journal_access_di;

	/* Declare the write to the journal *before* touching the dirent. */
	ret = access(handle, INODE_CACHE(inode: dir), de_bh,
		     OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Repoint the entry at the new inode and refresh its file type. */
	de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno);
	ocfs2_set_de_type(de, mode: new_entry_inode->i_mode);

	ocfs2_journal_dirty(handle, bh: de_bh);

out:
	return ret;
}
1120 | |
1121 | /* |
1122 | * __ocfs2_delete_entry deletes a directory entry by merging it with the |
1123 | * previous entry |
1124 | */ |
static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
				struct ocfs2_dir_entry *de_del,
				struct buffer_head *bh, char *first_de,
				unsigned int bytes)
{
	struct ocfs2_dir_entry *de, *pde;
	int i, status = -ENOENT;
	ocfs2_journal_access_func access = ocfs2_journal_access_db;

	/* Inline-data dirs journal the inode block, not a dir block. */
	if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		access = ocfs2_journal_access_di;

	/* Linear scan from the start of the dirent area, tracking the
	 * previous entry so the victim can be merged into it. */
	i = 0;
	pde = NULL;
	de = (struct ocfs2_dir_entry *) first_de;
	while (i < bytes) {
		if (!ocfs2_check_dir_entry(dir, de, bh, offset: i)) {
			status = -EIO;
			mlog_errno(status);
			goto bail;
		}
		if (de == de_del) {
			/* Journal the buffer before any modification. */
			status = access(handle, INODE_CACHE(inode: dir), bh,
					OCFS2_JOURNAL_ACCESS_WRITE);
			if (status < 0) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
			/* Absorb the victim's record length into the
			 * previous entry; a leading entry is just
			 * marked unused (inode == 0). */
			if (pde)
				le16_add_cpu(var: &pde->rec_len,
						le16_to_cpu(de->rec_len));
			de->inode = 0;
			/* Readers use i_version to detect concurrent
			 * dirent modification. */
			inode_inc_iversion(inode: dir);
			ocfs2_journal_dirty(handle, bh);
			goto bail;
		}
		i += le16_to_cpu(de->rec_len);
		pde = de;
		de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
	}
bail:
	return status;
}
1169 | |
1170 | static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de) |
1171 | { |
1172 | unsigned int hole; |
1173 | |
1174 | if (le64_to_cpu(de->inode) == 0) |
1175 | hole = le16_to_cpu(de->rec_len); |
1176 | else |
1177 | hole = le16_to_cpu(de->rec_len) - |
1178 | OCFS2_DIR_REC_LEN(de->name_len); |
1179 | |
1180 | return hole; |
1181 | } |
1182 | |
1183 | static int ocfs2_find_max_rec_len(struct super_block *sb, |
1184 | struct buffer_head *dirblock_bh) |
1185 | { |
1186 | int size, this_hole, largest_hole = 0; |
1187 | char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data; |
1188 | struct ocfs2_dir_entry *de; |
1189 | |
1190 | trailer = (char *)ocfs2_trailer_from_bh(dirblock_bh, sb); |
1191 | size = ocfs2_dir_trailer_blk_off(sb); |
1192 | limit = start + size; |
1193 | de_buf = start; |
1194 | de = (struct ocfs2_dir_entry *)de_buf; |
1195 | do { |
1196 | if (de_buf != trailer) { |
1197 | this_hole = ocfs2_figure_dirent_hole(de); |
1198 | if (this_hole > largest_hole) |
1199 | largest_hole = this_hole; |
1200 | } |
1201 | |
1202 | de_buf += le16_to_cpu(de->rec_len); |
1203 | de = (struct ocfs2_dir_entry *)de_buf; |
1204 | } while (de_buf < limit); |
1205 | |
1206 | if (largest_hole >= OCFS2_DIR_MIN_REC_LEN) |
1207 | return largest_hole; |
1208 | return 0; |
1209 | } |
1210 | |
1211 | static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list, |
1212 | int index) |
1213 | { |
1214 | int num_used = le16_to_cpu(entry_list->de_num_used); |
1215 | |
1216 | if (num_used == 1 || index == (num_used - 1)) |
1217 | goto clear; |
1218 | |
1219 | memmove(&entry_list->de_entries[index], |
1220 | &entry_list->de_entries[index + 1], |
1221 | (num_used - index - 1)*sizeof(struct ocfs2_dx_entry)); |
1222 | clear: |
1223 | num_used--; |
1224 | memset(&entry_list->de_entries[num_used], 0, |
1225 | sizeof(struct ocfs2_dx_entry)); |
1226 | entry_list->de_num_used = cpu_to_le16(num_used); |
1227 | } |
1228 | |
/*
 * Delete an entry from an indexed directory: remove the unindexed
 * dirent from its leaf block, drop the matching index entry, and keep
 * the per-block free-space list consistent.
 */
static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
				 struct ocfs2_dir_lookup_result *lookup)
{
	int ret, index, max_rec_len, add_to_free_list = 0;
	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
	struct buffer_head *leaf_bh = lookup->dl_leaf_bh;
	struct ocfs2_dx_leaf *dx_leaf;
	struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry;
	struct ocfs2_dir_block_trailer *trailer;
	struct ocfs2_dx_root_block *dx_root;
	struct ocfs2_dx_entry_list *entry_list;

	/*
	 * This function gets a bit messy because we might have to
	 * modify the root block, regardless of whether the indexed
	 * entries are stored inline.
	 */

	/*
	 * *Only* set 'entry_list' here, based on where we're looking
	 * for the indexed entries. Later, we might still want to
	 * journal both blocks, based on free list state.
	 */
	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
	if (ocfs2_dx_root_inline(dx_root)) {
		entry_list = &dx_root->dr_entries;
	} else {
		dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data;
		entry_list = &dx_leaf->dl_list;
	}

	/* Neither of these are a disk corruption - that should have
	 * been caught by lookup, before we got here. */
	BUG_ON(le16_to_cpu(entry_list->de_count) <= 0);
	BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0);

	/* Pointer arithmetic recovers the array slot of the dx_entry
	 * that lookup handed us. */
	index = (char *)dx_entry - (char *)entry_list->de_entries;
	index /= sizeof(*dx_entry);

	if (index >= le16_to_cpu(entry_list->de_num_used)) {
		mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n" ,
		     (unsigned long long)OCFS2_I(dir)->ip_blkno, index,
		     entry_list, dx_entry);
		return -EIO;
	}

	/*
	 * We know that removal of this dirent will leave enough room
	 * for a new one, so add this block to the free list if it
	 * isn't already there.
	 */
	trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb);
	if (trailer->db_free_rec_len == 0)
		add_to_free_list = 1;

	/*
	 * Add the block holding our index into the journal before
	 * removing the unindexed entry. If we get an error return
	 * from __ocfs2_delete_entry(), then it hasn't removed the
	 * entry yet. Likewise, successful return means we *must*
	 * remove the indexed entry.
	 *
	 * We're also careful to journal the root tree block here as
	 * the entry count needs to be updated. Also, we might be
	 * adding to the start of the free list.
	 */
	ret = ocfs2_journal_access_dr(handle, ci: INODE_CACHE(inode: dir), bh: dx_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (!ocfs2_dx_root_inline(dx_root)) {
		ret = ocfs2_journal_access_dl(handle, ci: INODE_CACHE(inode: dir),
					      bh: lookup->dl_dx_leaf_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trace_ocfs2_delete_entry_dx(val1: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno,
				    val2: index);

	ret = __ocfs2_delete_entry(handle, dir, de_del: lookup->dl_entry,
				   bh: leaf_bh, first_de: leaf_bh->b_data, bytes: leaf_bh->b_size);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Re-derive the leaf's free-space accounting now that the
	 * dirent is gone, and link the block to the free list head
	 * if it just gained usable space. */
	max_rec_len = ocfs2_find_max_rec_len(sb: dir->i_sb, dirblock_bh: leaf_bh);
	trailer->db_free_rec_len = cpu_to_le16(max_rec_len);
	if (add_to_free_list) {
		trailer->db_free_next = dx_root->dr_free_blk;
		dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr);
		ocfs2_journal_dirty(handle, bh: dx_root_bh);
	}

	/* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */
	ocfs2_journal_dirty(handle, bh: leaf_bh);

	le32_add_cpu(var: &dx_root->dr_num_entries, val: -1);
	ocfs2_journal_dirty(handle, bh: dx_root_bh);

	/* Finally drop the index entry itself (point of no return —
	 * the unindexed dirent is already gone). */
	ocfs2_dx_list_remove_entry(entry_list, index);

	if (!ocfs2_dx_root_inline(dx_root))
		ocfs2_journal_dirty(handle, bh: lookup->dl_dx_leaf_bh);

out:
	return ret;
}
1344 | |
1345 | static inline int ocfs2_delete_entry_id(handle_t *handle, |
1346 | struct inode *dir, |
1347 | struct ocfs2_dir_entry *de_del, |
1348 | struct buffer_head *bh) |
1349 | { |
1350 | int ret; |
1351 | struct buffer_head *di_bh = NULL; |
1352 | struct ocfs2_dinode *di; |
1353 | struct ocfs2_inline_data *data; |
1354 | |
1355 | ret = ocfs2_read_inode_block(inode: dir, bh: &di_bh); |
1356 | if (ret) { |
1357 | mlog_errno(ret); |
1358 | goto out; |
1359 | } |
1360 | |
1361 | di = (struct ocfs2_dinode *)di_bh->b_data; |
1362 | data = &di->id2.i_data; |
1363 | |
1364 | ret = __ocfs2_delete_entry(handle, dir, de_del, bh, first_de: data->id_data, |
1365 | bytes: i_size_read(inode: dir)); |
1366 | |
1367 | brelse(bh: di_bh); |
1368 | out: |
1369 | return ret; |
1370 | } |
1371 | |
1372 | static inline int ocfs2_delete_entry_el(handle_t *handle, |
1373 | struct inode *dir, |
1374 | struct ocfs2_dir_entry *de_del, |
1375 | struct buffer_head *bh) |
1376 | { |
1377 | return __ocfs2_delete_entry(handle, dir, de_del, bh, first_de: bh->b_data, |
1378 | bytes: bh->b_size); |
1379 | } |
1380 | |
1381 | /* |
1382 | * Delete a directory entry. Hide the details of directory |
1383 | * implementation from the caller. |
1384 | */ |
1385 | int ocfs2_delete_entry(handle_t *handle, |
1386 | struct inode *dir, |
1387 | struct ocfs2_dir_lookup_result *res) |
1388 | { |
1389 | if (ocfs2_dir_indexed(inode: dir)) |
1390 | return ocfs2_delete_entry_dx(handle, dir, lookup: res); |
1391 | |
1392 | if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) |
1393 | return ocfs2_delete_entry_id(handle, dir, de_del: res->dl_entry, |
1394 | bh: res->dl_leaf_bh); |
1395 | |
1396 | return ocfs2_delete_entry_el(handle, dir, de_del: res->dl_entry, |
1397 | bh: res->dl_leaf_bh); |
1398 | } |
1399 | |
1400 | /* |
1401 | * Check whether 'de' has enough room to hold an entry of |
1402 | * 'new_rec_len' bytes. |
1403 | */ |
1404 | static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de, |
1405 | unsigned int new_rec_len) |
1406 | { |
1407 | unsigned int de_really_used; |
1408 | |
1409 | /* Check whether this is an empty record with enough space */ |
1410 | if (le64_to_cpu(de->inode) == 0 && |
1411 | le16_to_cpu(de->rec_len) >= new_rec_len) |
1412 | return 1; |
1413 | |
1414 | /* |
1415 | * Record might have free space at the end which we can |
1416 | * use. |
1417 | */ |
1418 | de_really_used = OCFS2_DIR_REC_LEN(de->name_len); |
1419 | if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len)) |
1420 | return 1; |
1421 | |
1422 | return 0; |
1423 | } |
1424 | |
1425 | static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf, |
1426 | struct ocfs2_dx_entry *dx_new_entry) |
1427 | { |
1428 | int i; |
1429 | |
1430 | i = le16_to_cpu(dx_leaf->dl_list.de_num_used); |
1431 | dx_leaf->dl_list.de_entries[i] = *dx_new_entry; |
1432 | |
1433 | le16_add_cpu(var: &dx_leaf->dl_list.de_num_used, val: 1); |
1434 | } |
1435 | |
1436 | static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list, |
1437 | struct ocfs2_dx_hinfo *hinfo, |
1438 | u64 dirent_blk) |
1439 | { |
1440 | int i; |
1441 | struct ocfs2_dx_entry *dx_entry; |
1442 | |
1443 | i = le16_to_cpu(entry_list->de_num_used); |
1444 | dx_entry = &entry_list->de_entries[i]; |
1445 | |
1446 | memset(dx_entry, 0, sizeof(*dx_entry)); |
1447 | dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash); |
1448 | dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash); |
1449 | dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk); |
1450 | |
1451 | le16_add_cpu(var: &entry_list->de_num_used, val: 1); |
1452 | } |
1453 | |
1454 | static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle, |
1455 | struct ocfs2_dx_hinfo *hinfo, |
1456 | u64 dirent_blk, |
1457 | struct buffer_head *dx_leaf_bh) |
1458 | { |
1459 | int ret; |
1460 | struct ocfs2_dx_leaf *dx_leaf; |
1461 | |
1462 | ret = ocfs2_journal_access_dl(handle, ci: INODE_CACHE(inode: dir), bh: dx_leaf_bh, |
1463 | OCFS2_JOURNAL_ACCESS_WRITE); |
1464 | if (ret) { |
1465 | mlog_errno(ret); |
1466 | goto out; |
1467 | } |
1468 | |
1469 | dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data; |
1470 | ocfs2_dx_entry_list_insert(entry_list: &dx_leaf->dl_list, hinfo, dirent_blk); |
1471 | ocfs2_journal_dirty(handle, bh: dx_leaf_bh); |
1472 | |
1473 | out: |
1474 | return ret; |
1475 | } |
1476 | |
1477 | static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle, |
1478 | struct ocfs2_dx_hinfo *hinfo, |
1479 | u64 dirent_blk, |
1480 | struct ocfs2_dx_root_block *dx_root) |
1481 | { |
1482 | ocfs2_dx_entry_list_insert(entry_list: &dx_root->dr_entries, hinfo, dirent_blk); |
1483 | } |
1484 | |
/*
 * Insert the index entry described by 'lookup' (hash info + leaf block
 * number) into the directory's dx structure — inline in the root block
 * or in a separate dx leaf — and bump the root's entry count.
 */
static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
			       struct ocfs2_dir_lookup_result *lookup)
{
	int ret = 0;
	struct ocfs2_dx_root_block *dx_root;
	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;

	/* The root is always modified (dr_num_entries), so journal it
	 * up front regardless of where the entry lands. */
	ret = ocfs2_journal_access_dr(handle, ci: INODE_CACHE(inode: dir), bh: dx_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data;
	if (ocfs2_dx_root_inline(dx_root)) {
		ocfs2_dx_inline_root_insert(dir, handle,
					    hinfo: &lookup->dl_hinfo,
					    dirent_blk: lookup->dl_leaf_bh->b_blocknr,
					    dx_root);
	} else {
		/* Non-inline: the entry goes into the dx leaf that
		 * lookup located for us. */
		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, hinfo: &lookup->dl_hinfo,
						 dirent_blk: lookup->dl_leaf_bh->b_blocknr,
						 dx_leaf_bh: lookup->dl_dx_leaf_bh);
		if (ret)
			goto out;
	}

	le32_add_cpu(var: &dx_root->dr_num_entries, val: 1);
	ocfs2_journal_dirty(handle, bh: dx_root_bh);

out:
	return ret;
}
1519 | |
/*
 * Unlink lookup->dl_leaf_bh from the directory's singly-linked free
 * list.  The predecessor is either the dx root (list head) or the
 * previous leaf block; the caller must already have journaled that
 * predecessor buffer (see ocfs2_recalc_free_list / __ocfs2_add_entry).
 */
static void ocfs2_remove_block_from_free_list(struct inode *dir,
				      handle_t *handle,
				      struct ocfs2_dir_lookup_result *lookup)
{
	struct ocfs2_dir_block_trailer *trailer, *prev;
	struct ocfs2_dx_root_block *dx_root;
	struct buffer_head *bh;

	trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb);

	/* Splice our successor into whichever node points at us. */
	if (ocfs2_free_list_at_root(res: lookup)) {
		bh = lookup->dl_dx_root_bh;
		dx_root = (struct ocfs2_dx_root_block *)bh->b_data;
		dx_root->dr_free_blk = trailer->db_free_next;
	} else {
		bh = lookup->dl_prev_leaf_bh;
		prev = ocfs2_trailer_from_bh(bh, dir->i_sb);
		prev->db_free_next = trailer->db_free_next;
	}

	/* Clear our own free-list fields: no space left, no successor. */
	trailer->db_free_rec_len = cpu_to_le16(0);
	trailer->db_free_next = cpu_to_le64(0);

	ocfs2_journal_dirty(handle, bh);
	ocfs2_journal_dirty(handle, bh: lookup->dl_leaf_bh);
}
1546 | |
1547 | /* |
1548 | * This expects that a journal write has been reserved on |
1549 | * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh |
1550 | */ |
1551 | static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle, |
1552 | struct ocfs2_dir_lookup_result *lookup) |
1553 | { |
1554 | int max_rec_len; |
1555 | struct ocfs2_dir_block_trailer *trailer; |
1556 | |
1557 | /* Walk dl_leaf_bh to figure out what the new free rec_len is. */ |
1558 | max_rec_len = ocfs2_find_max_rec_len(sb: dir->i_sb, dirblock_bh: lookup->dl_leaf_bh); |
1559 | if (max_rec_len) { |
1560 | /* |
1561 | * There's still room in this block, so no need to remove it |
1562 | * from the free list. In this case, we just want to update |
1563 | * the rec len accounting. |
1564 | */ |
1565 | trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb); |
1566 | trailer->db_free_rec_len = cpu_to_le16(max_rec_len); |
1567 | ocfs2_journal_dirty(handle, bh: lookup->dl_leaf_bh); |
1568 | } else { |
1569 | ocfs2_remove_block_from_free_list(dir, handle, lookup); |
1570 | } |
1571 | } |
1572 | |
1573 | /* we don't always have a dentry for what we want to add, so people |
1574 | * like orphan dir can call this instead. |
1575 | * |
1576 | * The lookup context must have been filled from |
1577 | * ocfs2_prepare_dir_for_insert. |
1578 | */ |
int __ocfs2_add_entry(handle_t *handle,
		      struct inode *dir,
		      const char *name, int namelen,
		      struct inode *inode, u64 blkno,
		      struct buffer_head *parent_fe_bh,
		      struct ocfs2_dir_lookup_result *lookup)
{
	unsigned long offset;
	unsigned short rec_len;
	struct ocfs2_dir_entry *de, *de1;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data;
	struct super_block *sb = dir->i_sb;
	int retval;
	/* By default the search area is a whole dir block; inline-data
	 * dirs narrow this to the inode's data area below. */
	unsigned int size = sb->s_blocksize;
	struct buffer_head *insert_bh = lookup->dl_leaf_bh;
	char *data_start = insert_bh->b_data;

	if (!namelen)
		return -EINVAL;

	if (ocfs2_dir_indexed(inode: dir)) {
		struct buffer_head *bh;

		/*
		 * An indexed dir may require that we update the free space
		 * list. Reserve a write to the previous node in the list so
		 * that we don't fail later.
		 *
		 * XXX: This can be either a dx_root_block, or an unindexed
		 * directory tree leaf block.
		 */
		if (ocfs2_free_list_at_root(res: lookup)) {
			bh = lookup->dl_dx_root_bh;
			retval = ocfs2_journal_access_dr(handle,
						 ci: INODE_CACHE(inode: dir), bh,
						 OCFS2_JOURNAL_ACCESS_WRITE);
		} else {
			bh = lookup->dl_prev_leaf_bh;
			retval = ocfs2_journal_access_db(handle,
						 ci: INODE_CACHE(inode: dir), bh,
						 OCFS2_JOURNAL_ACCESS_WRITE);
		}
		if (retval) {
			mlog_errno(retval);
			return retval;
		}
	} else if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		data_start = di->id2.i_data.id_data;
		size = i_size_read(inode: dir);

		BUG_ON(insert_bh != parent_fe_bh);
	}

	rec_len = OCFS2_DIR_REC_LEN(namelen);
	offset = 0;
	de = (struct ocfs2_dir_entry *) data_start;
	/* Walk the dirents until we find one with enough free space
	 * (ocfs2_prepare_dir_for_insert guarantees one exists). */
	while (1) {
		BUG_ON((char *)de >= (size + data_start));

		/* These checks should've already been passed by the
		 * prepare function, but I guess we can leave them
		 * here anyway. */
		if (!ocfs2_check_dir_entry(dir, de, bh: insert_bh, offset)) {
			retval = -ENOENT;
			goto bail;
		}
		if (ocfs2_match(len: namelen, name, de)) {
			retval = -EEXIST;
			goto bail;
		}

		/* We're guaranteed that we should have space, so we
		 * can't possibly have hit the trailer...right? */
		mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size),
				"Hit dir trailer trying to insert %.*s "
				"(namelen %d) into directory %llu. "
				"offset is %lu, trailer offset is %d\n" ,
				namelen, name, namelen,
				(unsigned long long)parent_fe_bh->b_blocknr,
				offset, ocfs2_dir_trailer_blk_off(dir->i_sb));

		if (ocfs2_dirent_would_fit(de, new_rec_len: rec_len)) {
			/* Found our slot: update timestamps, journal
			 * the target buffer, then carve out the new
			 * dirent. */
			inode_set_mtime_to_ts(inode: dir,
					      ts: inode_set_ctime_current(inode: dir));
			retval = ocfs2_mark_inode_dirty(handle, inode: dir, bh: parent_fe_bh);
			if (retval < 0) {
				mlog_errno(retval);
				goto bail;
			}

			/* Inline-data dirs journal the inode block;
			 * everything else journals the dir block. */
			if (insert_bh == parent_fe_bh)
				retval = ocfs2_journal_access_di(handle,
								 ci: INODE_CACHE(inode: dir),
								 bh: insert_bh,
								 OCFS2_JOURNAL_ACCESS_WRITE);
			else {
				retval = ocfs2_journal_access_db(handle,
								 ci: INODE_CACHE(inode: dir),
								 bh: insert_bh,
								 OCFS2_JOURNAL_ACCESS_WRITE);

				/* Indexed dirs also need the new name
				 * inserted into the hash index. */
				if (!retval && ocfs2_dir_indexed(inode: dir))
					retval = ocfs2_dx_dir_insert(dir,
								handle,
								lookup);
			}

			if (retval) {
				mlog_errno(retval);
				goto bail;
			}

			/* By now the buffer is marked for journaling */
			offset += le16_to_cpu(de->rec_len);
			if (le64_to_cpu(de->inode)) {
				/* Live record: split its tail slack
				 * into a fresh record for us. */
				de1 = (struct ocfs2_dir_entry *)((char *) de +
					OCFS2_DIR_REC_LEN(de->name_len));
				de1->rec_len =
					cpu_to_le16(le16_to_cpu(de->rec_len) -
					OCFS2_DIR_REC_LEN(de->name_len));
				de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
				de = de1;
			}
			de->file_type = FT_UNKNOWN;
			if (blkno) {
				de->inode = cpu_to_le64(blkno);
				ocfs2_set_de_type(de, mode: inode->i_mode);
			} else
				de->inode = 0;
			de->name_len = namelen;
			memcpy(de->name, name, namelen);

			if (ocfs2_dir_indexed(inode: dir))
				ocfs2_recalc_free_list(dir, handle, lookup);

			/* Tell readdir the block contents changed. */
			inode_inc_iversion(inode: dir);
			ocfs2_journal_dirty(handle, bh: insert_bh);
			retval = 0;
			goto bail;
		}

		offset += le16_to_cpu(de->rec_len);
		de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
	}

	/* when you think about it, the assert above should prevent us
	 * from ever getting here. */
	retval = -ENOSPC;
bail:
	if (retval)
		mlog_errno(retval);

	return retval;
}
1733 | |
/*
 * readdir for inline-data directories: every dirent lives inside the
 * inode block's data area (di->id2.i_data), bounded by i_size.
 */
static int ocfs2_dir_foreach_blk_id(struct inode *inode,
				    u64 *f_version,
				    struct dir_context *ctx)
{
	int ret, i;
	unsigned long offset = ctx->pos;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_inline_data *data;
	struct ocfs2_dir_entry *de;

	ret = ocfs2_read_inode_block(inode, bh: &di_bh);
	if (ret) {
		mlog(ML_ERROR, "Unable to read inode block for dir %llu\n" ,
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto out;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	data = &di->id2.i_data;

	while (ctx->pos < i_size_read(inode)) {
		/* If the dir block has changed since the last call to
		 * readdir(2), then we might be pointing to an invalid
		 * dirent right now. Scan from the start of the block
		 * to make sure. */
		if (!inode_eq_iversion(inode, old: *f_version)) {
			for (i = 0; i < i_size_read(inode) && i < offset; ) {
				de = (struct ocfs2_dir_entry *)
					(data->id_data + i);
				/* It's too expensive to do a full
				 * dirent test each time round this
				 * loop, but we do have to test at
				 * least that it is non-zero. A
				 * failure will be detected in the
				 * dirent test below. */
				if (le16_to_cpu(de->rec_len) <
				    OCFS2_DIR_REC_LEN(1))
					break;
				i += le16_to_cpu(de->rec_len);
			}
			/* Resume at the nearest valid dirent boundary
			 * and remember the version we validated. */
			ctx->pos = offset = i;
			*f_version = inode_query_iversion(inode);
		}

		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
		if (!ocfs2_check_dir_entry(dir: inode, de, bh: di_bh, offset: ctx->pos)) {
			/* On error, skip the f_pos to the end. */
			ctx->pos = i_size_read(inode);
			break;
		}
		offset += le16_to_cpu(de->rec_len);
		/* Unused records (inode == 0) are holes; skip them. */
		if (le64_to_cpu(de->inode)) {
			if (!dir_emit(ctx, name: de->name, namelen: de->name_len,
				      le64_to_cpu(de->inode),
				      type: fs_ftype_to_dtype(filetype: de->file_type)))
				goto out;
		}
		ctx->pos += le16_to_cpu(de->rec_len);
	}
out:
	brelse(bh: di_bh);
	return 0;
}
1798 | |
1799 | /* |
1800 | * NOTE: This function can be called against unindexed directories, |
1801 | * and indexed ones. |
1802 | */ |
static int ocfs2_dir_foreach_blk_el(struct inode *inode,
				    u64 *f_version,
				    struct dir_context *ctx,
				    bool persist)
{
	unsigned long offset, blk, last_ra_blk = 0;
	int i;
	struct buffer_head * bh, * tmp;
	struct ocfs2_dir_entry * de;
	struct super_block * sb = inode->i_sb;
	unsigned int ra_sectors = 16;
	int stored = 0;

	bh = NULL;

	/* Byte offset of ctx->pos within its directory block. */
	offset = ctx->pos & (sb->s_blocksize - 1);

	while (ctx->pos < i_size_read(inode)) {
		blk = ctx->pos >> sb->s_blocksize_bits;
		if (ocfs2_read_dir_block(inode, v_block: blk, bh: &bh, flags: 0)) {
			/* Skip the corrupt dirblock and keep trying */
			ctx->pos += sb->s_blocksize - offset;
			continue;
		}

		/* The idea here is to begin with 8k read-ahead and to stay
		 * 4k ahead of our current position.
		 *
		 * TODO: Use the pagecache for this. We just need to
		 * make sure it's cluster-safe... */
		if (!last_ra_blk
		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
			     i > 0; i--) {
				tmp = NULL;
				if (!ocfs2_read_dir_block(inode, v_block: ++blk, bh: &tmp,
							  OCFS2_BH_READAHEAD))
					brelse(bh: tmp);
			}
			last_ra_blk = blk;
			ra_sectors = 8;
		}

		/* If the dir block has changed since the last call to
		 * readdir(2), then we might be pointing to an invalid
		 * dirent right now. Scan from the start of the block
		 * to make sure. */
		if (!inode_eq_iversion(inode, old: *f_version)) {
			for (i = 0; i < sb->s_blocksize && i < offset; ) {
				de = (struct ocfs2_dir_entry *) (bh->b_data + i);
				/* It's too expensive to do a full
				 * dirent test each time round this
				 * loop, but we do have to test at
				 * least that it is non-zero. A
				 * failure will be detected in the
				 * dirent test below. */
				if (le16_to_cpu(de->rec_len) <
				    OCFS2_DIR_REC_LEN(1))
					break;
				i += le16_to_cpu(de->rec_len);
			}
			/* Snap ctx->pos back to the last dirent
			 * boundary we could validate. */
			offset = i;
			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
				| offset;
			*f_version = inode_query_iversion(inode);
		}

		/* Emit every live dirent in this block. */
		while (ctx->pos < i_size_read(inode)
		       && offset < sb->s_blocksize) {
			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
			if (!ocfs2_check_dir_entry(dir: inode, de, bh, offset)) {
				/* On error, skip the f_pos to the
				   next block. */
				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
				break;
			}
			if (le64_to_cpu(de->inode)) {
				if (!dir_emit(ctx, name: de->name,
						namelen: de->name_len,
						le64_to_cpu(de->inode),
						type: fs_ftype_to_dtype(filetype: de->file_type))) {
					brelse(bh);
					return 0;
				}
				stored++;
			}
			offset += le16_to_cpu(de->rec_len);
			ctx->pos += le16_to_cpu(de->rec_len);
		}
		offset = 0;
		brelse(bh);
		bh = NULL;
		/* Non-persistent callers (readdir) stop after one
		 * block's worth of entries has been emitted. */
		if (!persist && stored)
			break;
	}
	return 0;
}
1900 | |
1901 | static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version, |
1902 | struct dir_context *ctx, |
1903 | bool persist) |
1904 | { |
1905 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) |
1906 | return ocfs2_dir_foreach_blk_id(inode, f_version, ctx); |
1907 | return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist); |
1908 | } |
1909 | |
1910 | /* |
1911 | * This is intended to be called from inside other kernel functions, |
1912 | * so we fake some arguments. |
1913 | */ |
1914 | int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx) |
1915 | { |
1916 | u64 version = inode_query_iversion(inode); |
1917 | ocfs2_dir_foreach_blk(inode, f_version: &version, ctx, persist: true); |
1918 | return 0; |
1919 | } |
1920 | |
1921 | /* |
1922 | * ocfs2_readdir() |
1923 | * |
1924 | */ |
int ocfs2_readdir(struct file *file, struct dir_context *ctx)
{
	int error = 0;
	struct inode *inode = file_inode(f: file);
	int lock_level = 0;

	trace_ocfs2_readdir(num: (unsigned long long)OCFS2_I(inode)->ip_blkno);

	error = ocfs2_inode_lock_atime(inode, vfsmnt: file->f_path.mnt, level: &lock_level, wait: 1);
	if (lock_level && error >= 0) {
		/* We release EX lock which used to update atime
		 * and get PR lock again to reduce contention
		 * on commonly accessed directories. */
		ocfs2_inode_unlock(inode, ex: 1);
		lock_level = 0;
		error = ocfs2_inode_lock(inode, NULL, 0);
	}
	if (error < 0) {
		if (error != -ENOENT)
			mlog_errno(error);
		/* we haven't got any yet, so propagate the error. */
		goto bail_nolock;
	}

	/* persist=false: stop after one block's worth of entries so a
	 * single readdir(2) call doesn't pin the lock too long. */
	error = ocfs2_dir_foreach_blk(inode, f_version: &file->f_version, ctx, persist: false);

	ocfs2_inode_unlock(inode, ex: lock_level);
	if (error)
		mlog_errno(error);

bail_nolock:

	return error;
}
1959 | |
1960 | /* |
1961 | * NOTE: this should always be called with parent dir i_rwsem taken. |
1962 | */ |
1963 | int ocfs2_find_files_on_disk(const char *name, |
1964 | int namelen, |
1965 | u64 *blkno, |
1966 | struct inode *inode, |
1967 | struct ocfs2_dir_lookup_result *lookup) |
1968 | { |
1969 | int status = -ENOENT; |
1970 | |
1971 | trace_ocfs2_find_files_on_disk(namelen, name, blkno, |
1972 | dir: (unsigned long long)OCFS2_I(inode)->ip_blkno); |
1973 | |
1974 | status = ocfs2_find_entry(name, namelen, dir: inode, lookup); |
1975 | if (status) |
1976 | goto leave; |
1977 | |
1978 | *blkno = le64_to_cpu(lookup->dl_entry->inode); |
1979 | |
1980 | status = 0; |
1981 | leave: |
1982 | |
1983 | return status; |
1984 | } |
1985 | |
1986 | /* |
1987 | * Convenience function for callers which just want the block number |
1988 | * mapped to a name and don't require the full dirent info, etc. |
1989 | */ |
1990 | int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, |
1991 | int namelen, u64 *blkno) |
1992 | { |
1993 | int ret; |
1994 | struct ocfs2_dir_lookup_result lookup = { NULL, }; |
1995 | |
1996 | ret = ocfs2_find_files_on_disk(name, namelen, blkno, inode: dir, lookup: &lookup); |
1997 | ocfs2_free_dir_lookup_result(res: &lookup); |
1998 | |
1999 | return ret; |
2000 | } |
2001 | |
2002 | /* Check for a name within a directory. |
2003 | * |
2004 | * Return 0 if the name does not exist |
2005 | * Return -EEXIST if the directory contains the name |
2006 | * |
2007 | * Callers should have i_rwsem + a cluster lock on dir |
2008 | */ |
2009 | int ocfs2_check_dir_for_entry(struct inode *dir, |
2010 | const char *name, |
2011 | int namelen) |
2012 | { |
2013 | int ret = 0; |
2014 | struct ocfs2_dir_lookup_result lookup = { NULL, }; |
2015 | |
2016 | trace_ocfs2_check_dir_for_entry( |
2017 | dir: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno, namelen, name); |
2018 | |
2019 | if (ocfs2_find_entry(name, namelen, dir, lookup: &lookup) == 0) { |
2020 | ret = -EEXIST; |
2021 | mlog_errno(ret); |
2022 | } |
2023 | |
2024 | ocfs2_free_dir_lookup_result(res: &lookup); |
2025 | |
2026 | return ret; |
2027 | } |
2028 | |
/*
 * Scan state for ocfs2_empty_dir(); the embedded dir_context lets the
 * filldir actor recover this struct via container_of().
 */
struct ocfs2_empty_dir_priv {
	struct dir_context ctx;
	unsigned seen_dot;	/* "." seen at offset 0 */
	unsigned seen_dot_dot;	/* ".." seen immediately after "." */
	unsigned seen_other;	/* any other live entry seen */
	unsigned dx_dir;	/* set when the directory is indexed */
};
2036 | static bool ocfs2_empty_dir_filldir(struct dir_context *ctx, const char *name, |
2037 | int name_len, loff_t pos, u64 ino, |
2038 | unsigned type) |
2039 | { |
2040 | struct ocfs2_empty_dir_priv *p = |
2041 | container_of(ctx, struct ocfs2_empty_dir_priv, ctx); |
2042 | |
2043 | /* |
2044 | * Check the positions of "." and ".." records to be sure |
2045 | * they're in the correct place. |
2046 | * |
2047 | * Indexed directories don't need to proceed past the first |
2048 | * two entries, so we end the scan after seeing '..'. Despite |
2049 | * that, we allow the scan to proceed In the event that we |
2050 | * have a corrupted indexed directory (no dot or dot dot |
2051 | * entries). This allows us to double check for existing |
2052 | * entries which might not have been found in the index. |
2053 | */ |
2054 | if (name_len == 1 && !strncmp("." , name, 1) && pos == 0) { |
2055 | p->seen_dot = 1; |
2056 | return true; |
2057 | } |
2058 | |
2059 | if (name_len == 2 && !strncmp(".." , name, 2) && |
2060 | pos == OCFS2_DIR_REC_LEN(1)) { |
2061 | p->seen_dot_dot = 1; |
2062 | |
2063 | if (p->dx_dir && p->seen_dot) |
2064 | return false; |
2065 | |
2066 | return true; |
2067 | } |
2068 | |
2069 | p->seen_other = 1; |
2070 | return false; |
2071 | } |
2072 | |
2073 | static int ocfs2_empty_dir_dx(struct inode *inode, |
2074 | struct ocfs2_empty_dir_priv *priv) |
2075 | { |
2076 | int ret; |
2077 | struct buffer_head *di_bh = NULL; |
2078 | struct buffer_head *dx_root_bh = NULL; |
2079 | struct ocfs2_dinode *di; |
2080 | struct ocfs2_dx_root_block *dx_root; |
2081 | |
2082 | priv->dx_dir = 1; |
2083 | |
2084 | ret = ocfs2_read_inode_block(inode, bh: &di_bh); |
2085 | if (ret) { |
2086 | mlog_errno(ret); |
2087 | goto out; |
2088 | } |
2089 | di = (struct ocfs2_dinode *)di_bh->b_data; |
2090 | |
2091 | ret = ocfs2_read_dx_root(dir: inode, di, dx_root_bh: &dx_root_bh); |
2092 | if (ret) { |
2093 | mlog_errno(ret); |
2094 | goto out; |
2095 | } |
2096 | dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; |
2097 | |
2098 | if (le32_to_cpu(dx_root->dr_num_entries) != 2) |
2099 | priv->seen_other = 1; |
2100 | |
2101 | out: |
2102 | brelse(bh: di_bh); |
2103 | brelse(bh: dx_root_bh); |
2104 | return ret; |
2105 | } |
2106 | |
2107 | /* |
2108 | * routine to check that the specified directory is empty (for rmdir) |
2109 | * |
2110 | * Returns 1 if dir is empty, zero otherwise. |
2111 | * |
2112 | * XXX: This is a performance problem for unindexed directories. |
2113 | */ |
2114 | int ocfs2_empty_dir(struct inode *inode) |
2115 | { |
2116 | int ret; |
2117 | struct ocfs2_empty_dir_priv priv = { |
2118 | .ctx.actor = ocfs2_empty_dir_filldir, |
2119 | }; |
2120 | |
2121 | if (ocfs2_dir_indexed(inode)) { |
2122 | ret = ocfs2_empty_dir_dx(inode, priv: &priv); |
2123 | if (ret) |
2124 | mlog_errno(ret); |
2125 | /* |
2126 | * We still run ocfs2_dir_foreach to get the checks |
2127 | * for "." and "..". |
2128 | */ |
2129 | } |
2130 | |
2131 | ret = ocfs2_dir_foreach(inode, ctx: &priv.ctx); |
2132 | if (ret) |
2133 | mlog_errno(ret); |
2134 | |
2135 | if (!priv.seen_dot || !priv.seen_dot_dot) { |
2136 | mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n" , |
2137 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
2138 | /* |
2139 | * XXX: Is it really safe to allow an unlink to continue? |
2140 | */ |
2141 | return 1; |
2142 | } |
2143 | |
2144 | return !priv.seen_other; |
2145 | } |
2146 | |
2147 | /* |
2148 | * Fills "." and ".." dirents in a new directory block. Returns dirent for |
2149 | * "..", which might be used during creation of a directory with a trailing |
2150 | * header. It is otherwise safe to ignore the return code. |
2151 | */ |
2152 | static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode, |
2153 | struct inode *parent, |
2154 | char *start, |
2155 | unsigned int size) |
2156 | { |
2157 | struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start; |
2158 | |
2159 | de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); |
2160 | de->name_len = 1; |
2161 | de->rec_len = |
2162 | cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); |
2163 | strcpy(p: de->name, q: "." ); |
2164 | ocfs2_set_de_type(de, S_IFDIR); |
2165 | |
2166 | de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len)); |
2167 | de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno); |
2168 | de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1)); |
2169 | de->name_len = 2; |
2170 | strcpy(p: de->name, q: ".." ); |
2171 | ocfs2_set_de_type(de, S_IFDIR); |
2172 | |
2173 | return de; |
2174 | } |
2175 | |
2176 | /* |
2177 | * This works together with code in ocfs2_mknod_locked() which sets |
2178 | * the inline-data flag and initializes the inline-data section. |
2179 | */ |
2180 | static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb, |
2181 | handle_t *handle, |
2182 | struct inode *parent, |
2183 | struct inode *inode, |
2184 | struct buffer_head *di_bh) |
2185 | { |
2186 | int ret; |
2187 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; |
2188 | struct ocfs2_inline_data *data = &di->id2.i_data; |
2189 | unsigned int size = le16_to_cpu(data->id_count); |
2190 | |
2191 | ret = ocfs2_journal_access_di(handle, ci: INODE_CACHE(inode), bh: di_bh, |
2192 | OCFS2_JOURNAL_ACCESS_WRITE); |
2193 | if (ret) { |
2194 | mlog_errno(ret); |
2195 | goto out; |
2196 | } |
2197 | |
2198 | ocfs2_fill_initial_dirents(inode, parent, start: data->id_data, size); |
2199 | ocfs2_journal_dirty(handle, bh: di_bh); |
2200 | |
2201 | i_size_write(inode, i_size: size); |
2202 | set_nlink(inode, nlink: 2); |
2203 | inode->i_blocks = ocfs2_inode_sector_count(inode); |
2204 | |
2205 | ret = ocfs2_mark_inode_dirty(handle, inode, bh: di_bh); |
2206 | if (ret < 0) |
2207 | mlog_errno(ret); |
2208 | |
2209 | out: |
2210 | return ret; |
2211 | } |
2212 | |
/*
 * Build the first extent-allocated block of a brand new directory:
 * extend the dir by one block, fill in "." and "..", optionally add
 * the block trailer, and update the inode's size and link count.
 * On success, *@ret_new_bh (if non-NULL) receives a reference to the
 * new block which the caller must brelse().
 */
static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct inode *parent,
				 struct inode *inode,
				 struct buffer_head *fe_bh,
				 struct ocfs2_alloc_context *data_ac,
				 struct buffer_head **ret_new_bh)
{
	int status;
	unsigned int size = osb->sb->s_blocksize;
	struct buffer_head *new_bh = NULL;
	struct ocfs2_dir_entry *de;

	/* With a trailer, dirents may only use the space before it. */
	if (ocfs2_new_dir_wants_trailer(dir: inode))
		size = ocfs2_dir_trailer_blk_off(sb: parent->i_sb);

	status = ocfs2_do_extend_dir(sb: osb->sb, handle, dir: inode, parent_fe_bh: fe_bh,
				     data_ac, NULL, new_bh: &new_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_set_new_buffer_uptodate(ci: INODE_CACHE(inode), bh: new_bh);

	status = ocfs2_journal_access_db(handle, ci: INODE_CACHE(inode), bh: new_bh,
					 OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	memset(new_bh->b_data, 0, osb->sb->s_blocksize);

	de = ocfs2_fill_initial_dirents(inode, parent, start: new_bh->b_data, size);
	if (ocfs2_new_dir_wants_trailer(dir: inode)) {
		int size = le16_to_cpu(de->rec_len);

		/*
		 * Figure out the size of the hole left over after
		 * insertion of '.' and '..'. The trailer wants this
		 * information.
		 */
		size -= OCFS2_DIR_REC_LEN(2);
		size -= sizeof(struct ocfs2_dir_block_trailer);

		ocfs2_init_dir_trailer(inode, bh: new_bh, rec_len: size);
	}

	ocfs2_journal_dirty(handle, bh: new_bh);

	/* A new directory occupies one block; nlink 2 for "." and "..". */
	i_size_write(inode, i_size: inode->i_sb->s_blocksize);
	set_nlink(inode, nlink: 2);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	status = ocfs2_mark_inode_dirty(handle, inode, bh: fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = 0;
	/* Transfer ownership of the new block to the caller if asked. */
	if (ret_new_bh) {
		*ret_new_bh = new_bh;
		new_bh = NULL;
	}
bail:
	brelse(bh: new_bh);

	return status;
}
2282 | |
/*
 * Allocate and initialize a directory index root block (dx_root) for
 * @dir, then point the inode at it and set OCFS2_INDEXED_DIR_FL.
 *
 * @dx_inline: nonzero if the dx entries live directly in the root
 *             block rather than in an extent tree of leaves.
 * @num_entries: initial dr_num_entries value.
 *
 * On success *@ret_dx_root_bh holds a reference to the new (already
 * journaled) root block; the caller must brelse() it.
 */
static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
				     handle_t *handle, struct inode *dir,
				     struct buffer_head *di_bh,
				     struct buffer_head *dirdata_bh,
				     struct ocfs2_alloc_context *meta_ac,
				     int dx_inline, u32 num_entries,
				     struct buffer_head **ret_dx_root_bh)
{
	int ret;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
	u16 dr_suballoc_bit;
	u64 suballoc_loc, dr_blkno;
	unsigned int num_bits;
	struct buffer_head *dx_root_bh = NULL;
	struct ocfs2_dx_root_block *dx_root;
	struct ocfs2_dir_block_trailer *trailer =
		ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb);

	/* Claim one metadata block from the suballocator for the root. */
	ret = ocfs2_claim_metadata(handle, ac: meta_ac, bits_wanted: 1, suballoc_loc: &suballoc_loc,
				   suballoc_bit_start: &dr_suballoc_bit, num_bits: &num_bits, blkno_start: &dr_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_dx_dir_attach_index(
			val1: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno,
			val2: (unsigned long long)dr_blkno);

	dx_root_bh = sb_getblk(sb: osb->sb, block: dr_blkno);
	if (dx_root_bh == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci: INODE_CACHE(inode: dir), bh: dx_root_bh);

	ret = ocfs2_journal_access_dr(handle, ci: INODE_CACHE(inode: dir), bh: dx_root_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* Fill in the root block's self-describing metadata. */
	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
	memset(dx_root, 0, osb->sb->s_blocksize);
	strcpy(p: dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
	dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno);
	dx_root->dr_num_entries = cpu_to_le32(num_entries);
	/* Seed the free-space list with the dir block if it has room. */
	if (le16_to_cpu(trailer->db_free_rec_len))
		dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr);
	else
		dx_root->dr_free_blk = cpu_to_le64(0);

	if (dx_inline) {
		dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE;
		dx_root->dr_entries.de_count =
			cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb));
	} else {
		dx_root->dr_list.l_count =
			cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));
	}
	ocfs2_journal_dirty(handle, bh: dx_root_bh);

	/* Now link the new root into the inode and flag it as indexed. */
	ret = ocfs2_journal_access_di(handle, ci: INODE_CACHE(inode: dir), bh: di_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	di->i_dx_root = cpu_to_le64(dr_blkno);

	spin_lock(lock: &OCFS2_I(inode: dir)->ip_lock);
	OCFS2_I(inode: dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
	spin_unlock(lock: &OCFS2_I(inode: dir)->ip_lock);

	ocfs2_journal_dirty(handle, bh: di_bh);

	*ret_dx_root_bh = dx_root_bh;
	dx_root_bh = NULL;

out:
	brelse(bh: dx_root_bh);
	return ret;
}
2374 | |
/*
 * Initialize every block of a freshly allocated index cluster
 * (starting at @start_blk) as an empty dx_leaf. The buffer heads are
 * stored in @dx_leaves for the caller; on error, already-populated
 * entries remain for the caller to release.
 */
static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
				       handle_t *handle, struct inode *dir,
				       struct buffer_head **dx_leaves,
				       int num_dx_leaves, u64 start_blk)
{
	int ret, i;
	struct ocfs2_dx_leaf *dx_leaf;
	struct buffer_head *bh;

	for (i = 0; i < num_dx_leaves; i++) {
		bh = sb_getblk(sb: osb->sb, block: start_blk + i);
		if (bh == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		dx_leaves[i] = bh;

		ocfs2_set_new_buffer_uptodate(ci: INODE_CACHE(inode: dir), bh);

		ret = ocfs2_journal_access_dl(handle, ci: INODE_CACHE(inode: dir), bh,
					      OCFS2_JOURNAL_ACCESS_CREATE);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		/* Stamp the empty leaf's signature and capacity. */
		dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;

		memset(dx_leaf, 0, osb->sb->s_blocksize);
		strcpy(p: dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
		dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
		dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
		dx_leaf->dl_list.de_count =
			cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));

		trace_ocfs2_dx_dir_format_cluster(
				val1: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno,
				val2: (unsigned long long)bh->b_blocknr,
				le16_to_cpu(dx_leaf->dl_list.de_count));

		ocfs2_journal_dirty(handle, bh);
	}

	ret = 0;
out:
	return ret;
}
2422 | |
2423 | /* |
2424 | * Allocates and formats a new cluster for use in an indexed dir |
2425 | * leaf. This version will not do the extent insert, so that it can be |
2426 | * used by operations which need careful ordering. |
2427 | */ |
2428 | static int __ocfs2_dx_dir_new_cluster(struct inode *dir, |
2429 | u32 cpos, handle_t *handle, |
2430 | struct ocfs2_alloc_context *data_ac, |
2431 | struct buffer_head **dx_leaves, |
2432 | int num_dx_leaves, u64 *ret_phys_blkno) |
2433 | { |
2434 | int ret; |
2435 | u32 phys, num; |
2436 | u64 phys_blkno; |
2437 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
2438 | |
2439 | /* |
2440 | * XXX: For create, this should claim cluster for the index |
2441 | * *before* the unindexed insert so that we have a better |
2442 | * chance of contiguousness as the directory grows in number |
2443 | * of entries. |
2444 | */ |
2445 | ret = __ocfs2_claim_clusters(handle, ac: data_ac, min_clusters: 1, max_clusters: 1, cluster_start: &phys, num_clusters: &num); |
2446 | if (ret) { |
2447 | mlog_errno(ret); |
2448 | goto out; |
2449 | } |
2450 | |
2451 | /* |
2452 | * Format the new cluster first. That way, we're inserting |
2453 | * valid data. |
2454 | */ |
2455 | phys_blkno = ocfs2_clusters_to_blocks(sb: osb->sb, clusters: phys); |
2456 | ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves, |
2457 | num_dx_leaves, start_blk: phys_blkno); |
2458 | if (ret) { |
2459 | mlog_errno(ret); |
2460 | goto out; |
2461 | } |
2462 | |
2463 | *ret_phys_blkno = phys_blkno; |
2464 | out: |
2465 | return ret; |
2466 | } |
2467 | |
2468 | static int ocfs2_dx_dir_new_cluster(struct inode *dir, |
2469 | struct ocfs2_extent_tree *et, |
2470 | u32 cpos, handle_t *handle, |
2471 | struct ocfs2_alloc_context *data_ac, |
2472 | struct ocfs2_alloc_context *meta_ac, |
2473 | struct buffer_head **dx_leaves, |
2474 | int num_dx_leaves) |
2475 | { |
2476 | int ret; |
2477 | u64 phys_blkno; |
2478 | |
2479 | ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves, |
2480 | num_dx_leaves, ret_phys_blkno: &phys_blkno); |
2481 | if (ret) { |
2482 | mlog_errno(ret); |
2483 | goto out; |
2484 | } |
2485 | |
2486 | ret = ocfs2_insert_extent(handle, et, cpos, start_blk: phys_blkno, new_clusters: 1, flags: 0, |
2487 | meta_ac); |
2488 | if (ret) |
2489 | mlog_errno(ret); |
2490 | out: |
2491 | return ret; |
2492 | } |
2493 | |
2494 | static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb, |
2495 | int *ret_num_leaves) |
2496 | { |
2497 | int num_dx_leaves = ocfs2_clusters_to_blocks(sb, clusters: 1); |
2498 | struct buffer_head **dx_leaves; |
2499 | |
2500 | dx_leaves = kcalloc(n: num_dx_leaves, size: sizeof(struct buffer_head *), |
2501 | GFP_NOFS); |
2502 | if (dx_leaves && ret_num_leaves) |
2503 | *ret_num_leaves = num_dx_leaves; |
2504 | |
2505 | return dx_leaves; |
2506 | } |
2507 | |
/*
 * Initialize a brand new indexed directory: build it as an unindexed
 * directory first, then attach an inline index root and insert the
 * hashes of "." and "..".
 */
static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct inode *parent,
				 struct inode *inode,
				 struct buffer_head *di_bh,
				 struct ocfs2_alloc_context *data_ac,
				 struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct buffer_head *leaf_bh = NULL;
	struct buffer_head *dx_root_bh = NULL;
	struct ocfs2_dx_hinfo hinfo;
	struct ocfs2_dx_root_block *dx_root;
	struct ocfs2_dx_entry_list *entry_list;

	/*
	 * Our strategy is to create the directory as though it were
	 * unindexed, then add the index block. This works with very
	 * little complication since the state of a new directory is a
	 * very well known quantity.
	 *
	 * Essentially, we have two dirents ("." and ".."), in the 1st
	 * block which need indexing. These are easily inserted into
	 * the index block.
	 */

	ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh: di_bh,
				    data_ac, ret_new_bh: &leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* dx_inline == 1, num_entries == 2: just "." and "..". */
	ret = ocfs2_dx_dir_attach_index(osb, handle, dir: inode, di_bh, dirdata_bh: leaf_bh,
					meta_ac, dx_inline: 1, num_entries: 2, ret_dx_root_bh: &dx_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
	entry_list = &dx_root->dr_entries;

	/* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */
	ocfs2_dx_dir_name_hash(dir: inode, name: "." , len: 1, hinfo: &hinfo);
	ocfs2_dx_entry_list_insert(entry_list, hinfo: &hinfo, dirent_blk: leaf_bh->b_blocknr);

	ocfs2_dx_dir_name_hash(dir: inode, name: ".." , len: 2, hinfo: &hinfo);
	ocfs2_dx_entry_list_insert(entry_list, hinfo: &hinfo, dirent_blk: leaf_bh->b_blocknr);

out:
	brelse(bh: dx_root_bh);
	brelse(bh: leaf_bh);
	return ret;
}
2562 | |
2563 | int ocfs2_fill_new_dir(struct ocfs2_super *osb, |
2564 | handle_t *handle, |
2565 | struct inode *parent, |
2566 | struct inode *inode, |
2567 | struct buffer_head *fe_bh, |
2568 | struct ocfs2_alloc_context *data_ac, |
2569 | struct ocfs2_alloc_context *meta_ac) |
2570 | |
2571 | { |
2572 | BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL); |
2573 | |
2574 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) |
2575 | return ocfs2_fill_new_dir_id(osb, handle, parent, inode, di_bh: fe_bh); |
2576 | |
2577 | if (ocfs2_supports_indexed_dirs(osb)) |
2578 | return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, di_bh: fe_bh, |
2579 | data_ac, meta_ac); |
2580 | |
2581 | return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh, |
2582 | data_ac, NULL); |
2583 | } |
2584 | |
/*
 * Hash every live dirent in @dirent_bh and insert it into the dx leaf
 * selected by its hash. *@num_dx_entries is incremented once per
 * inserted entry.
 *
 * NOTE(review): the walk trusts de->rec_len read from the block; a
 * corrupt zero rec_len would prevent forward progress — presumably
 * validated earlier, verify against callers.
 */
static int ocfs2_dx_dir_index_block(struct inode *dir,
				    handle_t *handle,
				    struct buffer_head **dx_leaves,
				    int num_dx_leaves,
				    u32 *num_dx_entries,
				    struct buffer_head *dirent_bh)
{
	int ret = 0, namelen, i;
	char *de_buf, *limit;
	struct ocfs2_dir_entry *de;
	struct buffer_head *dx_leaf_bh;
	struct ocfs2_dx_hinfo hinfo;
	u64 dirent_blk = dirent_bh->b_blocknr;

	de_buf = dirent_bh->b_data;
	limit = de_buf + dir->i_sb->s_blocksize;

	while (de_buf < limit) {
		de = (struct ocfs2_dir_entry *)de_buf;

		/* Skip deleted/empty records. */
		namelen = de->name_len;
		if (!namelen || !de->inode)
			goto inc;

		ocfs2_dx_dir_name_hash(dir, name: de->name, len: namelen, hinfo: &hinfo);

		/* Pick the leaf block this hash maps to. */
		i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), hinfo: &hinfo);
		dx_leaf_bh = dx_leaves[i];

		ret = __ocfs2_dx_dir_leaf_insert(dir, handle, hinfo: &hinfo,
						 dirent_blk, dx_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		*num_dx_entries = *num_dx_entries + 1;

inc:
		de_buf += le16_to_cpu(de->rec_len);
	}

out:
	return ret;
}
2630 | |
2631 | /* |
2632 | * XXX: This expects dx_root_bh to already be part of the transaction. |
2633 | */ |
2634 | static void ocfs2_dx_dir_index_root_block(struct inode *dir, |
2635 | struct buffer_head *dx_root_bh, |
2636 | struct buffer_head *dirent_bh) |
2637 | { |
2638 | char *de_buf, *limit; |
2639 | struct ocfs2_dx_root_block *dx_root; |
2640 | struct ocfs2_dir_entry *de; |
2641 | struct ocfs2_dx_hinfo hinfo; |
2642 | u64 dirent_blk = dirent_bh->b_blocknr; |
2643 | |
2644 | dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; |
2645 | |
2646 | de_buf = dirent_bh->b_data; |
2647 | limit = de_buf + dir->i_sb->s_blocksize; |
2648 | |
2649 | while (de_buf < limit) { |
2650 | de = (struct ocfs2_dir_entry *)de_buf; |
2651 | |
2652 | if (!de->name_len || !de->inode) |
2653 | goto inc; |
2654 | |
2655 | ocfs2_dx_dir_name_hash(dir, name: de->name, len: de->name_len, hinfo: &hinfo); |
2656 | |
2657 | trace_ocfs2_dx_dir_index_root_block( |
2658 | dir: (unsigned long long)dir->i_ino, |
2659 | major_hash: hinfo.major_hash, minor_hash: hinfo.minor_hash, |
2660 | namelen: de->name_len, name: de->name, |
2661 | le16_to_cpu(dx_root->dr_entries.de_num_used)); |
2662 | |
2663 | ocfs2_dx_entry_list_insert(entry_list: &dx_root->dr_entries, hinfo: &hinfo, |
2664 | dirent_blk); |
2665 | |
2666 | le32_add_cpu(var: &dx_root->dr_num_entries, val: 1); |
2667 | inc: |
2668 | de_buf += le16_to_cpu(de->rec_len); |
2669 | } |
2670 | } |
2671 | |
2672 | /* |
2673 | * Count the number of inline directory entries in di_bh and compare |
2674 | * them against the number of entries we can hold in an inline dx root |
2675 | * block. |
2676 | */ |
2677 | static int ocfs2_new_dx_should_be_inline(struct inode *dir, |
2678 | struct buffer_head *di_bh) |
2679 | { |
2680 | int dirent_count = 0; |
2681 | char *de_buf, *limit; |
2682 | struct ocfs2_dir_entry *de; |
2683 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; |
2684 | |
2685 | de_buf = di->id2.i_data.id_data; |
2686 | limit = de_buf + i_size_read(inode: dir); |
2687 | |
2688 | while (de_buf < limit) { |
2689 | de = (struct ocfs2_dir_entry *)de_buf; |
2690 | |
2691 | if (de->name_len && de->inode) |
2692 | dirent_count++; |
2693 | |
2694 | de_buf += le16_to_cpu(de->rec_len); |
2695 | } |
2696 | |
2697 | /* We are careful to leave room for one extra record. */ |
2698 | return dirent_count < ocfs2_dx_entries_per_root(sb: dir->i_sb); |
2699 | } |
2700 | |
2701 | /* |
2702 | * Expand rec_len of the rightmost dirent in a directory block so that it |
2703 | * contains the end of our valid space for dirents. We do this during |
2704 | * expansion from an inline directory to one with extents. The first dir block |
2705 | * in that case is taken from the inline data portion of the inode block. |
2706 | * |
2707 | * This will also return the largest amount of contiguous space for a dirent |
2708 | * in the block. That value is *not* necessarily the last dirent, even after |
2709 | * expansion. The directory indexing code wants this value for free space |
2710 | * accounting. We do this here since we're already walking the entire dir |
2711 | * block. |
2712 | * |
2713 | * We add the dir trailer if this filesystem wants it. |
2714 | */ |
2715 | static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size, |
2716 | struct inode *dir) |
2717 | { |
2718 | struct super_block *sb = dir->i_sb; |
2719 | struct ocfs2_dir_entry *de; |
2720 | struct ocfs2_dir_entry *prev_de; |
2721 | char *de_buf, *limit; |
2722 | unsigned int new_size = sb->s_blocksize; |
2723 | unsigned int bytes, this_hole; |
2724 | unsigned int largest_hole = 0; |
2725 | |
2726 | if (ocfs2_new_dir_wants_trailer(dir)) |
2727 | new_size = ocfs2_dir_trailer_blk_off(sb); |
2728 | |
2729 | bytes = new_size - old_size; |
2730 | |
2731 | limit = start + old_size; |
2732 | de_buf = start; |
2733 | de = (struct ocfs2_dir_entry *)de_buf; |
2734 | do { |
2735 | this_hole = ocfs2_figure_dirent_hole(de); |
2736 | if (this_hole > largest_hole) |
2737 | largest_hole = this_hole; |
2738 | |
2739 | prev_de = de; |
2740 | de_buf += le16_to_cpu(de->rec_len); |
2741 | de = (struct ocfs2_dir_entry *)de_buf; |
2742 | } while (de_buf < limit); |
2743 | |
2744 | le16_add_cpu(var: &prev_de->rec_len, val: bytes); |
2745 | |
2746 | /* We need to double check this after modification of the final |
2747 | * dirent. */ |
2748 | this_hole = ocfs2_figure_dirent_hole(de: prev_de); |
2749 | if (this_hole > largest_hole) |
2750 | largest_hole = this_hole; |
2751 | |
2752 | if (largest_hole >= OCFS2_DIR_MIN_REC_LEN) |
2753 | return largest_hole; |
2754 | return 0; |
2755 | } |
2756 | |
2757 | /* |
2758 | * We allocate enough clusters to fulfill "blocks_wanted", but set |
2759 | * i_size to exactly one block. Ocfs2_extend_dir() will handle the |
2760 | * rest automatically for us. |
2761 | * |
2762 | * *first_block_bh is a pointer to the 1st data block allocated to the |
2763 | * directory. |
2764 | */ |
2765 | static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, |
2766 | unsigned int blocks_wanted, |
2767 | struct ocfs2_dir_lookup_result *lookup, |
2768 | struct buffer_head **first_block_bh) |
2769 | { |
2770 | u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0; |
2771 | struct super_block *sb = dir->i_sb; |
2772 | int ret, i, num_dx_leaves = 0, dx_inline = 0, |
2773 | credits = ocfs2_inline_to_extents_credits(sb); |
2774 | u64 dx_insert_blkno, blkno, |
2775 | bytes = blocks_wanted << sb->s_blocksize_bits; |
2776 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
2777 | struct ocfs2_inode_info *oi = OCFS2_I(inode: dir); |
2778 | struct ocfs2_alloc_context *data_ac = NULL; |
2779 | struct ocfs2_alloc_context *meta_ac = NULL; |
2780 | struct buffer_head *dirdata_bh = NULL; |
2781 | struct buffer_head *dx_root_bh = NULL; |
2782 | struct buffer_head **dx_leaves = NULL; |
2783 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; |
2784 | handle_t *handle; |
2785 | struct ocfs2_extent_tree et; |
2786 | struct ocfs2_extent_tree dx_et; |
2787 | int did_quota = 0, bytes_allocated = 0; |
2788 | |
2789 | ocfs2_init_dinode_extent_tree(et: &et, ci: INODE_CACHE(inode: dir), bh: di_bh); |
2790 | |
2791 | alloc = ocfs2_clusters_for_bytes(sb, bytes); |
2792 | dx_alloc = 0; |
2793 | |
2794 | down_write(sem: &oi->ip_alloc_sem); |
2795 | |
2796 | if (ocfs2_supports_indexed_dirs(osb)) { |
2797 | credits += ocfs2_add_dir_index_credits(sb); |
2798 | |
2799 | dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh); |
2800 | if (!dx_inline) { |
2801 | /* Add one more cluster for an index leaf */ |
2802 | dx_alloc++; |
2803 | dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb, |
2804 | ret_num_leaves: &num_dx_leaves); |
2805 | if (!dx_leaves) { |
2806 | ret = -ENOMEM; |
2807 | mlog_errno(ret); |
2808 | goto out; |
2809 | } |
2810 | } |
2811 | |
2812 | /* This gets us the dx_root */ |
2813 | ret = ocfs2_reserve_new_metadata_blocks(osb, blocks: 1, ac: &meta_ac); |
2814 | if (ret) { |
2815 | mlog_errno(ret); |
2816 | goto out; |
2817 | } |
2818 | } |
2819 | |
2820 | /* |
2821 | * We should never need more than 2 clusters for the unindexed |
2822 | * tree - maximum dirent size is far less than one block. In |
2823 | * fact, the only time we'd need more than one cluster is if |
2824 | * blocksize == clustersize and the dirent won't fit in the |
2825 | * extra space that the expansion to a single block gives. As |
2826 | * of today, that only happens on 4k/4k file systems. |
2827 | */ |
2828 | BUG_ON(alloc > 2); |
2829 | |
2830 | ret = ocfs2_reserve_clusters(osb, bits_wanted: alloc + dx_alloc, ac: &data_ac); |
2831 | if (ret) { |
2832 | mlog_errno(ret); |
2833 | goto out; |
2834 | } |
2835 | |
2836 | /* |
2837 | * Prepare for worst case allocation scenario of two separate |
2838 | * extents in the unindexed tree. |
2839 | */ |
2840 | if (alloc == 2) |
2841 | credits += OCFS2_SUBALLOC_ALLOC; |
2842 | |
2843 | handle = ocfs2_start_trans(osb, max_buffs: credits); |
2844 | if (IS_ERR(ptr: handle)) { |
2845 | ret = PTR_ERR(ptr: handle); |
2846 | mlog_errno(ret); |
2847 | goto out; |
2848 | } |
2849 | |
2850 | ret = dquot_alloc_space_nodirty(inode: dir, |
2851 | nr: ocfs2_clusters_to_bytes(sb: osb->sb, clusters: alloc + dx_alloc)); |
2852 | if (ret) |
2853 | goto out_commit; |
2854 | did_quota = 1; |
2855 | |
2856 | if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { |
2857 | /* |
2858 | * Allocate our index cluster first, to maximize the |
2859 | * possibility that unindexed leaves grow |
2860 | * contiguously. |
2861 | */ |
2862 | ret = __ocfs2_dx_dir_new_cluster(dir, cpos: 0, handle, data_ac, |
2863 | dx_leaves, num_dx_leaves, |
2864 | ret_phys_blkno: &dx_insert_blkno); |
2865 | if (ret) { |
2866 | mlog_errno(ret); |
2867 | goto out_commit; |
2868 | } |
2869 | bytes_allocated += ocfs2_clusters_to_bytes(sb: dir->i_sb, clusters: 1); |
2870 | } |
2871 | |
2872 | /* |
2873 | * Try to claim as many clusters as the bitmap can give though |
2874 | * if we only get one now, that's enough to continue. The rest |
2875 | * will be claimed after the conversion to extents. |
2876 | */ |
2877 | if (ocfs2_dir_resv_allowed(osb)) |
2878 | data_ac->ac_resv = &oi->ip_la_data_resv; |
2879 | ret = ocfs2_claim_clusters(handle, ac: data_ac, min_clusters: 1, cluster_start: &bit_off, num_clusters: &len); |
2880 | if (ret) { |
2881 | mlog_errno(ret); |
2882 | goto out_commit; |
2883 | } |
2884 | bytes_allocated += ocfs2_clusters_to_bytes(sb: dir->i_sb, clusters: 1); |
2885 | |
2886 | /* |
2887 | * Operations are carefully ordered so that we set up the new |
2888 | * data block first. The conversion from inline data to |
2889 | * extents follows. |
2890 | */ |
2891 | blkno = ocfs2_clusters_to_blocks(sb: dir->i_sb, clusters: bit_off); |
2892 | dirdata_bh = sb_getblk(sb, block: blkno); |
2893 | if (!dirdata_bh) { |
2894 | ret = -ENOMEM; |
2895 | mlog_errno(ret); |
2896 | goto out_commit; |
2897 | } |
2898 | |
2899 | ocfs2_set_new_buffer_uptodate(ci: INODE_CACHE(inode: dir), bh: dirdata_bh); |
2900 | |
2901 | ret = ocfs2_journal_access_db(handle, ci: INODE_CACHE(inode: dir), bh: dirdata_bh, |
2902 | OCFS2_JOURNAL_ACCESS_CREATE); |
2903 | if (ret) { |
2904 | mlog_errno(ret); |
2905 | goto out_commit; |
2906 | } |
2907 | |
2908 | memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir)); |
2909 | memset(dirdata_bh->b_data + i_size_read(dir), 0, |
2910 | sb->s_blocksize - i_size_read(dir)); |
2911 | i = ocfs2_expand_last_dirent(start: dirdata_bh->b_data, old_size: i_size_read(inode: dir), dir); |
2912 | if (ocfs2_new_dir_wants_trailer(dir)) { |
2913 | /* |
2914 | * Prepare the dir trailer up front. It will otherwise look |
2915 | * like a valid dirent. Even if inserting the index fails |
2916 | * (unlikely), then all we'll have done is given first dir |
2917 | * block a small amount of fragmentation. |
2918 | */ |
2919 | ocfs2_init_dir_trailer(inode: dir, bh: dirdata_bh, rec_len: i); |
2920 | } |
2921 | |
2922 | ocfs2_update_inode_fsync_trans(handle, inode: dir, datasync: 1); |
2923 | ocfs2_journal_dirty(handle, bh: dirdata_bh); |
2924 | |
2925 | if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { |
2926 | /* |
2927 | * Dx dirs with an external cluster need to do this up |
2928 | * front. Inline dx root's get handled later, after |
2929 | * we've allocated our root block. We get passed back |
2930 | * a total number of items so that dr_num_entries can |
2931 | * be correctly set once the dx_root has been |
2932 | * allocated. |
2933 | */ |
2934 | ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves, |
2935 | num_dx_leaves, num_dx_entries: &num_dx_entries, |
2936 | dirent_bh: dirdata_bh); |
2937 | if (ret) { |
2938 | mlog_errno(ret); |
2939 | goto out_commit; |
2940 | } |
2941 | } |
2942 | |
2943 | /* |
2944 | * Set extent, i_size, etc on the directory. After this, the |
2945 | * inode should contain the same exact dirents as before and |
2946 | * be fully accessible from system calls. |
2947 | * |
2948 | * We let the later dirent insert modify c/mtime - to the user |
2949 | * the data hasn't changed. |
2950 | */ |
2951 | ret = ocfs2_journal_access_di(handle, ci: INODE_CACHE(inode: dir), bh: di_bh, |
2952 | OCFS2_JOURNAL_ACCESS_CREATE); |
2953 | if (ret) { |
2954 | mlog_errno(ret); |
2955 | goto out_commit; |
2956 | } |
2957 | |
2958 | spin_lock(lock: &oi->ip_lock); |
2959 | oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; |
2960 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); |
2961 | spin_unlock(lock: &oi->ip_lock); |
2962 | |
2963 | ocfs2_dinode_new_extent_list(inode: dir, di); |
2964 | |
2965 | i_size_write(inode: dir, i_size: sb->s_blocksize); |
2966 | inode_set_mtime_to_ts(inode: dir, ts: inode_set_ctime_current(inode: dir)); |
2967 | |
2968 | di->i_size = cpu_to_le64(sb->s_blocksize); |
2969 | di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(dir)); |
2970 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(dir)); |
2971 | ocfs2_update_inode_fsync_trans(handle, inode: dir, datasync: 1); |
2972 | |
2973 | /* |
2974 | * This should never fail as our extent list is empty and all |
2975 | * related blocks have been journaled already. |
2976 | */ |
2977 | ret = ocfs2_insert_extent(handle, et: &et, cpos: 0, start_blk: blkno, new_clusters: len, |
2978 | flags: 0, NULL); |
2979 | if (ret) { |
2980 | mlog_errno(ret); |
2981 | goto out_commit; |
2982 | } |
2983 | |
2984 | /* |
2985 | * Set i_blocks after the extent insert for the most up to |
2986 | * date ip_clusters value. |
2987 | */ |
2988 | dir->i_blocks = ocfs2_inode_sector_count(inode: dir); |
2989 | |
2990 | ocfs2_journal_dirty(handle, bh: di_bh); |
2991 | |
2992 | if (ocfs2_supports_indexed_dirs(osb)) { |
2993 | ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh, |
2994 | dirdata_bh, meta_ac, dx_inline, |
2995 | num_entries: num_dx_entries, ret_dx_root_bh: &dx_root_bh); |
2996 | if (ret) { |
2997 | mlog_errno(ret); |
2998 | goto out_commit; |
2999 | } |
3000 | |
3001 | if (dx_inline) { |
3002 | ocfs2_dx_dir_index_root_block(dir, dx_root_bh, |
3003 | dirent_bh: dirdata_bh); |
3004 | } else { |
3005 | ocfs2_init_dx_root_extent_tree(et: &dx_et, |
3006 | ci: INODE_CACHE(inode: dir), |
3007 | bh: dx_root_bh); |
3008 | ret = ocfs2_insert_extent(handle, et: &dx_et, cpos: 0, |
3009 | start_blk: dx_insert_blkno, new_clusters: 1, flags: 0, NULL); |
3010 | if (ret) |
3011 | mlog_errno(ret); |
3012 | } |
3013 | } |
3014 | |
3015 | /* |
3016 | * We asked for two clusters, but only got one in the 1st |
3017 | * pass. Claim the 2nd cluster as a separate extent. |
3018 | */ |
3019 | if (alloc > len) { |
3020 | ret = ocfs2_claim_clusters(handle, ac: data_ac, min_clusters: 1, cluster_start: &bit_off, |
3021 | num_clusters: &len); |
3022 | if (ret) { |
3023 | mlog_errno(ret); |
3024 | goto out_commit; |
3025 | } |
3026 | blkno = ocfs2_clusters_to_blocks(sb: dir->i_sb, clusters: bit_off); |
3027 | |
3028 | ret = ocfs2_insert_extent(handle, et: &et, cpos: 1, |
3029 | start_blk: blkno, new_clusters: len, flags: 0, NULL); |
3030 | if (ret) { |
3031 | mlog_errno(ret); |
3032 | goto out_commit; |
3033 | } |
3034 | bytes_allocated += ocfs2_clusters_to_bytes(sb: dir->i_sb, clusters: 1); |
3035 | } |
3036 | |
3037 | *first_block_bh = dirdata_bh; |
3038 | dirdata_bh = NULL; |
3039 | if (ocfs2_supports_indexed_dirs(osb)) { |
3040 | unsigned int off; |
3041 | |
3042 | if (!dx_inline) { |
3043 | /* |
3044 | * We need to return the correct block within the |
3045 | * cluster which should hold our entry. |
3046 | */ |
3047 | off = ocfs2_dx_dir_hash_idx(osb, |
3048 | hinfo: &lookup->dl_hinfo); |
3049 | get_bh(bh: dx_leaves[off]); |
3050 | lookup->dl_dx_leaf_bh = dx_leaves[off]; |
3051 | } |
3052 | lookup->dl_dx_root_bh = dx_root_bh; |
3053 | dx_root_bh = NULL; |
3054 | } |
3055 | |
3056 | out_commit: |
3057 | if (ret < 0 && did_quota) |
3058 | dquot_free_space_nodirty(inode: dir, nr: bytes_allocated); |
3059 | |
3060 | ocfs2_commit_trans(osb, handle); |
3061 | |
3062 | out: |
3063 | up_write(sem: &oi->ip_alloc_sem); |
3064 | if (data_ac) |
3065 | ocfs2_free_alloc_context(ac: data_ac); |
3066 | if (meta_ac) |
3067 | ocfs2_free_alloc_context(ac: meta_ac); |
3068 | |
3069 | if (dx_leaves) { |
3070 | for (i = 0; i < num_dx_leaves; i++) |
3071 | brelse(bh: dx_leaves[i]); |
3072 | kfree(objp: dx_leaves); |
3073 | } |
3074 | |
3075 | brelse(bh: dirdata_bh); |
3076 | brelse(bh: dx_root_bh); |
3077 | |
3078 | return ret; |
3079 | } |
3080 | |
/*
 * Grow 'dir' by one block and return a bh of the 1st new block in the
 * allocation. A new cluster is only claimed (via data_ac/meta_ac) when
 * i_size has consumed all currently allocated clusters; quota charged
 * here is released again on failure.
 */
static int ocfs2_do_extend_dir(struct super_block *sb,
			       handle_t *handle,
			       struct inode *dir,
			       struct buffer_head *parent_fe_bh,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct buffer_head **new_bh)
{
	int status;
	int extend, did_quota = 0;
	u64 p_blkno, v_blkno;

	/* Under ip_lock: extend the allocation only if i_size has
	 * caught up with the cluster allocation. */
	spin_lock(lock: &OCFS2_I(inode: dir)->ip_lock);
	extend = (i_size_read(inode: dir) == ocfs2_clusters_to_bytes(sb, clusters: OCFS2_I(inode: dir)->ip_clusters));
	spin_unlock(lock: &OCFS2_I(inode: dir)->ip_lock);

	if (extend) {
		u32 offset = OCFS2_I(inode: dir)->ip_clusters;

		/* Charge quota for one cluster up front; undone in
		 * 'bail' if anything below fails. */
		status = dquot_alloc_space_nodirty(inode: dir,
					nr: ocfs2_clusters_to_bytes(sb, clusters: 1));
		if (status)
			goto bail;
		did_quota = 1;

		status = ocfs2_add_inode_data(OCFS2_SB(sb), inode: dir, logical_offset: &offset,
					      clusters_to_add: 1, mark_unwritten: 0, fe_bh: parent_fe_bh, handle,
					      data_ac, meta_ac, NULL);
		/* A transaction restart (-EAGAIN) is never expected
		 * here - the caller reserved enough credits. */
		BUG_ON(status == -EAGAIN);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* i_size is block aligned, so this names the block just past
	 * the current end of data - i.e. the 1st new block. */
	v_blkno = ocfs2_blocks_for_bytes(sb, bytes: i_size_read(inode: dir));
	status = ocfs2_extent_map_get_blocks(inode: dir, v_blkno, p_blkno: &p_blkno, NULL, NULL);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*new_bh = sb_getblk(sb, block: p_blkno);
	if (!*new_bh) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}
	status = 0;
bail:
	if (did_quota && status < 0)
		dquot_free_space_nodirty(inode: dir, nr: ocfs2_clusters_to_bytes(sb, clusters: 1));
	return status;
}
3136 | |
3137 | /* |
3138 | * Assumes you already have a cluster lock on the directory. |
3139 | * |
3140 | * 'blocks_wanted' is only used if we have an inline directory which |
3141 | * is to be turned into an extent based one. The size of the dirent to |
3142 | * insert might be larger than the space gained by growing to just one |
3143 | * block, so we may have to grow the inode by two blocks in that case. |
3144 | * |
3145 | * If the directory is already indexed, dx_root_bh must be provided. |
3146 | */ |
static int ocfs2_extend_dir(struct ocfs2_super *osb,
			    struct inode *dir,
			    struct buffer_head *parent_fe_bh,
			    unsigned int blocks_wanted,
			    struct ocfs2_dir_lookup_result *lookup,
			    struct buffer_head **new_de_bh)
{
	int status = 0;
	int credits, num_free_extents, drop_alloc_sem = 0;
	loff_t dir_i_size;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
	struct ocfs2_extent_list *el = &fe->id2.i_list;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle = NULL;
	struct buffer_head *new_bh = NULL;
	struct ocfs2_dir_entry * de;
	struct super_block *sb = osb->sb;
	struct ocfs2_extent_tree et;
	struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;

	if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * This would be a code error as an inline directory should
		 * never have an index root.
		 */
		BUG_ON(dx_root_bh);

		status = ocfs2_expand_inline_dir(dir, di_bh: parent_fe_bh,
						 blocks_wanted, lookup,
						 first_block_bh: &new_bh);
		if (status) {
			mlog_errno(status);
			goto bail;
		}

		/* Expansion from inline to an indexed directory will
		 * have given us this. */
		dx_root_bh = lookup->dl_dx_root_bh;

		if (blocks_wanted == 1) {
			/*
			 * If the new dirent will fit inside the space
			 * created by pushing out to one block, then
			 * we can complete the operation
			 * here. Otherwise we have to expand i_size
			 * and format the 2nd block below.
			 */
			BUG_ON(new_bh == NULL);
			goto bail_bh;
		}

		/*
		 * Get rid of 'new_bh' - we want to format the 2nd
		 * data block and return that instead.
		 */
		brelse(bh: new_bh);
		new_bh = NULL;

		down_write(sem: &OCFS2_I(inode: dir)->ip_alloc_sem);
		drop_alloc_sem = 1;
		dir_i_size = i_size_read(inode: dir);
		/* Block 2 of the freshly-expanded dir is always within
		 * the cluster allocated above - a simple extend. */
		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
		goto do_extend;
	}

	down_write(sem: &OCFS2_I(inode: dir)->ip_alloc_sem);
	drop_alloc_sem = 1;
	dir_i_size = i_size_read(inode: dir);
	trace_ocfs2_extend_dir(val1: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno,
			       val2: dir_i_size);

	/* dir->i_size is always block aligned. */
	spin_lock(lock: &OCFS2_I(inode: dir)->ip_lock);
	if (dir_i_size == ocfs2_clusters_to_bytes(sb, clusters: OCFS2_I(inode: dir)->ip_clusters)) {
		spin_unlock(lock: &OCFS2_I(inode: dir)->ip_lock);
		/* All allocated clusters are in use - reserve metadata
		 * (if the extent list is full) and one data cluster
		 * before starting the transaction. */
		ocfs2_init_dinode_extent_tree(et: &et, ci: INODE_CACHE(inode: dir),
					      bh: parent_fe_bh);
		num_free_extents = ocfs2_num_free_extents(et: &et);
		if (num_free_extents < 0) {
			status = num_free_extents;
			mlog_errno(status);
			goto bail;
		}

		if (!num_free_extents) {
			status = ocfs2_reserve_new_metadata(osb, root_el: el, ac: &meta_ac);
			if (status < 0) {
				if (status != -ENOSPC)
					mlog_errno(status);
				goto bail;
			}
		}

		status = ocfs2_reserve_clusters(osb, bits_wanted: 1, ac: &data_ac);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}

		if (ocfs2_dir_resv_allowed(osb))
			data_ac->ac_resv = &OCFS2_I(inode: dir)->ip_la_data_resv;

		credits = ocfs2_calc_extend_credits(sb, root_el: el);
	} else {
		spin_unlock(lock: &OCFS2_I(inode: dir)->ip_lock);
		/* Room remains in the last allocated cluster - no new
		 * allocation is needed. */
		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
	}

do_extend:
	if (ocfs2_dir_indexed(inode: dir))
		credits++; /* For attaching the new dirent block to the
			    * dx_root */

	handle = ocfs2_start_trans(osb, max_buffs: credits);
	if (IS_ERR(ptr: handle)) {
		status = PTR_ERR(ptr: handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_do_extend_dir(sb: osb->sb, handle, dir, parent_fe_bh,
				     data_ac, meta_ac, new_bh: &new_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_set_new_buffer_uptodate(ci: INODE_CACHE(inode: dir), bh: new_bh);

	status = ocfs2_journal_access_db(handle, ci: INODE_CACHE(inode: dir), bh: new_bh,
					 OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	memset(new_bh->b_data, 0, sb->s_blocksize);

	/* Format the new block as a single empty dirent spanning the
	 * whole block, or everything up to the trailer if one is used. */
	de = (struct ocfs2_dir_entry *) new_bh->b_data;
	de->inode = 0;
	if (ocfs2_supports_dir_trailer(dir)) {
		de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb));

		ocfs2_init_dir_trailer(inode: dir, bh: new_bh, le16_to_cpu(de->rec_len));

		if (ocfs2_dir_indexed(inode: dir)) {
			status = ocfs2_dx_dir_link_trailer(dir, handle,
							   dx_root_bh, dirdata_bh: new_bh);
			if (status) {
				mlog_errno(status);
				goto bail;
			}
		}
	} else {
		de->rec_len = cpu_to_le16(sb->s_blocksize);
	}
	ocfs2_update_inode_fsync_trans(handle, inode: dir, datasync: 1);
	ocfs2_journal_dirty(handle, bh: new_bh);

	dir_i_size += dir->i_sb->s_blocksize;
	i_size_write(inode: dir, i_size: dir_i_size);
	dir->i_blocks = ocfs2_inode_sector_count(inode: dir);
	status = ocfs2_mark_inode_dirty(handle, inode: dir, bh: parent_fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail_bh:
	/* Success: hand the caller its own reference to the block. */
	*new_de_bh = new_bh;
	get_bh(bh: *new_de_bh);
bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);
	if (drop_alloc_sem)
		up_write(sem: &OCFS2_I(inode: dir)->ip_alloc_sem);

	if (data_ac)
		ocfs2_free_alloc_context(ac: data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(ac: meta_ac);

	brelse(bh: new_bh);

	return status;
}
3335 | |
/*
 * Find room for a dirent of 'namelen' in an inline (data-in-inode)
 * directory. On success, *ret_de_bh gets an extra reference to di_bh.
 * Returns -EEXIST if the name already exists, -ENOENT on a corrupt
 * entry, and -ENOSPC when the directory must be expanded - in which
 * case *blocks_wanted is set to the number of blocks (1 or 2) the
 * expanded directory needs so the new dirent will fit.
 */
static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
				   const char *name, int namelen,
				   struct buffer_head **ret_de_bh,
				   unsigned int *blocks_wanted)
{
	int ret;
	struct super_block *sb = dir->i_sb;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_dir_entry *de, *last_de = NULL;
	char *de_buf, *limit;
	unsigned long offset = 0;
	unsigned int rec_len, new_rec_len, free_space;

	/*
	 * This calculates how many free bytes we'd have in block zero, should
	 * this function force expansion to an extent tree.
	 */
	if (ocfs2_new_dir_wants_trailer(dir))
		free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(inode: dir);
	else
		free_space = dir->i_sb->s_blocksize - i_size_read(inode: dir);

	de_buf = di->id2.i_data.id_data;
	limit = de_buf + i_size_read(inode: dir);
	rec_len = OCFS2_DIR_REC_LEN(namelen);

	/* Walk every dirent packed into the inode's inline data area. */
	while (de_buf < limit) {
		de = (struct ocfs2_dir_entry *)de_buf;

		if (!ocfs2_check_dir_entry(dir, de, bh: di_bh, offset)) {
			ret = -ENOENT;
			goto out;
		}
		if (ocfs2_match(len: namelen, name, de)) {
			ret = -EEXIST;
			goto out;
		}
		/*
		 * No need to check for a trailing dirent record here as
		 * they're not used for inline dirs.
		 */

		if (ocfs2_dirent_would_fit(de, new_rec_len: rec_len)) {
			/* Ok, we found a spot. Return this bh and let
			 * the caller actually fill it in. */
			*ret_de_bh = di_bh;
			get_bh(bh: *ret_de_bh);
			ret = 0;
			goto out;
		}

		last_de = de;
		de_buf += le16_to_cpu(de->rec_len);
		offset += le16_to_cpu(de->rec_len);
	}

	/*
	 * We're going to require expansion of the directory - figure
	 * out how many blocks we'll need so that a place for the
	 * dirent can be found.
	 */
	*blocks_wanted = 1;
	/* After expansion, the last dirent can absorb block zero's
	 * newly-freed tail; a 2nd block is needed only if the new
	 * dirent still wouldn't fit behind it. */
	new_rec_len = le16_to_cpu(last_de->rec_len) + free_space;
	if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len)))
		*blocks_wanted = 2;

	ret = -ENOSPC;
out:
	return ret;
}
3406 | |
/*
 * Find room for a dirent of 'namelen' in an extent-based directory by
 * scanning its data blocks in order. On success, *ret_de_bh gets a
 * reference to the block holding the slot. Returns -EEXIST if the
 * name already exists, -ENOENT on a corrupt entry, and -ENOSPC when
 * the caller must extend the directory.
 */
static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
				   int namelen, struct buffer_head **ret_de_bh)
{
	unsigned long offset;
	struct buffer_head *bh = NULL;
	unsigned short rec_len;
	struct ocfs2_dir_entry *de;
	struct super_block *sb = dir->i_sb;
	int status;
	int blocksize = dir->i_sb->s_blocksize;

	status = ocfs2_read_dir_block(inode: dir, v_block: 0, bh: &bh, flags: 0);
	if (status)
		goto bail;

	rec_len = OCFS2_DIR_REC_LEN(namelen);
	offset = 0;
	de = (struct ocfs2_dir_entry *) bh->b_data;
	while (1) {
		/* Ran off the end of the current block - release it
		 * and read the next one. */
		if ((char *)de >= sb->s_blocksize + bh->b_data) {
			brelse(bh);
			bh = NULL;

			if (i_size_read(inode: dir) <= offset) {
				/*
				 * Caller will have to expand this
				 * directory.
				 */
				status = -ENOSPC;
				goto bail;
			}
			status = ocfs2_read_dir_block(inode: dir,
					     v_block: offset >> sb->s_blocksize_bits,
					     bh: &bh, flags: 0);
			if (status)
				goto bail;

			/* move to next block */
			de = (struct ocfs2_dir_entry *) bh->b_data;
		}
		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
			status = -ENOENT;
			goto bail;
		}
		if (ocfs2_match(len: namelen, name, de)) {
			status = -EEXIST;
			goto bail;
		}

		/* The trailer at the end of each block looks like a
		 * dirent but must never be used for insertion. */
		if (ocfs2_skip_dir_trailer(dir, de, offset: offset % blocksize,
					   blklen: blocksize))
			goto next;

		if (ocfs2_dirent_would_fit(de, new_rec_len: rec_len)) {
			/* Ok, we found a spot. Return this bh and let
			 * the caller actually fill it in. */
			*ret_de_bh = bh;
			get_bh(bh: *ret_de_bh);
			status = 0;
			goto bail;
		}
next:
		offset += le16_to_cpu(de->rec_len);
		de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
	}

bail:
	brelse(bh);
	if (status)
		mlog_errno(status);

	return status;
}
3480 | |
3481 | static int dx_leaf_sort_cmp(const void *a, const void *b) |
3482 | { |
3483 | const struct ocfs2_dx_entry *entry1 = a; |
3484 | const struct ocfs2_dx_entry *entry2 = b; |
3485 | u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash); |
3486 | u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash); |
3487 | u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash); |
3488 | u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash); |
3489 | |
3490 | if (major_hash1 > major_hash2) |
3491 | return 1; |
3492 | if (major_hash1 < major_hash2) |
3493 | return -1; |
3494 | |
3495 | /* |
3496 | * It is not strictly necessary to sort by minor |
3497 | */ |
3498 | if (minor_hash1 > minor_hash2) |
3499 | return 1; |
3500 | if (minor_hash1 < minor_hash2) |
3501 | return -1; |
3502 | return 0; |
3503 | } |
3504 | |
3505 | static void dx_leaf_sort_swap(void *a, void *b, int size) |
3506 | { |
3507 | struct ocfs2_dx_entry *entry1 = a; |
3508 | struct ocfs2_dx_entry *entry2 = b; |
3509 | |
3510 | BUG_ON(size != sizeof(*entry1)); |
3511 | |
3512 | swap(*entry1, *entry2); |
3513 | } |
3514 | |
3515 | static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf) |
3516 | { |
3517 | struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list; |
3518 | int i, num = le16_to_cpu(dl_list->de_num_used); |
3519 | |
3520 | for (i = 0; i < (num - 1); i++) { |
3521 | if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) != |
3522 | le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash)) |
3523 | return 0; |
3524 | } |
3525 | |
3526 | return 1; |
3527 | } |
3528 | |
3529 | /* |
3530 | * Find the optimal value to split this leaf on. This expects the leaf |
3531 | * entries to be in sorted order. |
3532 | * |
3533 | * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is |
3534 | * the hash we want to insert. |
3535 | * |
3536 | * This function is only concerned with the major hash - that which |
3537 | * determines which cluster an item belongs to. |
3538 | */ |
static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf,
					u32 leaf_cpos, u32 insert_hash,
					u32 *split_hash)
{
	struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
	int i, num_used = le16_to_cpu(dl_list->de_num_used);
	int allsame;

	/*
	 * There's a couple rare, but nasty corner cases we have to
	 * check for here. All of them involve a leaf where all value
	 * have the same hash, which is what we look for first.
	 *
	 * Most of the time, all of the above is false, and we simply
	 * pick the median value for a split.
	 */
	allsame = ocfs2_dx_leaf_same_major(dx_leaf);
	if (allsame) {
		u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash);

		if (val == insert_hash) {
			/*
			 * No matter where we would choose to split,
			 * the new entry would want to occupy the same
			 * block as these. Since there's no space left
			 * in their existing block, we know there
			 * won't be space after the split.
			 */
			return -ENOSPC;
		}

		if (val == leaf_cpos) {
			/*
			 * Because val is the same as leaf_cpos (which
			 * is the smallest value this leaf can have),
			 * yet is not equal to insert_hash, then we
			 * know that insert_hash *must* be larger than
			 * val (and leaf_cpos). At least cpos+1 in value.
			 *
			 * We also know then, that there cannot be an
			 * adjacent extent (otherwise we'd be looking
			 * at it). Choosing this value gives us a
			 * chance to get some contiguousness.
			 */
			*split_hash = leaf_cpos + 1;
			return 0;
		}

		if (val > insert_hash) {
			/*
			 * val can not be the same as insert hash, and
			 * also must be larger than leaf_cpos. Also,
			 * we know that there can't be a leaf between
			 * cpos and val, otherwise the entries with
			 * hash 'val' would be there.
			 */
			*split_hash = val;
			return 0;
		}

		/*
		 * Remaining case: val < insert_hash. Splitting at
		 * insert_hash keeps the existing entries in this leaf
		 * and routes the new entry to the new cluster.
		 */
		*split_hash = insert_hash;
		return 0;
	}

	/*
	 * Since the records are sorted and the checks above
	 * guaranteed that not all records in this block are the same,
	 * we simple travel forward, from the median, and pick the 1st
	 * record whose value is larger than leaf_cpos.
	 */
	for (i = (num_used / 2); i < num_used; i++)
		if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) >
		    leaf_cpos)
			break;

	BUG_ON(i == num_used); /* Should be impossible */
	*split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash);
	return 0;
}
3618 | |
3619 | /* |
3620 | * Transfer all entries in orig_dx_leaves whose major hash is equal to or |
3621 | * larger than split_hash into new_dx_leaves. We use a temporary |
3622 | * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks. |
3623 | * |
3624 | * Since the block offset inside a leaf (cluster) is a constant mask |
3625 | * of minor_hash, we can optimize - an item at block offset X within |
3626 | * the original cluster, will be at offset X within the new cluster. |
3627 | */ |
static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
				       handle_t *handle,
				       struct ocfs2_dx_leaf *tmp_dx_leaf,
				       struct buffer_head **orig_dx_leaves,
				       struct buffer_head **new_dx_leaves,
				       int num_dx_leaves)
{
	int i, j, num_used;
	u32 major_hash;
	struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
	struct ocfs2_dx_entry_list *orig_list, *tmp_list;
	struct ocfs2_dx_entry *dx_entry;

	tmp_list = &tmp_dx_leaf->dl_list;

	/* Process the original and new leaves pairwise, block offset
	 * by block offset (see comment above on the minor_hash mask). */
	for (i = 0; i < num_dx_leaves; i++) {
		orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
		orig_list = &orig_dx_leaf->dl_list;
		new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;

		num_used = le16_to_cpu(orig_list->de_num_used);

		/* Stage the rebuilt original leaf in the scratch
		 * buffer: keep its header, start with zero entries. */
		memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize);
		tmp_list->de_num_used = cpu_to_le16(0);
		memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used);

		/* Route each entry: >= split_hash goes to the new
		 * leaf, the rest is re-appended to the scratch copy. */
		for (j = 0; j < num_used; j++) {
			dx_entry = &orig_list->de_entries[j];
			major_hash = le32_to_cpu(dx_entry->dx_major_hash);
			if (major_hash >= split_hash)
				ocfs2_dx_dir_leaf_insert_tail(dx_leaf: new_dx_leaf,
							      dx_new_entry: dx_entry);
			else
				ocfs2_dx_dir_leaf_insert_tail(dx_leaf: tmp_dx_leaf,
							      dx_new_entry: dx_entry);
		}
		/* Overwrite the original with the compacted copy. */
		memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize);

		ocfs2_journal_dirty(handle, bh: orig_dx_leaves[i]);
		ocfs2_journal_dirty(handle, bh: new_dx_leaves[i]);
	}
}
3670 | |
3671 | static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb, |
3672 | struct ocfs2_dx_root_block *dx_root) |
3673 | { |
3674 | int credits = ocfs2_clusters_to_blocks(sb: osb->sb, clusters: 3); |
3675 | |
3676 | credits += ocfs2_calc_extend_credits(sb: osb->sb, root_el: &dx_root->dr_list); |
3677 | credits += ocfs2_quota_trans_credits(sb: osb->sb); |
3678 | return credits; |
3679 | } |
3680 | |
3681 | /* |
3682 | * Find the median value in dx_leaf_bh and allocate a new leaf to move |
3683 | * half our entries into. |
3684 | */ |
3685 | static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, |
3686 | struct buffer_head *dx_root_bh, |
3687 | struct buffer_head *dx_leaf_bh, |
3688 | struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos, |
3689 | u64 leaf_blkno) |
3690 | { |
3691 | struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data; |
3692 | int credits, ret, i, num_used, did_quota = 0; |
3693 | u32 cpos, split_hash, insert_hash = hinfo->major_hash; |
3694 | u64 orig_leaves_start; |
3695 | int num_dx_leaves; |
3696 | struct buffer_head **orig_dx_leaves = NULL; |
3697 | struct buffer_head **new_dx_leaves = NULL; |
3698 | struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL; |
3699 | struct ocfs2_extent_tree et; |
3700 | handle_t *handle = NULL; |
3701 | struct ocfs2_dx_root_block *dx_root; |
3702 | struct ocfs2_dx_leaf *tmp_dx_leaf = NULL; |
3703 | |
3704 | trace_ocfs2_dx_dir_rebalance(val1: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno, |
3705 | val2: (unsigned long long)leaf_blkno, |
3706 | val3: insert_hash); |
3707 | |
3708 | ocfs2_init_dx_root_extent_tree(et: &et, ci: INODE_CACHE(inode: dir), bh: dx_root_bh); |
3709 | |
3710 | dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; |
3711 | /* |
3712 | * XXX: This is a rather large limit. We should use a more |
3713 | * realistic value. |
3714 | */ |
3715 | if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX) |
3716 | return -ENOSPC; |
3717 | |
3718 | num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used); |
3719 | if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) { |
3720 | mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance empty leaf: " |
3721 | "%llu, %d\n" , (unsigned long long)OCFS2_I(dir)->ip_blkno, |
3722 | (unsigned long long)leaf_blkno, num_used); |
3723 | ret = -EIO; |
3724 | goto out; |
3725 | } |
3726 | |
3727 | orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb: osb->sb, ret_num_leaves: &num_dx_leaves); |
3728 | if (!orig_dx_leaves) { |
3729 | ret = -ENOMEM; |
3730 | mlog_errno(ret); |
3731 | goto out; |
3732 | } |
3733 | |
3734 | new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb: osb->sb, NULL); |
3735 | if (!new_dx_leaves) { |
3736 | ret = -ENOMEM; |
3737 | mlog_errno(ret); |
3738 | goto out; |
3739 | } |
3740 | |
3741 | ret = ocfs2_lock_allocators(inode: dir, et: &et, clusters_to_add: 1, extents_to_split: 0, data_ac: &data_ac, meta_ac: &meta_ac); |
3742 | if (ret) { |
3743 | if (ret != -ENOSPC) |
3744 | mlog_errno(ret); |
3745 | goto out; |
3746 | } |
3747 | |
3748 | credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root); |
3749 | handle = ocfs2_start_trans(osb, max_buffs: credits); |
3750 | if (IS_ERR(ptr: handle)) { |
3751 | ret = PTR_ERR(ptr: handle); |
3752 | handle = NULL; |
3753 | mlog_errno(ret); |
3754 | goto out; |
3755 | } |
3756 | |
3757 | ret = dquot_alloc_space_nodirty(inode: dir, |
3758 | nr: ocfs2_clusters_to_bytes(sb: dir->i_sb, clusters: 1)); |
3759 | if (ret) |
3760 | goto out_commit; |
3761 | did_quota = 1; |
3762 | |
3763 | ret = ocfs2_journal_access_dl(handle, ci: INODE_CACHE(inode: dir), bh: dx_leaf_bh, |
3764 | OCFS2_JOURNAL_ACCESS_WRITE); |
3765 | if (ret) { |
3766 | mlog_errno(ret); |
3767 | goto out_commit; |
3768 | } |
3769 | |
3770 | /* |
3771 | * This block is changing anyway, so we can sort it in place. |
3772 | */ |
3773 | sort(base: dx_leaf->dl_list.de_entries, num: num_used, |
3774 | size: sizeof(struct ocfs2_dx_entry), cmp_func: dx_leaf_sort_cmp, |
3775 | swap_func: dx_leaf_sort_swap); |
3776 | |
3777 | ocfs2_journal_dirty(handle, bh: dx_leaf_bh); |
3778 | |
3779 | ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash, |
3780 | split_hash: &split_hash); |
3781 | if (ret) { |
3782 | mlog_errno(ret); |
3783 | goto out_commit; |
3784 | } |
3785 | |
3786 | trace_ocfs2_dx_dir_rebalance_split(value1: leaf_cpos, value2: split_hash, value3: insert_hash); |
3787 | |
3788 | /* |
3789 | * We have to carefully order operations here. There are items |
3790 | * which want to be in the new cluster before insert, but in |
3791 | * order to put those items in the new cluster, we alter the |
3792 | * old cluster. A failure to insert gets nasty. |
3793 | * |
3794 | * So, start by reserving writes to the old |
3795 | * cluster. ocfs2_dx_dir_new_cluster will reserve writes on |
3796 | * the new cluster for us, before inserting it. The insert |
3797 | * won't happen if there's an error before that. Once the |
3798 | * insert is done then, we can transfer from one leaf into the |
3799 | * other without fear of hitting any error. |
3800 | */ |
3801 | |
3802 | /* |
3803 | * The leaf transfer wants some scratch space so that we don't |
3804 | * wind up doing a bunch of expensive memmove(). |
3805 | */ |
3806 | tmp_dx_leaf = kmalloc(size: osb->sb->s_blocksize, GFP_NOFS); |
3807 | if (!tmp_dx_leaf) { |
3808 | ret = -ENOMEM; |
3809 | mlog_errno(ret); |
3810 | goto out_commit; |
3811 | } |
3812 | |
3813 | orig_leaves_start = ocfs2_block_to_cluster_start(sb: dir->i_sb, blocks: leaf_blkno); |
3814 | ret = ocfs2_read_dx_leaves(dir, start: orig_leaves_start, num: num_dx_leaves, |
3815 | dx_leaf_bhs: orig_dx_leaves); |
3816 | if (ret) { |
3817 | mlog_errno(ret); |
3818 | goto out_commit; |
3819 | } |
3820 | |
3821 | cpos = split_hash; |
3822 | ret = ocfs2_dx_dir_new_cluster(dir, et: &et, cpos, handle, |
3823 | data_ac, meta_ac, dx_leaves: new_dx_leaves, |
3824 | num_dx_leaves); |
3825 | if (ret) { |
3826 | mlog_errno(ret); |
3827 | goto out_commit; |
3828 | } |
3829 | |
3830 | for (i = 0; i < num_dx_leaves; i++) { |
3831 | ret = ocfs2_journal_access_dl(handle, ci: INODE_CACHE(inode: dir), |
3832 | bh: orig_dx_leaves[i], |
3833 | OCFS2_JOURNAL_ACCESS_WRITE); |
3834 | if (ret) { |
3835 | mlog_errno(ret); |
3836 | goto out_commit; |
3837 | } |
3838 | |
3839 | ret = ocfs2_journal_access_dl(handle, ci: INODE_CACHE(inode: dir), |
3840 | bh: new_dx_leaves[i], |
3841 | OCFS2_JOURNAL_ACCESS_WRITE); |
3842 | if (ret) { |
3843 | mlog_errno(ret); |
3844 | goto out_commit; |
3845 | } |
3846 | } |
3847 | |
3848 | ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, |
3849 | orig_dx_leaves, new_dx_leaves, num_dx_leaves); |
3850 | |
3851 | out_commit: |
3852 | if (ret < 0 && did_quota) |
3853 | dquot_free_space_nodirty(inode: dir, |
3854 | nr: ocfs2_clusters_to_bytes(sb: dir->i_sb, clusters: 1)); |
3855 | |
3856 | ocfs2_update_inode_fsync_trans(handle, inode: dir, datasync: 1); |
3857 | ocfs2_commit_trans(osb, handle); |
3858 | |
3859 | out: |
3860 | if (orig_dx_leaves || new_dx_leaves) { |
3861 | for (i = 0; i < num_dx_leaves; i++) { |
3862 | if (orig_dx_leaves) |
3863 | brelse(bh: orig_dx_leaves[i]); |
3864 | if (new_dx_leaves) |
3865 | brelse(bh: new_dx_leaves[i]); |
3866 | } |
3867 | kfree(objp: orig_dx_leaves); |
3868 | kfree(objp: new_dx_leaves); |
3869 | } |
3870 | |
3871 | if (meta_ac) |
3872 | ocfs2_free_alloc_context(ac: meta_ac); |
3873 | if (data_ac) |
3874 | ocfs2_free_alloc_context(ac: data_ac); |
3875 | |
3876 | kfree(objp: tmp_dx_leaf); |
3877 | return ret; |
3878 | } |
3879 | |
/*
 * Reserve a slot in the directory index for the entry whose hash was
 * precomputed into lookup->dl_hinfo.  On success, a reference to the
 * index leaf with free space is handed back in lookup->dl_dx_leaf_bh
 * (the caller is responsible for releasing it).
 *
 * If the target leaf is full, its cluster is rebalanced once and the
 * lookup restarted; a still-full leaf after rebalance returns -ENOSPC.
 *
 * NOTE(review): di_bh, name and namelen are unused in this body — the
 * hash in lookup->dl_hinfo was computed by the caller.
 */
static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir,
				   struct buffer_head *di_bh,
				   struct buffer_head *dx_root_bh,
				   const char *name, int namelen,
				   struct ocfs2_dir_lookup_result *lookup)
{
	int ret, rebalanced = 0;	/* rebalance is attempted at most once */
	struct ocfs2_dx_root_block *dx_root;
	struct buffer_head *dx_leaf_bh = NULL;
	struct ocfs2_dx_leaf *dx_leaf;
	u64 blkno;
	u32 leaf_cpos;

	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;

restart_search:
	/* Map the name's hash to the cpos and physical block of the
	 * index leaf it belongs in. */
	ret = ocfs2_dx_dir_lookup(inode: dir, el: &dx_root->dr_list, hinfo: &lookup->dl_hinfo,
				  ret_cpos: &leaf_cpos, ret_phys_blkno: &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_dx_leaf(dir, blkno, dx_leaf_bh: &dx_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data;

	/* Leaf is full — try to make room by rebalancing its cluster. */
	if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >=
	    le16_to_cpu(dx_leaf->dl_list.de_count)) {
		if (rebalanced) {
			/*
			 * Rebalancing should have provided us with
			 * space in an appropriate leaf.
			 *
			 * XXX: Is this an abnormal condition then?
			 * Should we print a message here?
			 */
			ret = -ENOSPC;
			goto out;
		}

		ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh,
					     hinfo: &lookup->dl_hinfo, leaf_cpos,
					     leaf_blkno: blkno);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

		/*
		 * Restart the lookup. The rebalance might have
		 * changed which block our item fits into. Mark our
		 * progress, so we only execute this once.
		 */
		brelse(bh: dx_leaf_bh);
		dx_leaf_bh = NULL;
		rebalanced = 1;
		goto restart_search;
	}

	/* Transfer leaf ownership to the lookup result; clearing the
	 * local pointer keeps the brelse() below from dropping it. */
	lookup->dl_dx_leaf_bh = dx_leaf_bh;
	dx_leaf_bh = NULL;

out:
	brelse(bh: dx_leaf_bh);
	return ret;
}
3952 | |
3953 | static int ocfs2_search_dx_free_list(struct inode *dir, |
3954 | struct buffer_head *dx_root_bh, |
3955 | int namelen, |
3956 | struct ocfs2_dir_lookup_result *lookup) |
3957 | { |
3958 | int ret = -ENOSPC; |
3959 | struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL; |
3960 | struct ocfs2_dir_block_trailer *db; |
3961 | u64 next_block; |
3962 | int rec_len = OCFS2_DIR_REC_LEN(namelen); |
3963 | struct ocfs2_dx_root_block *dx_root; |
3964 | |
3965 | dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; |
3966 | next_block = le64_to_cpu(dx_root->dr_free_blk); |
3967 | |
3968 | while (next_block) { |
3969 | brelse(bh: prev_leaf_bh); |
3970 | prev_leaf_bh = leaf_bh; |
3971 | leaf_bh = NULL; |
3972 | |
3973 | ret = ocfs2_read_dir_block_direct(dir, phys: next_block, bh: &leaf_bh); |
3974 | if (ret) { |
3975 | mlog_errno(ret); |
3976 | goto out; |
3977 | } |
3978 | |
3979 | db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb); |
3980 | if (rec_len <= le16_to_cpu(db->db_free_rec_len)) { |
3981 | lookup->dl_leaf_bh = leaf_bh; |
3982 | lookup->dl_prev_leaf_bh = prev_leaf_bh; |
3983 | leaf_bh = NULL; |
3984 | prev_leaf_bh = NULL; |
3985 | break; |
3986 | } |
3987 | |
3988 | next_block = le64_to_cpu(db->db_free_next); |
3989 | } |
3990 | |
3991 | if (!next_block) |
3992 | ret = -ENOSPC; |
3993 | |
3994 | out: |
3995 | |
3996 | brelse(bh: leaf_bh); |
3997 | brelse(bh: prev_leaf_bh); |
3998 | return ret; |
3999 | } |
4000 | |
/*
 * Convert an inline (in-root) directory index into an extent-based
 * one: allocate a cluster of index leaf blocks, copy every dx_entry
 * from the root's inline list into the leaf selected by its minor
 * hash, then reinitialize the root as an extent list and insert the
 * new cluster at cpos 0.
 *
 * Runs under its own transaction; the quota charge is released on
 * early failure via out_commit.
 */
static int ocfs2_expand_inline_dx_root(struct inode *dir,
				       struct buffer_head *dx_root_bh)
{
	int ret, num_dx_leaves, i, j, did_quota = 0;
	struct buffer_head **dx_leaves = NULL;
	struct ocfs2_extent_tree et;
	u64 insert_blkno;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	handle_t *handle = NULL;
	struct ocfs2_dx_root_block *dx_root;
	struct ocfs2_dx_entry_list *entry_list;
	struct ocfs2_dx_entry *dx_entry;
	struct ocfs2_dx_leaf *target_leaf;

	/* Reserve the single cluster the new index leaves will occupy. */
	ret = ocfs2_reserve_clusters(osb, bits_wanted: 1, ac: &data_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb: osb->sb, ret_num_leaves: &num_dx_leaves);
	if (!dx_leaves) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, max_buffs: ocfs2_calc_dxi_expand_credits(sb: osb->sb));
	if (IS_ERR(ptr: handle)) {
		ret = PTR_ERR(ptr: handle);
		mlog_errno(ret);
		goto out;
	}

	ret = dquot_alloc_space_nodirty(inode: dir,
					nr: ocfs2_clusters_to_bytes(sb: osb->sb, clusters: 1));
	if (ret)
		goto out_commit;
	did_quota = 1;

	/*
	 * We do this up front, before the allocation, so that a
	 * failure to add the dx_root_bh to the journal won't result
	 * us losing clusters.
	 */
	ret = ocfs2_journal_access_dr(handle, ci: INODE_CACHE(inode: dir), bh: dx_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = __ocfs2_dx_dir_new_cluster(dir, cpos: 0, handle, data_ac, dx_leaves,
					 num_dx_leaves, ret_phys_blkno: &insert_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Transfer the entries from our dx_root into the appropriate
	 * block
	 */
	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;
	entry_list = &dx_root->dr_entries;

	for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) {
		dx_entry = &entry_list->de_entries[i];

		/* The minor hash picks which leaf block within the new
		 * cluster receives this entry. */
		j = __ocfs2_dx_dir_hash_idx(osb,
					    le32_to_cpu(dx_entry->dx_minor_hash));
		target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data;

		ocfs2_dx_dir_leaf_insert_tail(dx_leaf: target_leaf, dx_new_entry: dx_entry);

		/* Each leaf has been passed to the journal already
		 * via __ocfs2_dx_dir_new_cluster() */
	}

	/* Switch the root from inline entries to an extent list,
	 * zeroing everything from dr_list to the end of the block. */
	dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE;
	memset(&dx_root->dr_list, 0, osb->sb->s_blocksize -
	       offsetof(struct ocfs2_dx_root_block, dr_list));
	dx_root->dr_list.l_count =
		cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb));

	/* This should never fail considering we start with an empty
	 * dx_root. */
	ocfs2_init_dx_root_extent_tree(et: &et, ci: INODE_CACHE(inode: dir), bh: dx_root_bh);
	ret = ocfs2_insert_extent(handle, et: &et, cpos: 0, start_blk: insert_blkno, new_clusters: 1, flags: 0, NULL);
	if (ret)
		mlog_errno(ret);
	/* NOTE(review): did_quota is cleared even when the insert above
	 * fails, so the quota charge is retained on that error path —
	 * presumably because the cluster has already been claimed from
	 * the allocator.  Confirm against the dealloc paths. */
	did_quota = 0;

	ocfs2_update_inode_fsync_trans(handle, inode: dir, datasync: 1);
	ocfs2_journal_dirty(handle, bh: dx_root_bh);

out_commit:
	if (ret < 0 && did_quota)
		dquot_free_space_nodirty(inode: dir,
					 nr: ocfs2_clusters_to_bytes(sb: dir->i_sb, clusters: 1));

	ocfs2_commit_trans(osb, handle);

out:
	if (data_ac)
		ocfs2_free_alloc_context(ac: data_ac);

	if (dx_leaves) {
		for (i = 0; i < num_dx_leaves; i++)
			brelse(bh: dx_leaves[i]);
		kfree(objp: dx_leaves);
	}
	return ret;
}
4116 | |
4117 | static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh) |
4118 | { |
4119 | struct ocfs2_dx_root_block *dx_root; |
4120 | struct ocfs2_dx_entry_list *entry_list; |
4121 | |
4122 | dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data; |
4123 | entry_list = &dx_root->dr_entries; |
4124 | |
4125 | if (le16_to_cpu(entry_list->de_num_used) >= |
4126 | le16_to_cpu(entry_list->de_count)) |
4127 | return -ENOSPC; |
4128 | |
4129 | return 0; |
4130 | } |
4131 | |
/*
 * Insert preparation for an indexed directory.  Ensures the index has
 * room for one more entry (expanding an inline root to extents when it
 * fills up) and locates an unindexed leaf block with space for the new
 * dirent, extending the directory when the free list comes up empty.
 * On success lookup carries the buffer-head references
 * (dl_dx_root_bh, dl_dx_leaf_bh, dl_leaf_bh, dl_prev_leaf_bh) that the
 * caller must release.
 */
static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir,
					   struct buffer_head *di_bh,
					   const char *name,
					   int namelen,
					   struct ocfs2_dir_lookup_result *lookup)
{
	int ret, free_dx_root = 1;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	struct buffer_head *dx_root_bh = NULL;
	struct buffer_head *leaf_bh = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_dx_root_block *dx_root;

	ret = ocfs2_read_dx_root(dir, di, dx_root_bh: &dx_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Hard cap on the total number of index entries. */
	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
	if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) {
		ret = -ENOSPC;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_dx_root_inline(dx_root)) {
		ret = ocfs2_inline_dx_has_space(dx_root_bh);

		if (ret == 0)
			goto search_el;

		/*
		 * We ran out of room in the root block. Expand it to
		 * an extent, then allow ocfs2_find_dir_space_dx to do
		 * the rest.
		 */
		ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Insert preparation for an indexed directory is split into two
	 * steps. The call to find_dir_space_dx reserves room in the index for
	 * an additional item. If we run out of space there, it's a real error
	 * we can't continue on.
	 */
	ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name,
				      namelen, lookup);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

search_el:
	/*
	 * Next, we need to find space in the unindexed tree. This call
	 * searches using the free space linked list. If the unindexed tree
	 * lacks sufficient space, we'll expand it below. The expansion code
	 * is smart enough to add any new blocks to the free space list.
	 */
	ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup);
	if (ret && ret != -ENOSPC) {
		mlog_errno(ret);
		goto out;
	}

	/* Do this up here - ocfs2_extend_dir might need the dx_root */
	lookup->dl_dx_root_bh = dx_root_bh;
	free_dx_root = 0;

	if (ret == -ENOSPC) {
		ret = ocfs2_extend_dir(osb, dir, parent_fe_bh: di_bh, blocks_wanted: 1, lookup, new_de_bh: &leaf_bh);

		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * We make the assumption here that new leaf blocks are added
		 * to the front of our free list.
		 */
		lookup->dl_prev_leaf_bh = NULL;
		lookup->dl_leaf_bh = leaf_bh;
	}

out:
	if (free_dx_root)
		brelse(bh: dx_root_bh);
	return ret;
}
4227 | |
4228 | /* |
4229 | * Get a directory ready for insert. Any directory allocation required |
4230 | * happens here. Success returns zero, and enough context in the dir |
 * lookup result that ocfs2_add_entry() will be able to complete the task
4232 | * with minimal performance impact. |
4233 | */ |
4234 | int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, |
4235 | struct inode *dir, |
4236 | struct buffer_head *parent_fe_bh, |
4237 | const char *name, |
4238 | int namelen, |
4239 | struct ocfs2_dir_lookup_result *lookup) |
4240 | { |
4241 | int ret; |
4242 | unsigned int blocks_wanted = 1; |
4243 | struct buffer_head *bh = NULL; |
4244 | |
4245 | trace_ocfs2_prepare_dir_for_insert( |
4246 | val1: (unsigned long long)OCFS2_I(inode: dir)->ip_blkno, val2: namelen); |
4247 | |
4248 | if (!namelen) { |
4249 | ret = -EINVAL; |
4250 | mlog_errno(ret); |
4251 | goto out; |
4252 | } |
4253 | |
4254 | /* |
4255 | * Do this up front to reduce confusion. |
4256 | * |
4257 | * The directory might start inline, then be turned into an |
4258 | * indexed one, in which case we'd need to hash deep inside |
4259 | * ocfs2_find_dir_space_id(). Since |
4260 | * ocfs2_prepare_dx_dir_for_insert() also needs this hash |
4261 | * done, there seems no point in spreading out the calls. We |
4262 | * can optimize away the case where the file system doesn't |
4263 | * support indexing. |
4264 | */ |
4265 | if (ocfs2_supports_indexed_dirs(osb)) |
4266 | ocfs2_dx_dir_name_hash(dir, name, len: namelen, hinfo: &lookup->dl_hinfo); |
4267 | |
4268 | if (ocfs2_dir_indexed(inode: dir)) { |
4269 | ret = ocfs2_prepare_dx_dir_for_insert(dir, di_bh: parent_fe_bh, |
4270 | name, namelen, lookup); |
4271 | if (ret) |
4272 | mlog_errno(ret); |
4273 | goto out; |
4274 | } |
4275 | |
4276 | if (OCFS2_I(inode: dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { |
4277 | ret = ocfs2_find_dir_space_id(dir, di_bh: parent_fe_bh, name, |
4278 | namelen, ret_de_bh: &bh, blocks_wanted: &blocks_wanted); |
4279 | } else |
4280 | ret = ocfs2_find_dir_space_el(dir, name, namelen, ret_de_bh: &bh); |
4281 | |
4282 | if (ret && ret != -ENOSPC) { |
4283 | mlog_errno(ret); |
4284 | goto out; |
4285 | } |
4286 | |
4287 | if (ret == -ENOSPC) { |
4288 | /* |
4289 | * We have to expand the directory to add this name. |
4290 | */ |
4291 | BUG_ON(bh); |
4292 | |
4293 | ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted, |
4294 | lookup, new_de_bh: &bh); |
4295 | if (ret) { |
4296 | if (ret != -ENOSPC) |
4297 | mlog_errno(ret); |
4298 | goto out; |
4299 | } |
4300 | |
4301 | BUG_ON(!bh); |
4302 | } |
4303 | |
4304 | lookup->dl_leaf_bh = bh; |
4305 | bh = NULL; |
4306 | out: |
4307 | brelse(bh); |
4308 | return ret; |
4309 | } |
4310 | |
/*
 * Free the directory's index root block back to its suballocator and
 * clear the indexed-dir state on the directory inode (both the
 * in-memory OCFS2_INDEXED_DIR_FL flag and the on-disk i_dx_root
 * pointer).  The extent allocator inode that owns the root block is
 * locked (inode mutex + cluster lock) for the duration.
 */
static int ocfs2_dx_dir_remove_index(struct inode *dir,
				     struct buffer_head *di_bh,
				     struct buffer_head *dx_root_bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_dx_root_block *dx_root;
	struct inode *dx_alloc_inode = NULL;
	struct buffer_head *dx_alloc_bh = NULL;
	handle_t *handle;
	u64 blk;
	u16 bit;
	u64 bg_blkno;

	dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data;

	/* The root block came from the extent allocator of the slot
	 * recorded in dr_suballoc_slot; open that allocator. */
	dx_alloc_inode = ocfs2_get_system_file_inode(osb,
					type: EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(dx_root->dr_suballoc_slot));
	if (!dx_alloc_inode) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	inode_lock(inode: dx_alloc_inode);

	ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS);
	if (IS_ERR(ptr: handle)) {
		ret = PTR_ERR(ptr: handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_journal_access_di(handle, ci: INODE_CACHE(inode: dir), bh: di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Detach the index from the directory inode first, under
	 * ip_lock for the in-memory dyn_features update. */
	spin_lock(lock: &OCFS2_I(inode: dir)->ip_lock);
	OCFS2_I(inode: dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
	di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
	spin_unlock(lock: &OCFS2_I(inode: dir)->ip_lock);
	di->i_dx_root = cpu_to_le64(0ULL);
	ocfs2_update_inode_fsync_trans(handle, inode: dir, datasync: 1);

	ocfs2_journal_dirty(handle, bh: di_bh);

	/* Return the root block's bit to the suballocator.  When
	 * dr_suballoc_loc is unset, derive the block group from the
	 * block number and bit. */
	blk = le64_to_cpu(dx_root->dr_blkno);
	bit = le16_to_cpu(dx_root->dr_suballoc_bit);
	if (dx_root->dr_suballoc_loc)
		bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc);
	else
		bg_blkno = ocfs2_which_suballoc_group(block: blk, bit);
	ret = ocfs2_free_suballoc_bits(handle, alloc_inode: dx_alloc_inode, alloc_bh: dx_alloc_bh,
				       start_bit: bit, bg_blkno, count: 1);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_unlock:
	ocfs2_inode_unlock(inode: dx_alloc_inode, ex: 1);

out_mutex:
	inode_unlock(inode: dx_alloc_inode);
	brelse(bh: dx_alloc_bh);
out:
	iput(dx_alloc_inode);
	return ret;
}
4391 | |
/*
 * Tear down the entire directory index: remove every cluster of index
 * leaves from the dx root's extent tree (walking from the top of the
 * major-hash space downward), then free the root block itself via
 * ocfs2_dx_dir_remove_index().  A no-op for unindexed directories.
 */
int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
{
	int ret;
	unsigned int clen;
	u32 major_hash = UINT_MAX, p_cpos, cpos;
	u64 blkno;
	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
	struct buffer_head *dx_root_bh = NULL;
	struct ocfs2_dx_root_block *dx_root;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_extent_tree et;

	ocfs2_init_dealloc_ctxt(c: &dealloc);

	if (!ocfs2_dir_indexed(inode: dir))
		return 0;

	ret = ocfs2_read_dx_root(dir, di, dx_root_bh: &dx_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;

	/* Inline roots own no leaf clusters; only the root block
	 * itself needs freeing. */
	if (ocfs2_dx_root_inline(dx_root))
		goto remove_index;

	ocfs2_init_dx_root_extent_tree(et: &et, ci: INODE_CACHE(inode: dir), bh: dx_root_bh);

	/* XXX: What if dr_clusters is too large? */
	/* Remove one extent record per iteration, starting at the
	 * record covering the highest hash and moving downward. */
	while (le32_to_cpu(dx_root->dr_clusters)) {
		ret = ocfs2_dx_dir_lookup_rec(inode: dir, el: &dx_root->dr_list,
					      major_hash, ret_cpos: &cpos, ret_phys_blkno: &blkno, ret_clen: &clen);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		p_cpos = ocfs2_blocks_to_clusters(sb: dir->i_sb, blocks: blkno);

		ret = ocfs2_remove_btree_range(inode: dir, et: &et, cpos, phys_cpos: p_cpos, len: clen, flags: 0,
					       dealloc: &dealloc, refcount_loc: 0, refcount_tree_locked: false);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (cpos == 0)
			break;

		/* Continue with the hash range below this record. */
		major_hash = cpos - 1;
	}

remove_index:
	ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_remove_from_cache(ci: INODE_CACHE(inode: dir), bh: dx_root_bh);
out:
	/* Kick the truncate log and run any queued deallocations even
	 * on error, so partially-removed clusters are reclaimed. */
	ocfs2_schedule_truncate_log_flush(osb, cancel: 1);
	ocfs2_run_deallocs(osb, ctxt: &dealloc);

	brelse(bh: dx_root_bh);
	return ret;
}
4461 | |