1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * NILFS directory entry operations |
4 | * |
5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
6 | * |
7 | * Modified for NILFS by Amagai Yoshiji. |
8 | */ |
9 | /* |
10 | * linux/fs/ext2/dir.c |
11 | * |
12 | * Copyright (C) 1992, 1993, 1994, 1995 |
13 | * Remy Card (card@masi.ibp.fr) |
14 | * Laboratoire MASI - Institut Blaise Pascal |
15 | * Universite Pierre et Marie Curie (Paris VI) |
16 | * |
17 | * from |
18 | * |
19 | * linux/fs/minix/dir.c |
20 | * |
21 | * Copyright (C) 1991, 1992 Linus Torvalds |
22 | * |
23 | * ext2 directory handling functions |
24 | * |
25 | * Big-endian to little-endian byte-swapping/bitmaps by |
26 | * David S. Miller (davem@caip.rutgers.edu), 1995 |
27 | * |
28 | * All code that works with directory layout had been switched to pagecache |
29 | * and moved here. AV |
30 | */ |
31 | |
32 | #include <linux/pagemap.h> |
33 | #include "nilfs.h" |
34 | #include "page.h" |
35 | |
36 | static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen) |
37 | { |
38 | unsigned int len = le16_to_cpu(dlen); |
39 | |
40 | #if (PAGE_SIZE >= 65536) |
41 | if (len == NILFS_MAX_REC_LEN) |
42 | return 1 << 16; |
43 | #endif |
44 | return len; |
45 | } |
46 | |
47 | static inline __le16 nilfs_rec_len_to_disk(unsigned int len) |
48 | { |
49 | #if (PAGE_SIZE >= 65536) |
50 | if (len == (1 << 16)) |
51 | return cpu_to_le16(NILFS_MAX_REC_LEN); |
52 | |
53 | BUG_ON(len > (1 << 16)); |
54 | #endif |
55 | return cpu_to_le16(len); |
56 | } |
57 | |
/*
 * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned int nilfs_chunk_size(struct inode *inode)
{
	/* Directory chunks are exactly one filesystem block. */
	return inode->i_sb->s_blocksize;
}
66 | |
/*
 * Release a directory page obtained via nilfs_get_page(): drop the
 * kmap()ed kernel mapping, then the page cache reference.
 */
static inline void nilfs_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}
72 | |
73 | /* |
74 | * Return the offset into page `page_nr' of the last valid |
75 | * byte in that page, plus one. |
76 | */ |
77 | static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr) |
78 | { |
79 | unsigned int last_byte = inode->i_size; |
80 | |
81 | last_byte -= page_nr << PAGE_SHIFT; |
82 | if (last_byte > PAGE_SIZE) |
83 | last_byte = PAGE_SIZE; |
84 | return last_byte; |
85 | } |
86 | |
87 | static int nilfs_prepare_chunk(struct page *page, unsigned int from, |
88 | unsigned int to) |
89 | { |
90 | loff_t pos = page_offset(page) + from; |
91 | |
92 | return __block_write_begin(page, pos, len: to - from, get_block: nilfs_get_block); |
93 | } |
94 | |
95 | static void nilfs_commit_chunk(struct page *page, |
96 | struct address_space *mapping, |
97 | unsigned int from, unsigned int to) |
98 | { |
99 | struct inode *dir = mapping->host; |
100 | loff_t pos = page_offset(page) + from; |
101 | unsigned int len = to - from; |
102 | unsigned int nr_dirty, copied; |
103 | int err; |
104 | |
105 | nr_dirty = nilfs_page_count_clean_buffers(page, from, to); |
106 | copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); |
107 | if (pos + copied > dir->i_size) |
108 | i_size_write(inode: dir, i_size: pos + copied); |
109 | if (IS_DIRSYNC(dir)) |
110 | nilfs_set_transaction_flag(NILFS_TI_SYNC); |
111 | err = nilfs_set_file_dirty(inode: dir, nr_dirty); |
112 | WARN_ON(err); /* do not happen */ |
113 | unlock_page(page); |
114 | } |
115 | |
116 | static bool nilfs_check_page(struct page *page) |
117 | { |
118 | struct inode *dir = page->mapping->host; |
119 | struct super_block *sb = dir->i_sb; |
120 | unsigned int chunk_size = nilfs_chunk_size(inode: dir); |
121 | char *kaddr = page_address(page); |
122 | unsigned int offs, rec_len; |
123 | unsigned int limit = PAGE_SIZE; |
124 | struct nilfs_dir_entry *p; |
125 | char *error; |
126 | |
127 | if ((dir->i_size >> PAGE_SHIFT) == page->index) { |
128 | limit = dir->i_size & ~PAGE_MASK; |
129 | if (limit & (chunk_size - 1)) |
130 | goto Ebadsize; |
131 | if (!limit) |
132 | goto out; |
133 | } |
134 | for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) { |
135 | p = (struct nilfs_dir_entry *)(kaddr + offs); |
136 | rec_len = nilfs_rec_len_from_disk(dlen: p->rec_len); |
137 | |
138 | if (rec_len < NILFS_DIR_REC_LEN(1)) |
139 | goto Eshort; |
140 | if (rec_len & 3) |
141 | goto Ealign; |
142 | if (rec_len < NILFS_DIR_REC_LEN(p->name_len)) |
143 | goto Enamelen; |
144 | if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)) |
145 | goto Espan; |
146 | } |
147 | if (offs != limit) |
148 | goto Eend; |
149 | out: |
150 | SetPageChecked(page); |
151 | return true; |
152 | |
153 | /* Too bad, we had an error */ |
154 | |
155 | Ebadsize: |
156 | nilfs_error(sb, |
157 | "size of directory #%lu is not a multiple of chunk size" , |
158 | dir->i_ino); |
159 | goto fail; |
160 | Eshort: |
161 | error = "rec_len is smaller than minimal" ; |
162 | goto bad_entry; |
163 | Ealign: |
164 | error = "unaligned directory entry" ; |
165 | goto bad_entry; |
166 | Enamelen: |
167 | error = "rec_len is too small for name_len" ; |
168 | goto bad_entry; |
169 | Espan: |
170 | error = "directory entry across blocks" ; |
171 | bad_entry: |
172 | nilfs_error(sb, |
173 | "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d" , |
174 | dir->i_ino, error, (page->index << PAGE_SHIFT) + offs, |
175 | (unsigned long)le64_to_cpu(p->inode), |
176 | rec_len, p->name_len); |
177 | goto fail; |
178 | Eend: |
179 | p = (struct nilfs_dir_entry *)(kaddr + offs); |
180 | nilfs_error(sb, |
181 | "entry in directory #%lu spans the page boundary offset=%lu, inode=%lu" , |
182 | dir->i_ino, (page->index << PAGE_SHIFT) + offs, |
183 | (unsigned long)le64_to_cpu(p->inode)); |
184 | fail: |
185 | SetPageError(page); |
186 | return false; |
187 | } |
188 | |
189 | static struct page *nilfs_get_page(struct inode *dir, unsigned long n) |
190 | { |
191 | struct address_space *mapping = dir->i_mapping; |
192 | struct page *page = read_mapping_page(mapping, index: n, NULL); |
193 | |
194 | if (!IS_ERR(ptr: page)) { |
195 | kmap(page); |
196 | if (unlikely(!PageChecked(page))) { |
197 | if (!nilfs_check_page(page)) |
198 | goto fail; |
199 | } |
200 | } |
201 | return page; |
202 | |
203 | fail: |
204 | nilfs_put_page(page); |
205 | return ERR_PTR(error: -EIO); |
206 | } |
207 | |
208 | /* |
209 | * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure. |
210 | * |
211 | * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller. |
212 | */ |
213 | static int |
214 | nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de) |
215 | { |
216 | if (len != de->name_len) |
217 | return 0; |
218 | if (!de->inode) |
219 | return 0; |
220 | return !memcmp(p: name, q: de->name, size: len); |
221 | } |
222 | |
223 | /* |
224 | * p is at least 6 bytes before the end of page |
225 | */ |
226 | static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p) |
227 | { |
228 | return (struct nilfs_dir_entry *)((char *)p + |
229 | nilfs_rec_len_from_disk(dlen: p->rec_len)); |
230 | } |
231 | |
/*
 * Translation table from on-disk NILFS file-type codes (NILFS_FT_*)
 * to the DT_* values that readdir reports to user space.
 */
static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
	[NILFS_FT_UNKNOWN]	= DT_UNKNOWN,
	[NILFS_FT_REG_FILE]	= DT_REG,
	[NILFS_FT_DIR]		= DT_DIR,
	[NILFS_FT_CHRDEV]	= DT_CHR,
	[NILFS_FT_BLKDEV]	= DT_BLK,
	[NILFS_FT_FIFO]		= DT_FIFO,
	[NILFS_FT_SOCK]		= DT_SOCK,
	[NILFS_FT_SYMLINK]	= DT_LNK,
};
243 | |
/*
 * Translation table from the S_IFMT bits of an in-core inode mode to
 * on-disk NILFS file-type codes; S_SHIFT moves the format bits down to
 * index range.  Used by nilfs_set_de_type().
 */
#define S_SHIFT 12
static unsigned char
nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= NILFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= NILFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= NILFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= NILFS_FT_SYMLINK,
};
255 | |
256 | static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode) |
257 | { |
258 | umode_t mode = inode->i_mode; |
259 | |
260 | de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; |
261 | } |
262 | |
263 | static int nilfs_readdir(struct file *file, struct dir_context *ctx) |
264 | { |
265 | loff_t pos = ctx->pos; |
266 | struct inode *inode = file_inode(f: file); |
267 | struct super_block *sb = inode->i_sb; |
268 | unsigned int offset = pos & ~PAGE_MASK; |
269 | unsigned long n = pos >> PAGE_SHIFT; |
270 | unsigned long npages = dir_pages(inode); |
271 | |
272 | if (pos > inode->i_size - NILFS_DIR_REC_LEN(1)) |
273 | return 0; |
274 | |
275 | for ( ; n < npages; n++, offset = 0) { |
276 | char *kaddr, *limit; |
277 | struct nilfs_dir_entry *de; |
278 | struct page *page = nilfs_get_page(dir: inode, n); |
279 | |
280 | if (IS_ERR(ptr: page)) { |
281 | nilfs_error(sb, "bad page in #%lu" , inode->i_ino); |
282 | ctx->pos += PAGE_SIZE - offset; |
283 | return -EIO; |
284 | } |
285 | kaddr = page_address(page); |
286 | de = (struct nilfs_dir_entry *)(kaddr + offset); |
287 | limit = kaddr + nilfs_last_byte(inode, page_nr: n) - |
288 | NILFS_DIR_REC_LEN(1); |
289 | for ( ; (char *)de <= limit; de = nilfs_next_entry(p: de)) { |
290 | if (de->rec_len == 0) { |
291 | nilfs_error(sb, "zero-length directory entry" ); |
292 | nilfs_put_page(page); |
293 | return -EIO; |
294 | } |
295 | if (de->inode) { |
296 | unsigned char t; |
297 | |
298 | if (de->file_type < NILFS_FT_MAX) |
299 | t = nilfs_filetype_table[de->file_type]; |
300 | else |
301 | t = DT_UNKNOWN; |
302 | |
303 | if (!dir_emit(ctx, name: de->name, namelen: de->name_len, |
304 | le64_to_cpu(de->inode), type: t)) { |
305 | nilfs_put_page(page); |
306 | return 0; |
307 | } |
308 | } |
309 | ctx->pos += nilfs_rec_len_from_disk(dlen: de->rec_len); |
310 | } |
311 | nilfs_put_page(page); |
312 | } |
313 | return 0; |
314 | } |
315 | |
316 | /* |
317 | * nilfs_find_entry() |
318 | * |
319 | * finds an entry in the specified directory with the wanted name. It |
320 | * returns the page in which the entry was found, and the entry itself |
321 | * (as a parameter - res_dir). Page is returned mapped and unlocked. |
322 | * Entry is guaranteed to be valid. |
323 | */ |
324 | struct nilfs_dir_entry * |
325 | nilfs_find_entry(struct inode *dir, const struct qstr *qstr, |
326 | struct page **res_page) |
327 | { |
328 | const unsigned char *name = qstr->name; |
329 | int namelen = qstr->len; |
330 | unsigned int reclen = NILFS_DIR_REC_LEN(namelen); |
331 | unsigned long start, n; |
332 | unsigned long npages = dir_pages(inode: dir); |
333 | struct page *page = NULL; |
334 | struct nilfs_inode_info *ei = NILFS_I(inode: dir); |
335 | struct nilfs_dir_entry *de; |
336 | |
337 | if (npages == 0) |
338 | goto out; |
339 | |
340 | /* OFFSET_CACHE */ |
341 | *res_page = NULL; |
342 | |
343 | start = ei->i_dir_start_lookup; |
344 | if (start >= npages) |
345 | start = 0; |
346 | n = start; |
347 | do { |
348 | char *kaddr; |
349 | |
350 | page = nilfs_get_page(dir, n); |
351 | if (!IS_ERR(ptr: page)) { |
352 | kaddr = page_address(page); |
353 | de = (struct nilfs_dir_entry *)kaddr; |
354 | kaddr += nilfs_last_byte(inode: dir, page_nr: n) - reclen; |
355 | while ((char *) de <= kaddr) { |
356 | if (de->rec_len == 0) { |
357 | nilfs_error(dir->i_sb, |
358 | "zero-length directory entry" ); |
359 | nilfs_put_page(page); |
360 | goto out; |
361 | } |
362 | if (nilfs_match(len: namelen, name, de)) |
363 | goto found; |
364 | de = nilfs_next_entry(p: de); |
365 | } |
366 | nilfs_put_page(page); |
367 | } |
368 | if (++n >= npages) |
369 | n = 0; |
370 | /* next page is past the blocks we've got */ |
371 | if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { |
372 | nilfs_error(dir->i_sb, |
373 | "dir %lu size %lld exceeds block count %llu" , |
374 | dir->i_ino, dir->i_size, |
375 | (unsigned long long)dir->i_blocks); |
376 | goto out; |
377 | } |
378 | } while (n != start); |
379 | out: |
380 | return NULL; |
381 | |
382 | found: |
383 | *res_page = page; |
384 | ei->i_dir_start_lookup = n; |
385 | return de; |
386 | } |
387 | |
388 | struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p) |
389 | { |
390 | struct page *page = nilfs_get_page(dir, n: 0); |
391 | struct nilfs_dir_entry *de = NULL; |
392 | |
393 | if (!IS_ERR(ptr: page)) { |
394 | de = nilfs_next_entry( |
395 | p: (struct nilfs_dir_entry *)page_address(page)); |
396 | *p = page; |
397 | } |
398 | return de; |
399 | } |
400 | |
401 | ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr) |
402 | { |
403 | ino_t res = 0; |
404 | struct nilfs_dir_entry *de; |
405 | struct page *page; |
406 | |
407 | de = nilfs_find_entry(dir, qstr, res_page: &page); |
408 | if (de) { |
409 | res = le64_to_cpu(de->inode); |
410 | kunmap(page); |
411 | put_page(page); |
412 | } |
413 | return res; |
414 | } |
415 | |
416 | /* Releases the page */ |
417 | void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, |
418 | struct page *page, struct inode *inode) |
419 | { |
420 | unsigned int from = (char *)de - (char *)page_address(page); |
421 | unsigned int to = from + nilfs_rec_len_from_disk(dlen: de->rec_len); |
422 | struct address_space *mapping = page->mapping; |
423 | int err; |
424 | |
425 | lock_page(page); |
426 | err = nilfs_prepare_chunk(page, from, to); |
427 | BUG_ON(err); |
428 | de->inode = cpu_to_le64(inode->i_ino); |
429 | nilfs_set_de_type(de, inode); |
430 | nilfs_commit_chunk(page, mapping, from, to); |
431 | nilfs_put_page(page); |
432 | inode_set_mtime_to_ts(inode: dir, ts: inode_set_ctime_current(inode: dir)); |
433 | } |
434 | |
435 | /* |
436 | * Parent is locked. |
437 | */ |
438 | int nilfs_add_link(struct dentry *dentry, struct inode *inode) |
439 | { |
440 | struct inode *dir = d_inode(dentry: dentry->d_parent); |
441 | const unsigned char *name = dentry->d_name.name; |
442 | int namelen = dentry->d_name.len; |
443 | unsigned int chunk_size = nilfs_chunk_size(inode: dir); |
444 | unsigned int reclen = NILFS_DIR_REC_LEN(namelen); |
445 | unsigned short rec_len, name_len; |
446 | struct page *page = NULL; |
447 | struct nilfs_dir_entry *de; |
448 | unsigned long npages = dir_pages(inode: dir); |
449 | unsigned long n; |
450 | char *kaddr; |
451 | unsigned int from, to; |
452 | int err; |
453 | |
454 | /* |
455 | * We take care of directory expansion in the same loop. |
456 | * This code plays outside i_size, so it locks the page |
457 | * to protect that region. |
458 | */ |
459 | for (n = 0; n <= npages; n++) { |
460 | char *dir_end; |
461 | |
462 | page = nilfs_get_page(dir, n); |
463 | err = PTR_ERR(ptr: page); |
464 | if (IS_ERR(ptr: page)) |
465 | goto out; |
466 | lock_page(page); |
467 | kaddr = page_address(page); |
468 | dir_end = kaddr + nilfs_last_byte(inode: dir, page_nr: n); |
469 | de = (struct nilfs_dir_entry *)kaddr; |
470 | kaddr += PAGE_SIZE - reclen; |
471 | while ((char *)de <= kaddr) { |
472 | if ((char *)de == dir_end) { |
473 | /* We hit i_size */ |
474 | name_len = 0; |
475 | rec_len = chunk_size; |
476 | de->rec_len = nilfs_rec_len_to_disk(len: chunk_size); |
477 | de->inode = 0; |
478 | goto got_it; |
479 | } |
480 | if (de->rec_len == 0) { |
481 | nilfs_error(dir->i_sb, |
482 | "zero-length directory entry" ); |
483 | err = -EIO; |
484 | goto out_unlock; |
485 | } |
486 | err = -EEXIST; |
487 | if (nilfs_match(len: namelen, name, de)) |
488 | goto out_unlock; |
489 | name_len = NILFS_DIR_REC_LEN(de->name_len); |
490 | rec_len = nilfs_rec_len_from_disk(dlen: de->rec_len); |
491 | if (!de->inode && rec_len >= reclen) |
492 | goto got_it; |
493 | if (rec_len >= name_len + reclen) |
494 | goto got_it; |
495 | de = (struct nilfs_dir_entry *)((char *)de + rec_len); |
496 | } |
497 | unlock_page(page); |
498 | nilfs_put_page(page); |
499 | } |
500 | BUG(); |
501 | return -EINVAL; |
502 | |
503 | got_it: |
504 | from = (char *)de - (char *)page_address(page); |
505 | to = from + rec_len; |
506 | err = nilfs_prepare_chunk(page, from, to); |
507 | if (err) |
508 | goto out_unlock; |
509 | if (de->inode) { |
510 | struct nilfs_dir_entry *de1; |
511 | |
512 | de1 = (struct nilfs_dir_entry *)((char *)de + name_len); |
513 | de1->rec_len = nilfs_rec_len_to_disk(len: rec_len - name_len); |
514 | de->rec_len = nilfs_rec_len_to_disk(len: name_len); |
515 | de = de1; |
516 | } |
517 | de->name_len = namelen; |
518 | memcpy(de->name, name, namelen); |
519 | de->inode = cpu_to_le64(inode->i_ino); |
520 | nilfs_set_de_type(de, inode); |
521 | nilfs_commit_chunk(page, mapping: page->mapping, from, to); |
522 | inode_set_mtime_to_ts(inode: dir, ts: inode_set_ctime_current(inode: dir)); |
523 | nilfs_mark_inode_dirty(inode: dir); |
524 | /* OFFSET_CACHE */ |
525 | out_put: |
526 | nilfs_put_page(page); |
527 | out: |
528 | return err; |
529 | out_unlock: |
530 | unlock_page(page); |
531 | goto out_put; |
532 | } |
533 | |
534 | /* |
535 | * nilfs_delete_entry deletes a directory entry by merging it with the |
536 | * previous entry. Page is up-to-date. Releases the page. |
537 | */ |
538 | int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) |
539 | { |
540 | struct address_space *mapping = page->mapping; |
541 | struct inode *inode = mapping->host; |
542 | char *kaddr = page_address(page); |
543 | unsigned int from, to; |
544 | struct nilfs_dir_entry *de, *pde = NULL; |
545 | int err; |
546 | |
547 | from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1); |
548 | to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dlen: dir->rec_len); |
549 | de = (struct nilfs_dir_entry *)(kaddr + from); |
550 | |
551 | while ((char *)de < (char *)dir) { |
552 | if (de->rec_len == 0) { |
553 | nilfs_error(inode->i_sb, |
554 | "zero-length directory entry" ); |
555 | err = -EIO; |
556 | goto out; |
557 | } |
558 | pde = de; |
559 | de = nilfs_next_entry(p: de); |
560 | } |
561 | if (pde) |
562 | from = (char *)pde - (char *)page_address(page); |
563 | lock_page(page); |
564 | err = nilfs_prepare_chunk(page, from, to); |
565 | BUG_ON(err); |
566 | if (pde) |
567 | pde->rec_len = nilfs_rec_len_to_disk(len: to - from); |
568 | dir->inode = 0; |
569 | nilfs_commit_chunk(page, mapping, from, to); |
570 | inode_set_mtime_to_ts(inode, ts: inode_set_ctime_current(inode)); |
571 | out: |
572 | nilfs_put_page(page); |
573 | return err; |
574 | } |
575 | |
576 | /* |
577 | * Set the first fragment of directory. |
578 | */ |
579 | int nilfs_make_empty(struct inode *inode, struct inode *parent) |
580 | { |
581 | struct address_space *mapping = inode->i_mapping; |
582 | struct page *page = grab_cache_page(mapping, index: 0); |
583 | unsigned int chunk_size = nilfs_chunk_size(inode); |
584 | struct nilfs_dir_entry *de; |
585 | int err; |
586 | void *kaddr; |
587 | |
588 | if (!page) |
589 | return -ENOMEM; |
590 | |
591 | err = nilfs_prepare_chunk(page, from: 0, to: chunk_size); |
592 | if (unlikely(err)) { |
593 | unlock_page(page); |
594 | goto fail; |
595 | } |
596 | kaddr = kmap_atomic(page); |
597 | memset(kaddr, 0, chunk_size); |
598 | de = (struct nilfs_dir_entry *)kaddr; |
599 | de->name_len = 1; |
600 | de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1)); |
601 | memcpy(de->name, ".\0\0" , 4); |
602 | de->inode = cpu_to_le64(inode->i_ino); |
603 | nilfs_set_de_type(de, inode); |
604 | |
605 | de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1)); |
606 | de->name_len = 2; |
607 | de->rec_len = nilfs_rec_len_to_disk(len: chunk_size - NILFS_DIR_REC_LEN(1)); |
608 | de->inode = cpu_to_le64(parent->i_ino); |
609 | memcpy(de->name, "..\0" , 4); |
610 | nilfs_set_de_type(de, inode); |
611 | kunmap_atomic(kaddr); |
612 | nilfs_commit_chunk(page, mapping, from: 0, to: chunk_size); |
613 | fail: |
614 | put_page(page); |
615 | return err; |
616 | } |
617 | |
618 | /* |
619 | * routine to check that the specified directory is empty (for rmdir) |
620 | */ |
621 | int nilfs_empty_dir(struct inode *inode) |
622 | { |
623 | struct page *page = NULL; |
624 | unsigned long i, npages = dir_pages(inode); |
625 | |
626 | for (i = 0; i < npages; i++) { |
627 | char *kaddr; |
628 | struct nilfs_dir_entry *de; |
629 | |
630 | page = nilfs_get_page(dir: inode, n: i); |
631 | if (IS_ERR(ptr: page)) |
632 | continue; |
633 | |
634 | kaddr = page_address(page); |
635 | de = (struct nilfs_dir_entry *)kaddr; |
636 | kaddr += nilfs_last_byte(inode, page_nr: i) - NILFS_DIR_REC_LEN(1); |
637 | |
638 | while ((char *)de <= kaddr) { |
639 | if (de->rec_len == 0) { |
640 | nilfs_error(inode->i_sb, |
641 | "zero-length directory entry (kaddr=%p, de=%p)" , |
642 | kaddr, de); |
643 | goto not_empty; |
644 | } |
645 | if (de->inode != 0) { |
646 | /* check for . and .. */ |
647 | if (de->name[0] != '.') |
648 | goto not_empty; |
649 | if (de->name_len > 2) |
650 | goto not_empty; |
651 | if (de->name_len < 2) { |
652 | if (de->inode != |
653 | cpu_to_le64(inode->i_ino)) |
654 | goto not_empty; |
655 | } else if (de->name[1] != '.') |
656 | goto not_empty; |
657 | } |
658 | de = nilfs_next_entry(p: de); |
659 | } |
660 | nilfs_put_page(page); |
661 | } |
662 | return 1; |
663 | |
664 | not_empty: |
665 | nilfs_put_page(page); |
666 | return 0; |
667 | } |
668 | |
/* File operations for NILFS directories, wired into the VFS. */
const struct file_operations nilfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= nilfs_readdir,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.fsync		= nilfs_sync_file,

};
680 | |