// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext2 directory handling functions
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */
24 | |
25 | #include "ext2.h" |
26 | #include <linux/buffer_head.h> |
27 | #include <linux/pagemap.h> |
28 | #include <linux/swap.h> |
29 | #include <linux/iversion.h> |
30 | |
31 | typedef struct ext2_dir_entry_2 ext2_dirent; |
32 | |
33 | /* |
34 | * Tests against MAX_REC_LEN etc were put in place for 64k block |
35 | * sizes; if that is not possible on this arch, we can skip |
36 | * those tests and speed things up. |
37 | */ |
38 | static inline unsigned ext2_rec_len_from_disk(__le16 dlen) |
39 | { |
40 | unsigned len = le16_to_cpu(dlen); |
41 | |
42 | #if (PAGE_SIZE >= 65536) |
43 | if (len == EXT2_MAX_REC_LEN) |
44 | return 1 << 16; |
45 | #endif |
46 | return len; |
47 | } |
48 | |
49 | static inline __le16 ext2_rec_len_to_disk(unsigned len) |
50 | { |
51 | #if (PAGE_SIZE >= 65536) |
52 | if (len == (1 << 16)) |
53 | return cpu_to_le16(EXT2_MAX_REC_LEN); |
54 | else |
55 | BUG_ON(len > (1 << 16)); |
56 | #endif |
57 | return cpu_to_le16(len); |
58 | } |
59 | |
60 | /* |
61 | * ext2 uses block-sized chunks. Arguably, sector-sized ones would be |
62 | * more robust, but we have what we have |
63 | */ |
static inline unsigned ext2_chunk_size(struct inode *inode)
{
	/* A directory "chunk" is one filesystem block. */
	return inode->i_sb->s_blocksize;
}
68 | |
69 | /* |
70 | * Return the offset into page `page_nr' of the last valid |
71 | * byte in that page, plus one. |
72 | */ |
73 | static unsigned |
74 | ext2_last_byte(struct inode *inode, unsigned long page_nr) |
75 | { |
76 | unsigned last_byte = inode->i_size; |
77 | |
78 | last_byte -= page_nr << PAGE_SHIFT; |
79 | if (last_byte > PAGE_SIZE) |
80 | last_byte = PAGE_SIZE; |
81 | return last_byte; |
82 | } |
83 | |
84 | static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len) |
85 | { |
86 | struct address_space *mapping = folio->mapping; |
87 | struct inode *dir = mapping->host; |
88 | |
89 | inode_inc_iversion(inode: dir); |
90 | block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL); |
91 | |
92 | if (pos+len > dir->i_size) { |
93 | i_size_write(inode: dir, i_size: pos+len); |
94 | mark_inode_dirty(inode: dir); |
95 | } |
96 | folio_unlock(folio); |
97 | } |
98 | |
99 | static bool ext2_check_folio(struct folio *folio, int quiet, char *kaddr) |
100 | { |
101 | struct inode *dir = folio->mapping->host; |
102 | struct super_block *sb = dir->i_sb; |
103 | unsigned chunk_size = ext2_chunk_size(inode: dir); |
104 | u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); |
105 | unsigned offs, rec_len; |
106 | unsigned limit = folio_size(folio); |
107 | ext2_dirent *p; |
108 | char *error; |
109 | |
110 | if (dir->i_size < folio_pos(folio) + limit) { |
111 | limit = offset_in_folio(folio, dir->i_size); |
112 | if (limit & (chunk_size - 1)) |
113 | goto Ebadsize; |
114 | if (!limit) |
115 | goto out; |
116 | } |
117 | for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) { |
118 | p = (ext2_dirent *)(kaddr + offs); |
119 | rec_len = ext2_rec_len_from_disk(dlen: p->rec_len); |
120 | |
121 | if (unlikely(rec_len < EXT2_DIR_REC_LEN(1))) |
122 | goto Eshort; |
123 | if (unlikely(rec_len & 3)) |
124 | goto Ealign; |
125 | if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len))) |
126 | goto Enamelen; |
127 | if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))) |
128 | goto Espan; |
129 | if (unlikely(le32_to_cpu(p->inode) > max_inumber)) |
130 | goto Einumber; |
131 | } |
132 | if (offs != limit) |
133 | goto Eend; |
134 | out: |
135 | folio_set_checked(folio); |
136 | return true; |
137 | |
138 | /* Too bad, we had an error */ |
139 | |
140 | Ebadsize: |
141 | if (!quiet) |
142 | ext2_error(sb, __func__, |
143 | "size of directory #%lu is not a multiple " |
144 | "of chunk size" , dir->i_ino); |
145 | goto fail; |
146 | Eshort: |
147 | error = "rec_len is smaller than minimal" ; |
148 | goto bad_entry; |
149 | Ealign: |
150 | error = "unaligned directory entry" ; |
151 | goto bad_entry; |
152 | Enamelen: |
153 | error = "rec_len is too small for name_len" ; |
154 | goto bad_entry; |
155 | Espan: |
156 | error = "directory entry across blocks" ; |
157 | goto bad_entry; |
158 | Einumber: |
159 | error = "inode out of bounds" ; |
160 | bad_entry: |
161 | if (!quiet) |
162 | ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " |
163 | "offset=%llu, inode=%lu, rec_len=%d, name_len=%d" , |
164 | dir->i_ino, error, folio_pos(folio) + offs, |
165 | (unsigned long) le32_to_cpu(p->inode), |
166 | rec_len, p->name_len); |
167 | goto fail; |
168 | Eend: |
169 | if (!quiet) { |
170 | p = (ext2_dirent *)(kaddr + offs); |
171 | ext2_error(sb, "ext2_check_folio" , |
172 | "entry in directory #%lu spans the page boundary" |
173 | "offset=%llu, inode=%lu" , |
174 | dir->i_ino, folio_pos(folio) + offs, |
175 | (unsigned long) le32_to_cpu(p->inode)); |
176 | } |
177 | fail: |
178 | folio_set_error(folio); |
179 | return false; |
180 | } |
181 | |
182 | /* |
183 | * Calls to ext2_get_folio()/folio_release_kmap() must be nested according |
184 | * to the rules documented in kmap_local_folio()/kunmap_local(). |
185 | * |
186 | * NOTE: ext2_find_entry() and ext2_dotdot() act as a call |
187 | * to folio_release_kmap() and should be treated as a call to |
188 | * folio_release_kmap() for nesting purposes. |
189 | */ |
190 | static void *ext2_get_folio(struct inode *dir, unsigned long n, |
191 | int quiet, struct folio **foliop) |
192 | { |
193 | struct address_space *mapping = dir->i_mapping; |
194 | struct folio *folio = read_mapping_folio(mapping, index: n, NULL); |
195 | void *kaddr; |
196 | |
197 | if (IS_ERR(ptr: folio)) |
198 | return ERR_CAST(ptr: folio); |
199 | kaddr = kmap_local_folio(folio, offset: 0); |
200 | if (unlikely(!folio_test_checked(folio))) { |
201 | if (!ext2_check_folio(folio, quiet, kaddr)) |
202 | goto fail; |
203 | } |
204 | *foliop = folio; |
205 | return kaddr; |
206 | |
207 | fail: |
208 | folio_release_kmap(folio, addr: kaddr); |
209 | return ERR_PTR(error: -EIO); |
210 | } |
211 | |
212 | /* |
213 | * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure. |
214 | * |
215 | * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller. |
216 | */ |
217 | static inline int ext2_match (int len, const char * const name, |
218 | struct ext2_dir_entry_2 * de) |
219 | { |
220 | if (len != de->name_len) |
221 | return 0; |
222 | if (!de->inode) |
223 | return 0; |
224 | return !memcmp(p: name, q: de->name, size: len); |
225 | } |
226 | |
227 | /* |
228 | * p is at least 6 bytes before the end of page |
229 | */ |
230 | static inline ext2_dirent *ext2_next_entry(ext2_dirent *p) |
231 | { |
232 | return (ext2_dirent *)((char *)p + |
233 | ext2_rec_len_from_disk(dlen: p->rec_len)); |
234 | } |
235 | |
/*
 * Re-find a valid entry offset after the directory may have changed
 * under us (iversion mismatch in readdir).  Walks forward from the
 * start of the chunk containing @offset until reaching or passing the
 * remembered position, and returns that entry's in-page offset.
 */
static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
	ext2_dirent *de = (ext2_dirent*)(base + offset);
	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		/* Corrupt (zero-length) record: stop rather than loop. */
		if (p->rec_len == 0)
			break;
		p = ext2_next_entry(p);
	}
	return offset_in_page(p);
}
248 | |
249 | static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode) |
250 | { |
251 | if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE)) |
252 | de->file_type = fs_umode_to_ftype(mode: inode->i_mode); |
253 | else |
254 | de->file_type = 0; |
255 | } |
256 | |
257 | static int |
258 | ext2_readdir(struct file *file, struct dir_context *ctx) |
259 | { |
260 | loff_t pos = ctx->pos; |
261 | struct inode *inode = file_inode(f: file); |
262 | struct super_block *sb = inode->i_sb; |
263 | unsigned int offset = pos & ~PAGE_MASK; |
264 | unsigned long n = pos >> PAGE_SHIFT; |
265 | unsigned long npages = dir_pages(inode); |
266 | unsigned chunk_mask = ~(ext2_chunk_size(inode)-1); |
267 | bool need_revalidate = !inode_eq_iversion(inode, old: file->f_version); |
268 | bool has_filetype; |
269 | |
270 | if (pos > inode->i_size - EXT2_DIR_REC_LEN(1)) |
271 | return 0; |
272 | |
273 | has_filetype = |
274 | EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE); |
275 | |
276 | for ( ; n < npages; n++, offset = 0) { |
277 | ext2_dirent *de; |
278 | struct folio *folio; |
279 | char *kaddr = ext2_get_folio(dir: inode, n, quiet: 0, foliop: &folio); |
280 | char *limit; |
281 | |
282 | if (IS_ERR(ptr: kaddr)) { |
283 | ext2_error(sb, __func__, |
284 | "bad page in #%lu" , |
285 | inode->i_ino); |
286 | ctx->pos += PAGE_SIZE - offset; |
287 | return PTR_ERR(ptr: kaddr); |
288 | } |
289 | if (unlikely(need_revalidate)) { |
290 | if (offset) { |
291 | offset = ext2_validate_entry(base: kaddr, offset, mask: chunk_mask); |
292 | ctx->pos = (n<<PAGE_SHIFT) + offset; |
293 | } |
294 | file->f_version = inode_query_iversion(inode); |
295 | need_revalidate = false; |
296 | } |
297 | de = (ext2_dirent *)(kaddr+offset); |
298 | limit = kaddr + ext2_last_byte(inode, page_nr: n) - EXT2_DIR_REC_LEN(1); |
299 | for ( ;(char*)de <= limit; de = ext2_next_entry(p: de)) { |
300 | if (de->rec_len == 0) { |
301 | ext2_error(sb, __func__, |
302 | "zero-length directory entry" ); |
303 | folio_release_kmap(folio, addr: de); |
304 | return -EIO; |
305 | } |
306 | if (de->inode) { |
307 | unsigned char d_type = DT_UNKNOWN; |
308 | |
309 | if (has_filetype) |
310 | d_type = fs_ftype_to_dtype(filetype: de->file_type); |
311 | |
312 | if (!dir_emit(ctx, name: de->name, namelen: de->name_len, |
313 | le32_to_cpu(de->inode), |
314 | type: d_type)) { |
315 | folio_release_kmap(folio, addr: de); |
316 | return 0; |
317 | } |
318 | } |
319 | ctx->pos += ext2_rec_len_from_disk(dlen: de->rec_len); |
320 | } |
321 | folio_release_kmap(folio, addr: kaddr); |
322 | } |
323 | return 0; |
324 | } |
325 | |
326 | /* |
327 | * ext2_find_entry() |
328 | * |
329 | * finds an entry in the specified directory with the wanted name. It |
330 | * returns the page in which the entry was found (as a parameter - res_page), |
331 | * and the entry itself. Page is returned mapped and unlocked. |
332 | * Entry is guaranteed to be valid. |
333 | * |
334 | * On Success folio_release_kmap() should be called on *foliop. |
335 | * |
336 | * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested |
337 | * according to the rules documented in kmap_local_folio()/kunmap_local(). |
338 | * |
339 | * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio() |
340 | * and should be treated as a call to ext2_get_folio() for nesting |
341 | * purposes. |
342 | */ |
343 | struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir, |
344 | const struct qstr *child, struct folio **foliop) |
345 | { |
346 | const char *name = child->name; |
347 | int namelen = child->len; |
348 | unsigned reclen = EXT2_DIR_REC_LEN(namelen); |
349 | unsigned long start, n; |
350 | unsigned long npages = dir_pages(inode: dir); |
351 | struct ext2_inode_info *ei = EXT2_I(inode: dir); |
352 | ext2_dirent * de; |
353 | |
354 | if (npages == 0) |
355 | goto out; |
356 | |
357 | start = ei->i_dir_start_lookup; |
358 | if (start >= npages) |
359 | start = 0; |
360 | n = start; |
361 | do { |
362 | char *kaddr = ext2_get_folio(dir, n, quiet: 0, foliop); |
363 | if (IS_ERR(ptr: kaddr)) |
364 | return ERR_CAST(ptr: kaddr); |
365 | |
366 | de = (ext2_dirent *) kaddr; |
367 | kaddr += ext2_last_byte(inode: dir, page_nr: n) - reclen; |
368 | while ((char *) de <= kaddr) { |
369 | if (de->rec_len == 0) { |
370 | ext2_error(dir->i_sb, __func__, |
371 | "zero-length directory entry" ); |
372 | folio_release_kmap(folio: *foliop, addr: de); |
373 | goto out; |
374 | } |
375 | if (ext2_match(len: namelen, name, de)) |
376 | goto found; |
377 | de = ext2_next_entry(p: de); |
378 | } |
379 | folio_release_kmap(folio: *foliop, addr: kaddr); |
380 | |
381 | if (++n >= npages) |
382 | n = 0; |
383 | /* next folio is past the blocks we've got */ |
384 | if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { |
385 | ext2_error(dir->i_sb, __func__, |
386 | "dir %lu size %lld exceeds block count %llu" , |
387 | dir->i_ino, dir->i_size, |
388 | (unsigned long long)dir->i_blocks); |
389 | goto out; |
390 | } |
391 | } while (n != start); |
392 | out: |
393 | return ERR_PTR(error: -ENOENT); |
394 | |
395 | found: |
396 | ei->i_dir_start_lookup = n; |
397 | return de; |
398 | } |
399 | |
400 | /* |
401 | * Return the '..' directory entry and the page in which the entry was found |
402 | * (as a parameter - p). |
403 | * |
404 | * On Success folio_release_kmap() should be called on *foliop. |
405 | * |
406 | * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested |
407 | * according to the rules documented in kmap_local_folio()/kunmap_local(). |
408 | * |
409 | * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio() |
410 | * and should be treated as a call to ext2_get_folio() for nesting |
411 | * purposes. |
412 | */ |
413 | struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct folio **foliop) |
414 | { |
415 | ext2_dirent *de = ext2_get_folio(dir, n: 0, quiet: 0, foliop); |
416 | |
417 | if (!IS_ERR(ptr: de)) |
418 | return ext2_next_entry(p: de); |
419 | return NULL; |
420 | } |
421 | |
422 | int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino) |
423 | { |
424 | struct ext2_dir_entry_2 *de; |
425 | struct folio *folio; |
426 | |
427 | de = ext2_find_entry(dir, child, foliop: &folio); |
428 | if (IS_ERR(ptr: de)) |
429 | return PTR_ERR(ptr: de); |
430 | |
431 | *ino = le32_to_cpu(de->inode); |
432 | folio_release_kmap(folio, addr: de); |
433 | return 0; |
434 | } |
435 | |
436 | static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len) |
437 | { |
438 | return __block_write_begin(page: &folio->page, pos, len, get_block: ext2_get_block); |
439 | } |
440 | |
441 | static int ext2_handle_dirsync(struct inode *dir) |
442 | { |
443 | int err; |
444 | |
445 | err = filemap_write_and_wait(mapping: dir->i_mapping); |
446 | if (!err) |
447 | err = sync_inode_metadata(inode: dir, wait: 1); |
448 | return err; |
449 | } |
450 | |
451 | int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de, |
452 | struct folio *folio, struct inode *inode, bool update_times) |
453 | { |
454 | loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); |
455 | unsigned len = ext2_rec_len_from_disk(dlen: de->rec_len); |
456 | int err; |
457 | |
458 | folio_lock(folio); |
459 | err = ext2_prepare_chunk(folio, pos, len); |
460 | if (err) { |
461 | folio_unlock(folio); |
462 | return err; |
463 | } |
464 | de->inode = cpu_to_le32(inode->i_ino); |
465 | ext2_set_de_type(de, inode); |
466 | ext2_commit_chunk(folio, pos, len); |
467 | if (update_times) |
468 | inode_set_mtime_to_ts(inode: dir, ts: inode_set_ctime_current(inode: dir)); |
469 | EXT2_I(inode: dir)->i_flags &= ~EXT2_BTREE_FL; |
470 | mark_inode_dirty(inode: dir); |
471 | return ext2_handle_dirsync(dir); |
472 | } |
473 | |
474 | /* |
475 | * Parent is locked. |
476 | */ |
477 | int ext2_add_link (struct dentry *dentry, struct inode *inode) |
478 | { |
479 | struct inode *dir = d_inode(dentry: dentry->d_parent); |
480 | const char *name = dentry->d_name.name; |
481 | int namelen = dentry->d_name.len; |
482 | unsigned chunk_size = ext2_chunk_size(inode: dir); |
483 | unsigned reclen = EXT2_DIR_REC_LEN(namelen); |
484 | unsigned short rec_len, name_len; |
485 | struct folio *folio = NULL; |
486 | ext2_dirent * de; |
487 | unsigned long npages = dir_pages(inode: dir); |
488 | unsigned long n; |
489 | loff_t pos; |
490 | int err; |
491 | |
492 | /* |
493 | * We take care of directory expansion in the same loop. |
494 | * This code plays outside i_size, so it locks the folio |
495 | * to protect that region. |
496 | */ |
497 | for (n = 0; n <= npages; n++) { |
498 | char *kaddr = ext2_get_folio(dir, n, quiet: 0, foliop: &folio); |
499 | char *dir_end; |
500 | |
501 | if (IS_ERR(ptr: kaddr)) |
502 | return PTR_ERR(ptr: kaddr); |
503 | folio_lock(folio); |
504 | dir_end = kaddr + ext2_last_byte(inode: dir, page_nr: n); |
505 | de = (ext2_dirent *)kaddr; |
506 | kaddr += folio_size(folio) - reclen; |
507 | while ((char *)de <= kaddr) { |
508 | if ((char *)de == dir_end) { |
509 | /* We hit i_size */ |
510 | name_len = 0; |
511 | rec_len = chunk_size; |
512 | de->rec_len = ext2_rec_len_to_disk(len: chunk_size); |
513 | de->inode = 0; |
514 | goto got_it; |
515 | } |
516 | if (de->rec_len == 0) { |
517 | ext2_error(dir->i_sb, __func__, |
518 | "zero-length directory entry" ); |
519 | err = -EIO; |
520 | goto out_unlock; |
521 | } |
522 | err = -EEXIST; |
523 | if (ext2_match (len: namelen, name, de)) |
524 | goto out_unlock; |
525 | name_len = EXT2_DIR_REC_LEN(de->name_len); |
526 | rec_len = ext2_rec_len_from_disk(dlen: de->rec_len); |
527 | if (!de->inode && rec_len >= reclen) |
528 | goto got_it; |
529 | if (rec_len >= name_len + reclen) |
530 | goto got_it; |
531 | de = (ext2_dirent *) ((char *) de + rec_len); |
532 | } |
533 | folio_unlock(folio); |
534 | folio_release_kmap(folio, addr: kaddr); |
535 | } |
536 | BUG(); |
537 | return -EINVAL; |
538 | |
539 | got_it: |
540 | pos = folio_pos(folio) + offset_in_folio(folio, de); |
541 | err = ext2_prepare_chunk(folio, pos, len: rec_len); |
542 | if (err) |
543 | goto out_unlock; |
544 | if (de->inode) { |
545 | ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len); |
546 | de1->rec_len = ext2_rec_len_to_disk(len: rec_len - name_len); |
547 | de->rec_len = ext2_rec_len_to_disk(len: name_len); |
548 | de = de1; |
549 | } |
550 | de->name_len = namelen; |
551 | memcpy(de->name, name, namelen); |
552 | de->inode = cpu_to_le32(inode->i_ino); |
553 | ext2_set_de_type (de, inode); |
554 | ext2_commit_chunk(folio, pos, len: rec_len); |
555 | inode_set_mtime_to_ts(inode: dir, ts: inode_set_ctime_current(inode: dir)); |
556 | EXT2_I(inode: dir)->i_flags &= ~EXT2_BTREE_FL; |
557 | mark_inode_dirty(inode: dir); |
558 | err = ext2_handle_dirsync(dir); |
559 | /* OFFSET_CACHE */ |
560 | out_put: |
561 | folio_release_kmap(folio, addr: de); |
562 | return err; |
563 | out_unlock: |
564 | folio_unlock(folio); |
565 | goto out_put; |
566 | } |
567 | |
568 | /* |
569 | * ext2_delete_entry deletes a directory entry by merging it with the |
570 | * previous entry. Page is up-to-date. |
571 | */ |
572 | int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct folio *folio) |
573 | { |
574 | struct inode *inode = folio->mapping->host; |
575 | size_t from, to; |
576 | char *kaddr; |
577 | loff_t pos; |
578 | ext2_dirent *de, *pde = NULL; |
579 | int err; |
580 | |
581 | from = offset_in_folio(folio, dir); |
582 | to = from + ext2_rec_len_from_disk(dlen: dir->rec_len); |
583 | kaddr = (char *)dir - from; |
584 | from &= ~(ext2_chunk_size(inode)-1); |
585 | de = (ext2_dirent *)(kaddr + from); |
586 | |
587 | while ((char*)de < (char*)dir) { |
588 | if (de->rec_len == 0) { |
589 | ext2_error(inode->i_sb, __func__, |
590 | "zero-length directory entry" ); |
591 | return -EIO; |
592 | } |
593 | pde = de; |
594 | de = ext2_next_entry(p: de); |
595 | } |
596 | if (pde) |
597 | from = offset_in_folio(folio, pde); |
598 | pos = folio_pos(folio) + from; |
599 | folio_lock(folio); |
600 | err = ext2_prepare_chunk(folio, pos, len: to - from); |
601 | if (err) { |
602 | folio_unlock(folio); |
603 | return err; |
604 | } |
605 | if (pde) |
606 | pde->rec_len = ext2_rec_len_to_disk(len: to - from); |
607 | dir->inode = 0; |
608 | ext2_commit_chunk(folio, pos, len: to - from); |
609 | inode_set_mtime_to_ts(inode, ts: inode_set_ctime_current(inode)); |
610 | EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL; |
611 | mark_inode_dirty(inode); |
612 | return ext2_handle_dirsync(dir: inode); |
613 | } |
614 | |
615 | /* |
616 | * Set the first fragment of directory. |
617 | */ |
618 | int ext2_make_empty(struct inode *inode, struct inode *parent) |
619 | { |
620 | struct folio *folio = filemap_grab_folio(mapping: inode->i_mapping, index: 0); |
621 | unsigned chunk_size = ext2_chunk_size(inode); |
622 | struct ext2_dir_entry_2 * de; |
623 | int err; |
624 | void *kaddr; |
625 | |
626 | if (IS_ERR(ptr: folio)) |
627 | return PTR_ERR(ptr: folio); |
628 | |
629 | err = ext2_prepare_chunk(folio, pos: 0, len: chunk_size); |
630 | if (err) { |
631 | folio_unlock(folio); |
632 | goto fail; |
633 | } |
634 | kaddr = kmap_local_folio(folio, offset: 0); |
635 | memset(kaddr, 0, chunk_size); |
636 | de = (struct ext2_dir_entry_2 *)kaddr; |
637 | de->name_len = 1; |
638 | de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1)); |
639 | memcpy (de->name, ".\0\0" , 4); |
640 | de->inode = cpu_to_le32(inode->i_ino); |
641 | ext2_set_de_type (de, inode); |
642 | |
643 | de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1)); |
644 | de->name_len = 2; |
645 | de->rec_len = ext2_rec_len_to_disk(len: chunk_size - EXT2_DIR_REC_LEN(1)); |
646 | de->inode = cpu_to_le32(parent->i_ino); |
647 | memcpy (de->name, "..\0" , 4); |
648 | ext2_set_de_type (de, inode); |
649 | kunmap_local(kaddr); |
650 | ext2_commit_chunk(folio, pos: 0, len: chunk_size); |
651 | err = ext2_handle_dirsync(dir: inode); |
652 | fail: |
653 | folio_put(folio); |
654 | return err; |
655 | } |
656 | |
657 | /* |
658 | * routine to check that the specified directory is empty (for rmdir) |
659 | */ |
660 | int ext2_empty_dir(struct inode *inode) |
661 | { |
662 | struct folio *folio; |
663 | char *kaddr; |
664 | unsigned long i, npages = dir_pages(inode); |
665 | |
666 | for (i = 0; i < npages; i++) { |
667 | ext2_dirent *de; |
668 | |
669 | kaddr = ext2_get_folio(dir: inode, n: i, quiet: 0, foliop: &folio); |
670 | if (IS_ERR(ptr: kaddr)) |
671 | return 0; |
672 | |
673 | de = (ext2_dirent *)kaddr; |
674 | kaddr += ext2_last_byte(inode, page_nr: i) - EXT2_DIR_REC_LEN(1); |
675 | |
676 | while ((char *)de <= kaddr) { |
677 | if (de->rec_len == 0) { |
678 | ext2_error(inode->i_sb, __func__, |
679 | "zero-length directory entry" ); |
680 | printk("kaddr=%p, de=%p\n" , kaddr, de); |
681 | goto not_empty; |
682 | } |
683 | if (de->inode != 0) { |
684 | /* check for . and .. */ |
685 | if (de->name[0] != '.') |
686 | goto not_empty; |
687 | if (de->name_len > 2) |
688 | goto not_empty; |
689 | if (de->name_len < 2) { |
690 | if (de->inode != |
691 | cpu_to_le32(inode->i_ino)) |
692 | goto not_empty; |
693 | } else if (de->name[1] != '.') |
694 | goto not_empty; |
695 | } |
696 | de = ext2_next_entry(p: de); |
697 | } |
698 | folio_release_kmap(folio, addr: kaddr); |
699 | } |
700 | return 1; |
701 | |
702 | not_empty: |
703 | folio_release_kmap(folio, addr: kaddr); |
704 | return 0; |
705 | } |
706 | |
/* File operations for ext2 directories; iteration is lock-shared. */
const struct file_operations ext2_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= ext2_readdir,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.fsync		= ext2_fsync,
};
717 | |