// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/dir.c - kernfs directory implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/hash.h>

#include "kernfs-internal.h"

static DEFINE_RWLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
/*
 * Don't use rename_lock to piggy back on pr_cont_buf.  We don't want to
 * call pr_cont() while holding rename_lock because pr_cont() sometimes
 * performs wakeups when releasing console_sem, and holding rename_lock
 * would deadlock if the scheduler reads kernfs_name in the wakeup path.
 */
static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by pr_cont_lock */
static DEFINE_SPINLOCK(kernfs_idr_lock);	/* root->ino_idr */

#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)

static bool __kernfs_active(struct kernfs_node *kn)
{
	return atomic_read(&kn->active) >= 0;
}

static bool kernfs_active(struct kernfs_node *kn)
{
	lockdep_assert_held(&kernfs_root(kn)->kernfs_rwsem);
	return __kernfs_active(kn);
}

static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return kn->flags & KERNFS_LOCKDEP;
#else
	return false;
#endif
}

static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
{
	if (!kn)
		return strlcpy(buf, "(null)", buflen);

	return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
}

/* kernfs_node_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
	size_t depth = 0;

	while (to->parent && to != from) {
		depth++;
		to = to->parent;
	}
	return depth;
}

static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
						  struct kernfs_node *b)
{
	size_t da, db;
	struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);

	if (ra != rb)
		return NULL;

	da = kernfs_depth(ra->kn, a);
	db = kernfs_depth(rb->kn, b);

	while (da > db) {
		a = a->parent;
		da--;
	}
	while (db > da) {
		b = b->parent;
		db--;
	}

	/* worst case b and a will be the same at root */
	while (b != a) {
		b = b->parent;
		a = a->parent;
	}

	return a;
}

/**
 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
 * where kn_from is treated as root of the path.
 * @kn_from: kernfs node which should be treated as root for the path
 * @kn_to: kernfs node to which path is needed
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * We need to handle a couple of scenarios here:
 * [1] when @kn_from is an ancestor of @kn_to at some level
 * kn_from: /n1/n2/n3
 * kn_to:   /n1/n2/n3/n4/n5
 * result:  /n4/n5
 *
 * [2] when @kn_from is on a different hierarchy and we need to find common
 * ancestor between @kn_from and @kn_to.
 * kn_from: /n1/n2/n3/n4
 * kn_to:   /n1/n2/n5
 * result:  /../../n5
 * OR
 * kn_from: /n1/n2/n3/n4/n5   [depth=5]
 * kn_to:   /n1/n2/n3         [depth=3]
 * result:  /../..
 *
 * [3] when @kn_to is %NULL result will be "(null)"
 *
 * Return: the length of the full path.  If the full length is equal to or
 * greater than @buflen, @buf contains the truncated path with the trailing
 * '\0'.  On error, -errno is returned.
 */
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
					struct kernfs_node *kn_from,
					char *buf, size_t buflen)
{
	struct kernfs_node *kn, *common;
	const char parent_str[] = "/..";
	size_t depth_from, depth_to, len = 0;
	int i, j;

	if (!kn_to)
		return strlcpy(buf, "(null)", buflen);

	if (!kn_from)
		kn_from = kernfs_root(kn_to)->kn;

	if (kn_from == kn_to)
		return strlcpy(buf, "/", buflen);

	common = kernfs_common_ancestor(kn_from, kn_to);
	if (WARN_ON(!common))
		return -EINVAL;

	depth_to = kernfs_depth(common, kn_to);
	depth_from = kernfs_depth(common, kn_from);

	buf[0] = '\0';

	for (i = 0; i < depth_from; i++)
		len += strlcpy(buf + len, parent_str,
			       len < buflen ? buflen - len : 0);

	/* Calculate how many bytes we need for the rest */
	for (i = depth_to - 1; i >= 0; i--) {
		for (kn = kn_to, j = 0; j < i; j++)
			kn = kn->parent;
		len += strlcpy(buf + len, "/",
			       len < buflen ? buflen - len : 0);
		len += strlcpy(buf + len, kn->name,
			       len < buflen ? buflen - len : 0);
	}

	return len;
}

/**
 * kernfs_name - obtain the name of a given node
 * @kn: kernfs_node of interest
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * Copies the name of @kn into @buf of @buflen bytes.  The behavior is
 * similar to strlcpy().
 *
 * Fills buffer with "(null)" if @kn is %NULL.
 *
 * Return: the length of @kn's name.  If @buf isn't long enough, it is
 * filled up to @buflen-1 and nul terminated.
 *
 * This function can be called from any context.
 */
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_name_locked(kn, buf, buflen);
	read_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}

/**
 * kernfs_path_from_node - build path of node @to relative to @from.
 * @from: parent kernfs_node relative to which we need to build the path
 * @to: kernfs_node of interest
 * @buf: buffer to copy @to's path into
 * @buflen: size of @buf
 *
 * Builds @to's path relative to @from in @buf.  @from and @to must be on
 * the same kernfs-root.  If @from is not an ancestor of @to, a relative
 * path (which includes '..'s) as needed to reach from @from to @to is
 * returned.
 *
 * Return: the length of the full path.  If the full length is equal to or
 * greater than @buflen, @buf contains the truncated path with the trailing
 * '\0'.  On error, -errno is returned.
 */
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
			  char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_path_from_node_locked(to, from, buf, buflen);
	read_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);

/**
 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_name(struct kernfs_node *kn)
{
	unsigned long flags;

	spin_lock_irqsave(&kernfs_pr_cont_lock, flags);

	kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
	pr_cont("%s", kernfs_pr_cont_buf);

	spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}

/**
 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
	unsigned long flags;
	int sz;

	spin_lock_irqsave(&kernfs_pr_cont_lock, flags);

	sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
				   sizeof(kernfs_pr_cont_buf));
	if (sz < 0) {
		pr_cont("(error)");
		goto out;
	}

	if (sz >= sizeof(kernfs_pr_cont_buf)) {
		pr_cont("(name too long)");
		goto out;
	}

	pr_cont("%s", kernfs_pr_cont_buf);

out:
	spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}

/**
 * kernfs_get_parent - determine the parent node and pin it
 * @kn: kernfs_node of interest
 *
 * Determines @kn's parent, pins and returns it.  This function can be
 * called from any context.
 *
 * Return: parent node of @kn
 */
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	unsigned long flags;

	read_lock_irqsave(&kernfs_rename_lock, flags);
	parent = kn->parent;
	kernfs_get(parent);
	read_unlock_irqrestore(&kernfs_rename_lock, flags);

	return parent;
}

/**
 * kernfs_name_hash - calculate hash of @ns + @name
 * @name: Null terminated string to hash
 * @ns:   Namespace tag to hash
 *
 * Return: 31-bit hash of ns + name (so it fits in an off_t)
 */
static unsigned int kernfs_name_hash(const char *name, const void *ns)
{
	unsigned long hash = init_name_hash(ns);
	unsigned int len = strlen(name);

	while (len--)
		hash = partial_name_hash(*name++, hash);
	hash = end_name_hash(hash);
	hash &= 0x7fffffffU;
	/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
	if (hash < 2)
		hash += 2;
	if (hash >= INT_MAX)
		hash = INT_MAX - 1;
	return hash;
}

static int kernfs_name_compare(unsigned int hash, const char *name,
			       const void *ns, const struct kernfs_node *kn)
{
	if (hash < kn->hash)
		return -1;
	if (hash > kn->hash)
		return 1;
	if (ns < kn->ns)
		return -1;
	if (ns > kn->ns)
		return 1;
	return strcmp(name, kn->name);
}

static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}

/**
 * kernfs_link_sibling - link kernfs_node into sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Link @kn into its sibling rbtree which starts from
 * @kn->parent->dir.children.
 *
 * Locking:
 * kernfs_rwsem held exclusive
 *
 * Return:
 * %0 on success, -EEXIST on failure.
 */
static int kernfs_link_sibling(struct kernfs_node *kn)
{
	struct rb_node **node = &kn->parent->dir.children.rb_node;
	struct rb_node *parent = NULL;

	while (*node) {
		struct kernfs_node *pos;
		int result;

		pos = rb_to_kn(*node);
		parent = *node;
		result = kernfs_sd_compare(kn, pos);
		if (result < 0)
			node = &pos->rb.rb_left;
		else if (result > 0)
			node = &pos->rb.rb_right;
		else
			return -EEXIST;
	}

	/* add new node and rebalance the tree */
	rb_link_node(&kn->rb, parent, node);
	rb_insert_color(&kn->rb, &kn->parent->dir.children);

	/* successfully added, account subdir number */
	down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs++;
	kernfs_inc_rev(kn->parent);
	up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);

	return 0;
}

/**
 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Try to unlink @kn from its sibling rbtree which starts from
 * kn->parent->dir.children.
 *
 * Return: %true if @kn was actually removed,
 * %false if @kn wasn't on the rbtree.
 *
 * Locking:
 * kernfs_rwsem held exclusive
 */
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
	if (RB_EMPTY_NODE(&kn->rb))
		return false;

	down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs--;
	kernfs_inc_rev(kn->parent);
	up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);

	rb_erase(&kn->rb, &kn->parent->dir.children);
	RB_CLEAR_NODE(&kn->rb);
	return true;
}

/**
 * kernfs_get_active - get an active reference to kernfs_node
 * @kn: kernfs_node to get an active reference to
 *
 * Get an active reference of @kn.  This function is noop if @kn
 * is %NULL.
 *
 * Return:
 * Pointer to @kn on success, %NULL on failure.
 */
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
	if (unlikely(!kn))
		return NULL;

	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;

	if (kernfs_lockdep(kn))
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
}

/**
 * kernfs_put_active - put an active reference to kernfs_node
 * @kn: kernfs_node to put an active reference to
 *
 * Put an active reference to @kn.  This function is noop if @kn
 * is %NULL.
 */
void kernfs_put_active(struct kernfs_node *kn)
{
	int v;

	if (unlikely(!kn))
		return;

	if (kernfs_lockdep(kn))
		rwsem_release(&kn->dep_map, _RET_IP_);
	v = atomic_dec_return(&kn->active);
	if (likely(v != KN_DEACTIVATED_BIAS))
		return;

	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}
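
/*
 * Illustrative usage sketch (not part of the original file): active
 * references are taken and released in pairs around operations on a node,
 * as kernfs_iop_mkdir() and friends do below, e.g.
 *
 *	if (!kernfs_get_active(kn))
 *		return -ENODEV;
 *	...operate on @kn...
 *	kernfs_put_active(kn);
 */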

/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages and nuke all existing mmaps of @kn.  Multiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
 */
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_root(kn)->kernfs_rwsem)
	__acquires(&kernfs_root(kn)->kernfs_rwsem)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held_write(&root->kernfs_rwsem);
	WARN_ON_ONCE(kernfs_active(kn));

	/*
	 * Skip draining if already fully drained. This avoids draining and its
	 * lockdep annotations for nodes which have never been activated
	 * allowing embedding kernfs_remove() in create error paths without
	 * worrying about draining.
	 */
	if (atomic_read(&kn->active) == KN_DEACTIVATED_BIAS &&
	    !kernfs_should_drain_open_files(kn))
		return;

	up_write(&root->kernfs_rwsem);

	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, _RET_IP_);
	}

	if (kernfs_should_drain_open_files(kn))
		kernfs_drain_open_files(kn);

	down_write(&root->kernfs_rwsem);
}

/**
 * kernfs_get - get a reference count on a kernfs_node
 * @kn: the target kernfs_node
 */
void kernfs_get(struct kernfs_node *kn)
{
	if (kn) {
		WARN_ON(!atomic_read(&kn->count));
		atomic_inc(&kn->count);
	}
}
EXPORT_SYMBOL_GPL(kernfs_get);

/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);

	kfree_const(kn->name);

	if (kn->iattr) {
		simple_xattrs_free(&kn->iattr->xattrs, NULL);
		kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
	}
	spin_lock(&kernfs_idr_lock);
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
	spin_unlock(&kernfs_idr_lock);
	kmem_cache_free(kernfs_node_cache, kn);

	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		idr_destroy(&root->ino_idr);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);

/**
 * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
 * @dentry: the dentry in question
 *
 * Return: the kernfs_node associated with @dentry.  If @dentry is not a
 * kernfs one, %NULL is returned.
 *
 * While the returned kernfs_node will stay accessible as long as @dentry
 * is accessible, the returned node can be in any state and the caller is
 * fully responsible for determining what's accessible.
 */
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
{
	if (dentry->d_sb->s_op == &kernfs_sops)
		return kernfs_dentry_node(dentry);
	return NULL;
}

static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     struct kernfs_node *parent,
					     const char *name, umode_t mode,
					     kuid_t uid, kgid_t gid,
					     unsigned flags)
{
	struct kernfs_node *kn;
	u32 id_highbits;
	int ret;

	name = kstrdup_const(name, GFP_KERNEL);
	if (!name)
		return NULL;

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	idr_preload(GFP_KERNEL);
	spin_lock(&kernfs_idr_lock);
	ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
	if (ret >= 0 && ret < root->last_id_lowbits)
		root->id_highbits++;
	id_highbits = root->id_highbits;
	root->last_id_lowbits = ret;
	spin_unlock(&kernfs_idr_lock);
	idr_preload_end();
	if (ret < 0)
		goto err_out2;

	kn->id = (u64)id_highbits << 32 | ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags;

	if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
		struct iattr iattr = {
			.ia_valid = ATTR_UID | ATTR_GID,
			.ia_uid = uid,
			.ia_gid = gid,
		};

		ret = __kernfs_setattr(kn, &iattr);
		if (ret < 0)
			goto err_out3;
	}

	if (parent) {
		ret = security_kernfs_init_security(parent, kn);
		if (ret)
			goto err_out3;
	}

	return kn;

 err_out3:
	spin_lock(&kernfs_idr_lock);
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
	spin_unlock(&kernfs_idr_lock);
 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree_const(name);
	return NULL;
}

struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
				    const char *name, umode_t mode,
				    kuid_t uid, kgid_t gid,
				    unsigned flags)
{
	struct kernfs_node *kn;

	kn = __kernfs_new_node(kernfs_root(parent), parent,
			       name, mode, uid, gid, flags);
	if (kn) {
		kernfs_get(parent);
		kn->parent = parent;
	}
	return kn;
}

/*
 * kernfs_find_and_get_node_by_id - get kernfs_node from node id
 * @root: the kernfs root
 * @id: the target node id
 *
 * @id's lower 32bits encode ino and upper gen.  If the gen portion is
 * zero, all generations are matched.
 *
 * Return: %NULL on failure,
 * otherwise a kernfs node with reference counter incremented.
 */
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
						   u64 id)
{
	struct kernfs_node *kn;
	ino_t ino = kernfs_id_ino(id);
	u32 gen = kernfs_id_gen(id);

	spin_lock(&kernfs_idr_lock);

	kn = idr_find(&root->ino_idr, (u32)ino);
	if (!kn)
		goto err_unlock;

	if (sizeof(ino_t) >= sizeof(u64)) {
		/* we looked up with the low 32bits, compare the whole */
		if (kernfs_ino(kn) != ino)
			goto err_unlock;
	} else {
		/* 0 matches all generations */
		if (unlikely(gen && kernfs_gen(kn) != gen))
			goto err_unlock;
	}

	/*
	 * We should fail if @kn has never been activated and guarantee success
	 * if the caller knows that @kn is active.  Both can be achieved by
	 * __kernfs_active() which tests @kn->active without kernfs_rwsem.
	 */
	if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
		goto err_unlock;

	spin_unlock(&kernfs_idr_lock);
	return kn;
err_unlock:
	spin_unlock(&kernfs_idr_lock);
	return NULL;
}
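
/*
 * Illustrative usage sketch (not part of the original file): a successful
 * lookup by id returns a pinned node which the caller must release with
 * kernfs_put() once done, e.g.
 *
 *	kn = kernfs_find_and_get_node_by_id(root, id);
 *	if (kn) {
 *		...use @kn...
 *		kernfs_put(kn);
 *	}
 */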

/**
 * kernfs_add_one - add kernfs_node to parent without warning
 * @kn: kernfs_node to be added
 *
 * The caller must already have initialized @kn->parent.  This function
 * increments nlink of the parent's inode if @kn is a directory and links
 * @kn into the parent's children rbtree.
 *
 * Return:
 * %0 on success, -EEXIST if entry with the given name already
 * exists.
 */
int kernfs_add_one(struct kernfs_node *kn)
{
	struct kernfs_node *parent = kn->parent;
	struct kernfs_root *root = kernfs_root(parent);
	struct kernfs_iattrs *ps_iattr;
	bool has_ns;
	int ret;

	down_write(&root->kernfs_rwsem);

	ret = -EINVAL;
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;

	if (kernfs_type(parent) != KERNFS_DIR)
		goto out_unlock;

	ret = -ENOENT;
	if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
		goto out_unlock;

	kn->hash = kernfs_name_hash(kn->name, kn->ns);

	ret = kernfs_link_sibling(kn);
	if (ret)
		goto out_unlock;

	/* Update timestamps on the parent */
	down_write(&root->kernfs_iattr_rwsem);

	ps_iattr = parent->iattr;
	if (ps_iattr) {
		ktime_get_real_ts64(&ps_iattr->ia_ctime);
		ps_iattr->ia_mtime = ps_iattr->ia_ctime;
	}

	up_write(&root->kernfs_iattr_rwsem);
	up_write(&root->kernfs_rwsem);

	/*
	 * Activate the new node unless CREATE_DEACTIVATED is requested.
	 * If not activated here, the kernfs user is responsible for
	 * activating the node with kernfs_activate().  A node which hasn't
	 * been activated is not visible to userland and its removal won't
	 * trigger deactivation.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

out_unlock:
	up_write(&root->kernfs_rwsem);
	return ret;
}

/**
 * kernfs_find_ns - find kernfs_node with the given name
 * @parent: kernfs_node to search under
 * @name: name to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with name @name under @parent.
 *
 * Return: pointer to the found kernfs_node on success, %NULL on failure.
 */
static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
					  const unsigned char *name,
					  const void *ns)
{
	struct rb_node *node = parent->dir.children.rb_node;
	bool has_ns = kernfs_ns_enabled(parent);
	unsigned int hash;

	lockdep_assert_held(&kernfs_root(parent)->kernfs_rwsem);

	if (has_ns != (bool)ns) {
		WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		     has_ns ? "required" : "invalid", parent->name, name);
		return NULL;
	}

	hash = kernfs_name_hash(name, ns);
	while (node) {
		struct kernfs_node *kn;
		int result;

		kn = rb_to_kn(node);
		result = kernfs_name_compare(hash, name, ns, kn);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return kn;
	}
	return NULL;
}

static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
					  const unsigned char *path,
					  const void *ns)
{
	size_t len;
	char *p, *name;

	lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);

	spin_lock_irq(&kernfs_pr_cont_lock);

	len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));

	if (len >= sizeof(kernfs_pr_cont_buf)) {
		spin_unlock_irq(&kernfs_pr_cont_lock);
		return NULL;
	}

	p = kernfs_pr_cont_buf;

	while ((name = strsep(&p, "/")) && parent) {
		if (*name == '\0')
			continue;
		parent = kernfs_find_ns(parent, name, ns);
	}

	spin_unlock_irq(&kernfs_pr_cont_lock);

	return parent;
}

/**
 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
 * @parent: kernfs_node to search under
 * @name: name to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with name @name under @parent and get a reference
 * if found.  This function may sleep.
 *
 * Return: pointer to the found kernfs_node on success, %NULL on failure.
 */
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
					   const char *name, const void *ns)
{
	struct kernfs_node *kn;
	struct kernfs_root *root = kernfs_root(parent);

	down_read(&root->kernfs_rwsem);
	kn = kernfs_find_ns(parent, name, ns);
	kernfs_get(kn);
	up_read(&root->kernfs_rwsem);

	return kn;
}
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);

/**
 * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
 * @parent: kernfs_node to search under
 * @path: path to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with path @path under @parent and get a reference
 * if found.  This function may sleep.
 *
 * Return: pointer to the found kernfs_node on success, %NULL on failure.
 */
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
					   const char *path, const void *ns)
{
	struct kernfs_node *kn;
	struct kernfs_root *root = kernfs_root(parent);

	down_read(&root->kernfs_rwsem);
	kn = kernfs_walk_ns(parent, path, ns);
	kernfs_get(kn);
	up_read(&root->kernfs_rwsem);

	return kn;
}
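
/*
 * Illustrative usage sketch (not part of the original file): both lookup
 * helpers return a node with an extra reference which the caller drops
 * with kernfs_put(), e.g.
 *
 *	kn = kernfs_walk_and_get_ns(parent, "a/b/c", NULL);
 *	if (kn) {
 *		...use @kn...
 *		kernfs_put(kn);
 *	}
 */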

/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Return: the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	idr_init(&root->ino_idr);
	init_rwsem(&root->kernfs_rwsem);
	init_rwsem(&root->kernfs_iattr_rwsem);
	init_rwsem(&root->kernfs_supers_rwsem);
	INIT_LIST_HEAD(&root->supers);

	/*
	 * On 64bit ino setups, id is ino.  On 32bit, low 32bits are ino and
	 * the high bits are the generation.  The starting value for both ino
	 * and generation is 1.  Initialize upper 32bit allocation
	 * accordingly.
	 */
	if (sizeof(ino_t) >= sizeof(u64))
		root->id_highbits = 0;
	else
		root->id_highbits = 1;

	kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
			       KERNFS_DIR);
	if (!kn) {
		idr_destroy(&root->ino_idr);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}

/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
	/*
	 * kernfs_remove holds kernfs_rwsem from the root so the root
	 * shouldn't be freed during the operation.
	 */
	kernfs_get(root->kn);
	kernfs_remove(root->kn);
	kernfs_put(root->kn); /* will also free @root */
}

/**
 * kernfs_root_to_node - return the kernfs_node associated with a kernfs_root
 * @root: root to use to lookup
 *
 * Return: @root's kernfs_node
 */
struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root)
{
	return root->kn;
}

/**
 * kernfs_create_dir_ns - create a directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 * @mode: mode of the new directory
 * @uid: uid of the new directory
 * @gid: gid of the new directory
 * @priv: opaque data associated with the new directory
 * @ns: optional namespace tag of the directory
 *
 * Return: the created node on success, ERR_PTR() value on failure.
 */
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
					 const char *name, umode_t mode,
					 kuid_t uid, kgid_t gid,
					 void *priv, const void *ns)
{
	struct kernfs_node *kn;
	int rc;

	/* allocate */
	kn = kernfs_new_node(parent, name, mode | S_IFDIR,
			     uid, gid, KERNFS_DIR);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->dir.root = parent->dir.root;
	kn->ns = ns;
	kn->priv = priv;

	/* link in */
	rc = kernfs_add_one(kn);
	if (!rc)
		return kn;

	kernfs_put(kn);
	return ERR_PTR(rc);
}
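
/*
 * Illustrative usage sketch (not part of the original file): a kernfs user
 * typically builds a hierarchy by creating a root and directories under it;
 * with %KERNFS_ROOT_CREATE_DEACTIVATED the nodes become visible only after
 * an explicit kernfs_activate().  my_scops and priv below are hypothetical
 * caller-provided values, e.g.
 *
 *	root = kernfs_create_root(&my_scops, KERNFS_ROOT_CREATE_DEACTIVATED,
 *				  priv);
 *	kn = kernfs_create_dir_ns(kernfs_root_to_node(root), "foo", 0755,
 *				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, priv, NULL);
 *	kernfs_activate(kernfs_root_to_node(root));
 */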

/**
 * kernfs_create_empty_dir - create an always empty directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 *
 * Return: the created node on success, ERR_PTR() value on failure.
 */
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
					    const char *name)
{
	struct kernfs_node *kn;
	int rc;

	/* allocate */
	kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
			     GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->flags |= KERNFS_EMPTY_DIR;
	kn->dir.root = parent->dir.root;
	kn->ns = NULL;
	kn->priv = NULL;

	/* link in */
	rc = kernfs_add_one(kn);
	if (!rc)
		return kn;

	kernfs_put(kn);
	return ERR_PTR(rc);
}

static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct kernfs_node *kn;
	struct kernfs_root *root;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Negative hashed dentry? */
	if (d_really_is_negative(dentry)) {
		struct kernfs_node *parent;

		/* If the kernfs parent node has changed, discard and
		 * proceed to ->lookup.
		 *
		 * There's nothing special needed here when getting the
		 * dentry parent, even if a concurrent rename is in
		 * progress.  That's because the dentry is negative so
		 * it can only be the target of the rename and it will
		 * be doing a d_move() not a replace.  Consequently the
		 * dentry d_parent won't change over the d_move().
		 *
		 * Also kernfs negative dentries transitioning from
		 * negative to positive during revalidate won't happen
		 * because they are invalidated on containing directory
		 * changes and the lookup re-done so that a new positive
		 * dentry can be properly created.
		 */
		root = kernfs_root_from_sb(dentry->d_sb);
		down_read(&root->kernfs_rwsem);
		parent = kernfs_dentry_node(dentry->d_parent);
		if (parent) {
			if (kernfs_dir_changed(parent, dentry)) {
				up_read(&root->kernfs_rwsem);
				return 0;
			}
		}
		up_read(&root->kernfs_rwsem);

		/* The kernfs parent node hasn't changed, leave the
		 * dentry negative and return success.
		 */
		return 1;
	}

	kn = kernfs_dentry_node(dentry);
	root = kernfs_root(kn);
	down_read(&root->kernfs_rwsem);

	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
		goto out_bad;

	/* The kernfs node has been moved? */
	if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
		goto out_bad;

	/* The kernfs node has been renamed */
	if (strcmp(dentry->d_name.name, kn->name) != 0)
		goto out_bad;

	/* The kernfs node has been moved to a different namespace */
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
		goto out_bad;

	up_read(&root->kernfs_rwsem);
	return 1;
out_bad:
	up_read(&root->kernfs_rwsem);
	return 0;
}

const struct dentry_operations kernfs_dops = {
	.d_revalidate	= kernfs_dop_revalidate,
};

static struct dentry *kernfs_iop_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
{
	struct kernfs_node *parent = dir->i_private;
	struct kernfs_node *kn;
	struct kernfs_root *root;
	struct inode *inode = NULL;
	const void *ns = NULL;

	root = kernfs_root(parent);
	down_read(&root->kernfs_rwsem);
	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dir->i_sb)->ns;

	kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
	/* attach dentry and inode */
	if (kn) {
		/* Inactive nodes are invisible to the VFS so don't
		 * create a negative.
		 */
		if (!kernfs_active(kn)) {
			up_read(&root->kernfs_rwsem);
			return NULL;
		}
		inode = kernfs_get_inode(dir->i_sb, kn);
		if (!inode)
			inode = ERR_PTR(-ENOMEM);
	}
	/*
	 * Needed for negative dentry validation.
	 * The negative dentry can be created in kernfs_iop_lookup()
	 * or transforms from positive dentry in dentry_unlink_inode()
	 * called from vfs_rmdir().
	 */
	if (!IS_ERR(inode))
		kernfs_set_rev(parent, dentry);
	up_read(&root->kernfs_rwsem);

	/* instantiate and hash (possibly negative) dentry */
	return d_splice_alias(inode, dentry);
}

static int kernfs_iop_mkdir(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode)
{
	struct kernfs_node *parent = dir->i_private;
	struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
	int ret;

	if (!scops || !scops->mkdir)
		return -EPERM;

	if (!kernfs_get_active(parent))
		return -ENODEV;

	ret = scops->mkdir(parent, dentry->d_name.name, mode);

	kernfs_put_active(parent);
	return ret;
}

static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_dentry_node(dentry);
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
	int ret;

	if (!scops || !scops->rmdir)
		return -EPERM;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ret = scops->rmdir(kn);

	kernfs_put_active(kn);
	return ret;
}

static int kernfs_iop_rename(struct mnt_idmap *idmap,
			     struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry,
			     unsigned int flags)
{
	struct kernfs_node *kn = kernfs_dentry_node(old_dentry);
	struct kernfs_node *new_parent = new_dir->i_private;
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
	int ret;

	if (flags)
		return -EINVAL;

	if (!scops || !scops->rename)
		return -EPERM;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	if (!kernfs_get_active(new_parent)) {
		kernfs_put_active(kn);
		return -ENODEV;
	}

	ret = scops->rename(kn, new_parent, new_dentry->d_name.name);

	kernfs_put_active(new_parent);
	kernfs_put_active(kn);
	return ret;
}

const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.listxattr	= kernfs_iop_listxattr,

	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};

static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
{
	struct kernfs_node *last;

	while (true) {
		struct rb_node *rbn;

		last = pos;

		if (kernfs_type(pos) != KERNFS_DIR)
			break;

		rbn = rb_first(&pos->dir.children);
		if (!rbn)
			break;

		pos = rb_to_kn(rbn);
	}

	return last;
}

/**
 * kernfs_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: kernfs_node whose descendants to walk
 *
 * Find the next descendant to visit for post-order traversal of @root's
 * descendants.  @root is included in the iteration and the last node to be
 * visited.
 *
 * Return: the next descendant to visit or %NULL when done.
 */
static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
						       struct kernfs_node *root)
{
	struct rb_node *rbn;

	lockdep_assert_held_write(&kernfs_root(root)->kernfs_rwsem);

	/* if first iteration, visit leftmost descendant which may be root */
	if (!pos)
		return kernfs_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	rbn = rb_next(&pos->rb);
	if (rbn)
		return kernfs_leftmost_descendant(rb_to_kn(rbn));

	/* no sibling left, visit parent */
	return pos->parent;
}

static void kernfs_activate_one(struct kernfs_node *kn)
{
	lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);

	kn->flags |= KERNFS_ACTIVATED;

	if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
		return;

	WARN_ON_ONCE(kn->parent && RB_EMPTY_NODE(&kn->rb));
	WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
}

/**
 * kernfs_activate - activate a node which started deactivated
 * @kn: kernfs_node whose subtree is to be activated
 *
 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
 * needs to be explicitly activated.  A node which hasn't been activated
 * isn't visible to userland and deactivation is skipped during its
 * removal.  This is useful to construct atomic init sequences where
 * creation of multiple nodes should either succeed or fail atomically.
 *
 * The caller is responsible for ensuring that this function is not called
 * after kernfs_remove*() is invoked on @kn.
 */
void kernfs_activate(struct kernfs_node *kn)
{
	struct kernfs_node *pos;
	struct kernfs_root *root = kernfs_root(kn);

	down_write(&root->kernfs_rwsem);

	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		kernfs_activate_one(pos);

	up_write(&root->kernfs_rwsem);
}

/**
 * kernfs_show - show or hide a node
 * @kn: kernfs_node to show or hide
 * @show: whether to show or hide
 *
 * If @show is %false, @kn is marked hidden and deactivated.  A hidden node is
 * ignored in future activations.  If %true, the mark is removed and activation
 * state is restored.  This function won't implicitly activate a new node in a
 * %KERNFS_ROOT_CREATE_DEACTIVATED root which hasn't been activated yet.
 *
 * To avoid recursion complexities, directories aren't supported for now.
 */
void kernfs_show(struct kernfs_node *kn, bool show)
{
	struct kernfs_root *root = kernfs_root(kn);

	if (WARN_ON_ONCE(kernfs_type(kn) == KERNFS_DIR))
		return;

	down_write(&root->kernfs_rwsem);

	if (show) {
		kn->flags &= ~KERNFS_HIDDEN;
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_activate_one(kn);
	} else {
		kn->flags |= KERNFS_HIDDEN;
		if (kernfs_active(kn))
			atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
		kernfs_drain(kn);
	}

	up_write(&root->kernfs_rwsem);
}

static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	/* Short-circuit if non-root @kn has already finished removal. */
	if (!kn)
		return;

	lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);

	/*
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (kn->parent && RB_EMPTY_NODE(&kn->rb))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent new usage by marking all nodes removing and deactivating */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn))) {
		pos->flags |= KERNFS_REMOVING;
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
	}

	/* deactivate and unlink the subtree node-by-node */
	do {
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() may drop kernfs_rwsem temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns.  Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		kernfs_drain(pos);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.  Use it
		 * to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);

			if (ps_iattr) {
				ktime_get_real_ts64(&ps_iattr->ia_ctime);
				ps_iattr->ia_mtime = ps_iattr->ia_ctime;
			}

			up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
			kernfs_put(pos);
		}

		kernfs_put(pos);
	} while (pos != kn);
}

/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_root *root;

	if (!kn)
		return;

	root = kernfs_root(kn);

	down_write(&root->kernfs_rwsem);
	__kernfs_remove(kn);
	up_write(&root->kernfs_rwsem);
}

/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding.  Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}

/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed or in the
 * process of being removed.  Once kernfs_break_active_protection() is
 * invoked, that protection is irreversibly gone for the kernfs operation
 * instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
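
/*
 * Illustrative usage sketch (not part of the original file): a handler that
 * is running with an active reference can temporarily step out of active
 * protection around a section that might otherwise deadlock with removal,
 * e.g.
 *
 *	kernfs_break_active_protection(kn);
 *	...do work during which @kn may be removed...
 *	kernfs_unbreak_active_protection(kn);
 */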

/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself.  This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself.  It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self().  The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path.  kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node.  Only the first one actually performs removal and
 * returns %true.  All others will wait until the kernfs operation which
 * won self-removal finishes and return %false.  Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration.  This can be used to
 * guarantee, for example, that all concurrent writes to a "delete" file
 * finish only after the whole operation is complete.
 *
 * Return: %true if @kn is removed by this call, otherwise %false.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;
	struct kernfs_root *root = kernfs_root(kn);

	down_write(&root->kernfs_rwsem);
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.  Only
	 * the first one will actually perform removal.  When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while kernfs_rwsem is held exclusive.  The ones which lost
	 * arbitration wait for SUICIDED && drained which can happen only
	 * after the enclosing kernfs operation which executed the winning
	 * instance of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			up_write(&root->kernfs_rwsem);
			schedule();
			down_write(&root->kernfs_rwsem);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while kernfs_rwsem is held exclusive; otherwise,
	 * waiting for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	up_write(&root->kernfs_rwsem);
	return ret;
}
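
/*
 * Illustrative usage sketch (not part of the original file): a hypothetical
 * store/write handler of a "delete" attribute can remove its own node before
 * taking the normal removal path, e.g.
 *
 *	if (kernfs_remove_self(kn))
 *		...proceed with the usual device/object removal...
 *	return count;
 */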

/**
 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
 * @parent: parent of the target
 * @name: name of the kernfs_node to remove
 * @ns: namespace tag of the kernfs_node to remove
 *
 * Look for the kernfs_node with @name and @ns under @parent and remove it.
 *
 * Return: %0 on success, -ENOENT if such entry doesn't exist.
 */
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
			     const void *ns)
{
	struct kernfs_node *kn;
	struct kernfs_root *root;

	if (!parent) {
		WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
			name);
		return -ENOENT;
	}

	root = kernfs_root(parent);
	down_write(&root->kernfs_rwsem);

	kn = kernfs_find_ns(parent, name, ns);
	if (kn) {
		kernfs_get(kn);
		__kernfs_remove(kn);
		kernfs_put(kn);
	}

	up_write(&root->kernfs_rwsem);

	if (kn)
		return 0;
	else
		return -ENOENT;
}

/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Return: %0 on success, -errno on failure.
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	struct kernfs_root *root;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	root = kernfs_root(kn);
	down_write(&root->kernfs_rwsem);

	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	write_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		old_name = kn->name;
		kn->name = new_name;
	}

	write_unlock_irq(&kernfs_rename_lock);

	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	kernfs_put(old_parent);
	kfree_const(old_name);

	error = 0;
 out:
	up_write(&root->kernfs_rwsem);
	return error;
}

static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	kernfs_put(filp->private_data);
	return 0;
}

static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}

static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
	struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
{
	pos = kernfs_dir_pos(ns, parent, ino, pos);
	if (pos) {
		do {
			struct rb_node *node = rb_next(&pos->rb);
			if (!node)
				pos = NULL;
			else
				pos = rb_to_kn(node);
		} while (pos && (!kernfs_active(pos) || pos->ns != ns));
	}
	return pos;
}

static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = kernfs_dentry_node(dentry);
	struct kernfs_node *pos = file->private_data;
	struct kernfs_root *root;
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;

	root = kernfs_root(parent);
	down_read(&root->kernfs_rwsem);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = fs_umode_to_dtype(pos->mode);
		int len = strlen(name);
		ino_t ino = kernfs_ino(pos);

		ctx->pos = pos->hash;
		file->private_data = pos;
		kernfs_get(pos);

		up_read(&root->kernfs_rwsem);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;
		down_read(&root->kernfs_rwsem);
	}
	up_read(&root->kernfs_rwsem);
	file->private_data = NULL;
	ctx->pos = INT_MAX;
	return 0;
}

const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate_shared	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= generic_file_llseek,
};
