1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * drivers/base/core.c - core driver model code (device registration, etc) |
4 | * |
5 | * Copyright (c) 2002-3 Patrick Mochel |
6 | * Copyright (c) 2002-3 Open Source Development Labs |
7 | * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de> |
8 | * Copyright (c) 2006 Novell, Inc. |
9 | */ |
10 | |
11 | #include <linux/acpi.h> |
12 | #include <linux/device.h> |
13 | #include <linux/err.h> |
14 | #include <linux/fwnode.h> |
15 | #include <linux/init.h> |
16 | #include <linux/module.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/string.h> |
19 | #include <linux/kdev_t.h> |
20 | #include <linux/notifier.h> |
21 | #include <linux/of.h> |
22 | #include <linux/of_device.h> |
23 | #include <linux/genhd.h> |
24 | #include <linux/mutex.h> |
25 | #include <linux/pm_runtime.h> |
26 | #include <linux/netdevice.h> |
27 | #include <linux/sched/signal.h> |
28 | #include <linux/sysfs.h> |
29 | |
30 | #include "base.h" |
31 | #include "power/power.h" |
32 | |
33 | #ifdef CONFIG_SYSFS_DEPRECATED |
34 | #ifdef CONFIG_SYSFS_DEPRECATED_V2 |
35 | long sysfs_deprecated = 1; |
36 | #else |
37 | long sysfs_deprecated = 0; |
38 | #endif |
39 | static int __init sysfs_deprecated_setup(char *arg) |
40 | { |
41 | return kstrtol(arg, 10, &sysfs_deprecated); |
42 | } |
43 | early_param("sysfs.deprecated" , sysfs_deprecated_setup); |
44 | #endif |
45 | |
46 | /* Device links support. */ |
47 | |
48 | #ifdef CONFIG_SRCU |
49 | static DEFINE_MUTEX(device_links_lock); |
50 | DEFINE_STATIC_SRCU(device_links_srcu); |
51 | |
52 | static inline void device_links_write_lock(void) |
53 | { |
54 | mutex_lock(&device_links_lock); |
55 | } |
56 | |
57 | static inline void device_links_write_unlock(void) |
58 | { |
59 | mutex_unlock(&device_links_lock); |
60 | } |
61 | |
62 | int device_links_read_lock(void) |
63 | { |
64 | return srcu_read_lock(&device_links_srcu); |
65 | } |
66 | |
67 | void device_links_read_unlock(int idx) |
68 | { |
69 | srcu_read_unlock(&device_links_srcu, idx); |
70 | } |
71 | #else /* !CONFIG_SRCU */ |
72 | static DECLARE_RWSEM(device_links_lock); |
73 | |
74 | static inline void device_links_write_lock(void) |
75 | { |
76 | down_write(&device_links_lock); |
77 | } |
78 | |
79 | static inline void device_links_write_unlock(void) |
80 | { |
81 | up_write(&device_links_lock); |
82 | } |
83 | |
84 | int device_links_read_lock(void) |
85 | { |
86 | down_read(&device_links_lock); |
87 | return 0; |
88 | } |
89 | |
90 | void device_links_read_unlock(int not_used) |
91 | { |
92 | up_read(&device_links_lock); |
93 | } |
94 | #endif /* !CONFIG_SRCU */ |
95 | |
96 | /** |
97 | * device_is_dependent - Check if one device depends on another one |
98 | * @dev: Device to check dependencies for. |
99 | * @target: Device to check against. |
100 | * |
101 | * Check if @target depends on @dev or any device dependent on it (its child or |
102 | * its consumer etc). Return 1 if that is the case or 0 otherwise. |
103 | */ |
104 | static int device_is_dependent(struct device *dev, void *target) |
105 | { |
106 | struct device_link *link; |
107 | int ret; |
108 | |
109 | if (dev == target) |
110 | return 1; |
111 | |
112 | ret = device_for_each_child(dev, target, device_is_dependent); |
113 | if (ret) |
114 | return ret; |
115 | |
116 | list_for_each_entry(link, &dev->links.consumers, s_node) { |
117 | if (link->consumer == target) |
118 | return 1; |
119 | |
120 | ret = device_is_dependent(link->consumer, target); |
121 | if (ret) |
122 | break; |
123 | } |
124 | return ret; |
125 | } |
126 | |
127 | static int device_reorder_to_tail(struct device *dev, void *not_used) |
128 | { |
129 | struct device_link *link; |
130 | |
131 | /* |
132 | * Devices that have not been registered yet will be put to the ends |
133 | * of the lists during the registration, so skip them here. |
134 | */ |
135 | if (device_is_registered(dev)) |
136 | devices_kset_move_last(dev); |
137 | |
138 | if (device_pm_initialized(dev)) |
139 | device_pm_move_last(dev); |
140 | |
141 | device_for_each_child(dev, NULL, device_reorder_to_tail); |
142 | list_for_each_entry(link, &dev->links.consumers, s_node) |
143 | device_reorder_to_tail(link->consumer, NULL); |
144 | |
145 | return 0; |
146 | } |
147 | |
148 | /** |
 * device_pm_move_to_tail - Move a set of devices to the end of device lists
150 | * @dev: Device to move |
151 | * |
152 | * This is a device_reorder_to_tail() wrapper taking the requisite locks. |
153 | * |
154 | * It moves the @dev along with all of its children and all of its consumers |
155 | * to the ends of the device_kset and dpm_list, recursively. |
156 | */ |
157 | void device_pm_move_to_tail(struct device *dev) |
158 | { |
159 | int idx; |
160 | |
161 | idx = device_links_read_lock(); |
162 | device_pm_lock(); |
163 | device_reorder_to_tail(dev, NULL); |
164 | device_pm_unlock(); |
165 | device_links_read_unlock(idx); |
166 | } |
167 | |
168 | /** |
169 | * device_link_add - Create a link between two devices. |
170 | * @consumer: Consumer end of the link. |
171 | * @supplier: Supplier end of the link. |
172 | * @flags: Link flags. |
173 | * |
174 | * The caller is responsible for the proper synchronization of the link creation |
175 | * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the |
176 | * runtime PM framework to take the link into account. Second, if the |
177 | * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will |
178 | * be forced into the active metastate and reference-counted upon the creation |
179 | * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be |
180 | * ignored. |
181 | * |
182 | * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by |
183 | * the driver core and, in particular, the caller of this function is expected |
184 | * to drop the reference to the link acquired by it directly. |
185 | * |
186 | * If that flag is not set, however, the caller of this function is handing the |
187 | * management of the link over to the driver core entirely and its return value |
188 | * can only be used to check whether or not the link is present. In that case, |
189 | * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link |
190 | * flags can be used to indicate to the driver core when the link can be safely |
191 | * deleted. Namely, setting one of them in @flags indicates to the driver core |
192 | * that the link is not going to be used (by the given caller of this function) |
193 | * after unbinding the consumer or supplier driver, respectively, from its |
194 | * device, so the link can be deleted at that point. If none of them is set, |
195 | * the link will be maintained until one of the devices pointed to by it (either |
196 | * the consumer or the supplier) is unregistered. |
197 | * |
198 | * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and |
199 | * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent |
200 | * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can |
 * be used to request the driver core to automatically probe for a consumer
202 | * driver after successfully binding a driver to the supplier device. |
203 | * |
204 | * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER |
205 | * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and |
206 | * will cause NULL to be returned upfront. |
207 | * |
208 | * A side effect of the link creation is re-ordering of dpm_list and the |
209 | * devices_kset list by moving the consumer device and all devices depending |
210 | * on it to the ends of these lists (that does not happen to devices that have |
211 | * not been registered when this function is called). |
212 | * |
213 | * The supplier device is required to be registered when this function is called |
214 | * and NULL will be returned if that is not the case. The consumer device need |
215 | * not be registered, however. |
216 | */ |
217 | struct device_link *device_link_add(struct device *consumer, |
218 | struct device *supplier, u32 flags) |
219 | { |
220 | struct device_link *link; |
221 | |
222 | if (!consumer || !supplier || |
223 | (flags & DL_FLAG_STATELESS && |
224 | flags & (DL_FLAG_AUTOREMOVE_CONSUMER | |
225 | DL_FLAG_AUTOREMOVE_SUPPLIER | |
226 | DL_FLAG_AUTOPROBE_CONSUMER)) || |
227 | (flags & DL_FLAG_AUTOPROBE_CONSUMER && |
228 | flags & (DL_FLAG_AUTOREMOVE_CONSUMER | |
229 | DL_FLAG_AUTOREMOVE_SUPPLIER))) |
230 | return NULL; |
231 | |
232 | if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { |
233 | if (pm_runtime_get_sync(supplier) < 0) { |
234 | pm_runtime_put_noidle(supplier); |
235 | return NULL; |
236 | } |
237 | } |
238 | |
239 | device_links_write_lock(); |
240 | device_pm_lock(); |
241 | |
242 | /* |
243 | * If the supplier has not been fully registered yet or there is a |
244 | * reverse dependency between the consumer and the supplier already in |
245 | * the graph, return NULL. |
246 | */ |
247 | if (!device_pm_initialized(supplier) |
248 | || device_is_dependent(consumer, supplier)) { |
249 | link = NULL; |
250 | goto out; |
251 | } |
252 | |
253 | /* |
254 | * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed |
255 | * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both |
256 | * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. |
257 | */ |
258 | if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) |
259 | flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; |
260 | |
261 | list_for_each_entry(link, &supplier->links.consumers, s_node) { |
262 | if (link->consumer != consumer) |
263 | continue; |
264 | |
265 | /* |
266 | * Don't return a stateless link if the caller wants a stateful |
267 | * one and vice versa. |
268 | */ |
269 | if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) { |
270 | link = NULL; |
271 | goto out; |
272 | } |
273 | |
274 | if (flags & DL_FLAG_PM_RUNTIME) { |
275 | if (!(link->flags & DL_FLAG_PM_RUNTIME)) { |
276 | pm_runtime_new_link(consumer); |
277 | link->flags |= DL_FLAG_PM_RUNTIME; |
278 | } |
279 | if (flags & DL_FLAG_RPM_ACTIVE) |
280 | refcount_inc(&link->rpm_active); |
281 | } |
282 | |
283 | if (flags & DL_FLAG_STATELESS) { |
284 | kref_get(&link->kref); |
285 | goto out; |
286 | } |
287 | |
288 | /* |
		 * If the lifetime of the link following from the new flags is
290 | * longer than indicated by the flags of the existing link, |
291 | * update the existing link to stay around longer. |
292 | */ |
293 | if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { |
294 | if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { |
295 | link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; |
296 | link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; |
297 | } |
298 | } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { |
299 | link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | |
300 | DL_FLAG_AUTOREMOVE_SUPPLIER); |
301 | } |
302 | goto out; |
303 | } |
304 | |
305 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
306 | if (!link) |
307 | goto out; |
308 | |
309 | refcount_set(&link->rpm_active, 1); |
310 | |
311 | if (flags & DL_FLAG_PM_RUNTIME) { |
312 | if (flags & DL_FLAG_RPM_ACTIVE) |
313 | refcount_inc(&link->rpm_active); |
314 | |
315 | pm_runtime_new_link(consumer); |
316 | } |
317 | |
318 | get_device(supplier); |
319 | link->supplier = supplier; |
320 | INIT_LIST_HEAD(&link->s_node); |
321 | get_device(consumer); |
322 | link->consumer = consumer; |
323 | INIT_LIST_HEAD(&link->c_node); |
324 | link->flags = flags; |
325 | kref_init(&link->kref); |
326 | |
327 | /* Determine the initial link state. */ |
328 | if (flags & DL_FLAG_STATELESS) { |
329 | link->status = DL_STATE_NONE; |
330 | } else { |
331 | switch (supplier->links.status) { |
332 | case DL_DEV_PROBING: |
333 | switch (consumer->links.status) { |
334 | case DL_DEV_PROBING: |
335 | /* |
336 | * A consumer driver can create a link to a |
337 | * supplier that has not completed its probing |
338 | * yet as long as it knows that the supplier is |
339 | * already functional (for example, it has just |
340 | * acquired some resources from the supplier). |
341 | */ |
342 | link->status = DL_STATE_CONSUMER_PROBE; |
343 | break; |
344 | default: |
345 | link->status = DL_STATE_DORMANT; |
346 | break; |
347 | } |
348 | break; |
349 | case DL_DEV_DRIVER_BOUND: |
350 | switch (consumer->links.status) { |
351 | case DL_DEV_PROBING: |
352 | link->status = DL_STATE_CONSUMER_PROBE; |
353 | break; |
354 | case DL_DEV_DRIVER_BOUND: |
355 | link->status = DL_STATE_ACTIVE; |
356 | break; |
357 | default: |
358 | link->status = DL_STATE_AVAILABLE; |
359 | break; |
360 | } |
361 | break; |
362 | case DL_DEV_UNBINDING: |
363 | link->status = DL_STATE_SUPPLIER_UNBIND; |
364 | break; |
365 | default: |
366 | link->status = DL_STATE_DORMANT; |
367 | break; |
368 | } |
369 | } |
370 | |
371 | /* |
372 | * Some callers expect the link creation during consumer driver probe to |
373 | * resume the supplier even without DL_FLAG_RPM_ACTIVE. |
374 | */ |
375 | if (link->status == DL_STATE_CONSUMER_PROBE && |
376 | flags & DL_FLAG_PM_RUNTIME) |
377 | pm_runtime_resume(supplier); |
378 | |
379 | /* |
380 | * Move the consumer and all of the devices depending on it to the end |
381 | * of dpm_list and the devices_kset list. |
382 | * |
383 | * It is necessary to hold dpm_list locked throughout all that or else |
384 | * we may end up suspending with a wrong ordering of it. |
385 | */ |
386 | device_reorder_to_tail(consumer, NULL); |
387 | |
388 | list_add_tail_rcu(&link->s_node, &supplier->links.consumers); |
389 | list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); |
390 | |
391 | dev_dbg(consumer, "Linked as a consumer to %s\n" , dev_name(supplier)); |
392 | |
393 | out: |
394 | device_pm_unlock(); |
395 | device_links_write_unlock(); |
396 | |
397 | if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) |
398 | pm_runtime_put(supplier); |
399 | |
400 | return link; |
401 | } |
402 | EXPORT_SYMBOL_GPL(device_link_add); |
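
/*
 * Minimal usage sketch for device_link_add(), assuming a hypothetical
 * consumer driver probe that has already looked up its supplier device.
 * With DL_FLAG_STATELESS the caller keeps ownership of the returned link
 * and has to drop it itself later; without that flag the driver core
 * manages the link as described above.
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(dev, supplier,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *	if (!link)
 *		return -EINVAL;
 *
 *	(use the supplier's services here)
 *
 *	device_link_del(link);
 */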
403 | |
404 | static void device_link_free(struct device_link *link) |
405 | { |
406 | while (refcount_dec_not_one(&link->rpm_active)) |
407 | pm_runtime_put(link->supplier); |
408 | |
409 | put_device(link->consumer); |
410 | put_device(link->supplier); |
411 | kfree(link); |
412 | } |
413 | |
414 | #ifdef CONFIG_SRCU |
415 | static void __device_link_free_srcu(struct rcu_head *rhead) |
416 | { |
417 | device_link_free(container_of(rhead, struct device_link, rcu_head)); |
418 | } |
419 | |
420 | static void __device_link_del(struct kref *kref) |
421 | { |
422 | struct device_link *link = container_of(kref, struct device_link, kref); |
423 | |
424 | dev_dbg(link->consumer, "Dropping the link to %s\n" , |
425 | dev_name(link->supplier)); |
426 | |
427 | if (link->flags & DL_FLAG_PM_RUNTIME) |
428 | pm_runtime_drop_link(link->consumer); |
429 | |
430 | list_del_rcu(&link->s_node); |
431 | list_del_rcu(&link->c_node); |
432 | call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); |
433 | } |
434 | #else /* !CONFIG_SRCU */ |
435 | static void __device_link_del(struct kref *kref) |
436 | { |
437 | struct device_link *link = container_of(kref, struct device_link, kref); |
438 | |
439 | dev_info(link->consumer, "Dropping the link to %s\n" , |
440 | dev_name(link->supplier)); |
441 | |
442 | if (link->flags & DL_FLAG_PM_RUNTIME) |
443 | pm_runtime_drop_link(link->consumer); |
444 | |
445 | list_del(&link->s_node); |
446 | list_del(&link->c_node); |
447 | device_link_free(link); |
448 | } |
449 | #endif /* !CONFIG_SRCU */ |
450 | |
451 | static void device_link_put_kref(struct device_link *link) |
452 | { |
453 | if (link->flags & DL_FLAG_STATELESS) |
454 | kref_put(&link->kref, __device_link_del); |
455 | else |
456 | WARN(1, "Unable to drop a managed device link reference\n" ); |
457 | } |
458 | |
459 | /** |
460 | * device_link_del - Delete a stateless link between two devices. |
461 | * @link: Device link to delete. |
462 | * |
463 | * The caller must ensure proper synchronization of this function with runtime |
464 | * PM. If the link was added multiple times, it needs to be deleted as often. |
465 | * Care is required for hotplugged devices: Their links are purged on removal |
466 | * and calling device_link_del() is then no longer allowed. |
467 | */ |
468 | void device_link_del(struct device_link *link) |
469 | { |
470 | device_links_write_lock(); |
471 | device_pm_lock(); |
472 | device_link_put_kref(link); |
473 | device_pm_unlock(); |
474 | device_links_write_unlock(); |
475 | } |
476 | EXPORT_SYMBOL_GPL(device_link_del); |
477 | |
478 | /** |
479 | * device_link_remove - Delete a stateless link between two devices. |
480 | * @consumer: Consumer end of the link. |
481 | * @supplier: Supplier end of the link. |
482 | * |
483 | * The caller must ensure proper synchronization of this function with runtime |
484 | * PM. |
485 | */ |
486 | void device_link_remove(void *consumer, struct device *supplier) |
487 | { |
488 | struct device_link *link; |
489 | |
490 | if (WARN_ON(consumer == supplier)) |
491 | return; |
492 | |
493 | device_links_write_lock(); |
494 | device_pm_lock(); |
495 | |
496 | list_for_each_entry(link, &supplier->links.consumers, s_node) { |
497 | if (link->consumer == consumer) { |
498 | device_link_put_kref(link); |
499 | break; |
500 | } |
501 | } |
502 | |
503 | device_pm_unlock(); |
504 | device_links_write_unlock(); |
505 | } |
506 | EXPORT_SYMBOL_GPL(device_link_remove); |
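
/*
 * Sketch of the two ways to drop a stateless link, with "dev" and
 * "supplier" as placeholder endpoints: device_link_del() takes the pointer
 * returned by device_link_add(), while device_link_remove() looks the link
 * up from its two endpoints when that pointer was not kept around.
 *
 *	link = device_link_add(dev, supplier, DL_FLAG_STATELESS);
 *	...
 *	device_link_del(link);
 *
 * or, equivalently:
 *
 *	device_link_add(dev, supplier, DL_FLAG_STATELESS);
 *	...
 *	device_link_remove(dev, supplier);
 */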
507 | |
508 | static void device_links_missing_supplier(struct device *dev) |
509 | { |
510 | struct device_link *link; |
511 | |
512 | list_for_each_entry(link, &dev->links.suppliers, c_node) |
513 | if (link->status == DL_STATE_CONSUMER_PROBE) |
514 | WRITE_ONCE(link->status, DL_STATE_AVAILABLE); |
515 | } |
516 | |
517 | /** |
518 | * device_links_check_suppliers - Check presence of supplier drivers. |
519 | * @dev: Consumer device. |
520 | * |
521 | * Check links from this device to any suppliers. Walk the list of the device's |
522 | * links to suppliers and see if all of them are available. If not, simply |
523 | * return -EPROBE_DEFER. |
524 | * |
525 | * We need to guarantee that the supplier will not go away after the check has |
526 | * been positive here. It only can go away in __device_release_driver() and |
527 | * that function checks the device's links to consumers. This means we need to |
528 | * mark the link as "consumer probe in progress" to make the supplier removal |
529 | * wait for us to complete (or bad things may happen). |
530 | * |
531 | * Links with the DL_FLAG_STATELESS flag set are ignored. |
532 | */ |
533 | int device_links_check_suppliers(struct device *dev) |
534 | { |
535 | struct device_link *link; |
536 | int ret = 0; |
537 | |
538 | device_links_write_lock(); |
539 | |
540 | list_for_each_entry(link, &dev->links.suppliers, c_node) { |
541 | if (link->flags & DL_FLAG_STATELESS) |
542 | continue; |
543 | |
544 | if (link->status != DL_STATE_AVAILABLE) { |
545 | device_links_missing_supplier(dev); |
546 | ret = -EPROBE_DEFER; |
547 | break; |
548 | } |
549 | WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); |
550 | } |
551 | dev->links.status = DL_DEV_PROBING; |
552 | |
553 | device_links_write_unlock(); |
554 | return ret; |
555 | } |
556 | |
557 | /** |
558 | * device_links_driver_bound - Update device links after probing its driver. |
559 | * @dev: Device to update the links for. |
560 | * |
561 | * The probe has been successful, so update links from this device to any |
562 | * consumers by changing their status to "available". |
563 | * |
564 | * Also change the status of @dev's links to suppliers to "active". |
565 | * |
566 | * Links with the DL_FLAG_STATELESS flag set are ignored. |
567 | */ |
568 | void device_links_driver_bound(struct device *dev) |
569 | { |
570 | struct device_link *link; |
571 | |
572 | device_links_write_lock(); |
573 | |
574 | list_for_each_entry(link, &dev->links.consumers, s_node) { |
575 | if (link->flags & DL_FLAG_STATELESS) |
576 | continue; |
577 | |
578 | /* |
579 | * Links created during consumer probe may be in the "consumer |
580 | * probe" state to start with if the supplier is still probing |
581 | * when they are created and they may become "active" if the |
582 | * consumer probe returns first. Skip them here. |
583 | */ |
584 | if (link->status == DL_STATE_CONSUMER_PROBE || |
585 | link->status == DL_STATE_ACTIVE) |
586 | continue; |
587 | |
588 | WARN_ON(link->status != DL_STATE_DORMANT); |
589 | WRITE_ONCE(link->status, DL_STATE_AVAILABLE); |
590 | |
591 | if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) |
592 | driver_deferred_probe_add(link->consumer); |
593 | } |
594 | |
595 | list_for_each_entry(link, &dev->links.suppliers, c_node) { |
596 | if (link->flags & DL_FLAG_STATELESS) |
597 | continue; |
598 | |
599 | WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); |
600 | WRITE_ONCE(link->status, DL_STATE_ACTIVE); |
601 | } |
602 | |
603 | dev->links.status = DL_DEV_DRIVER_BOUND; |
604 | |
605 | device_links_write_unlock(); |
606 | } |
607 | |
608 | /** |
609 | * __device_links_no_driver - Update links of a device without a driver. |
 * @dev: Device without a driver.
611 | * |
612 | * Delete all non-persistent links from this device to any suppliers. |
613 | * |
614 | * Persistent links stay around, but their status is changed to "available", |
615 | * unless they already are in the "supplier unbind in progress" state in which |
616 | * case they need not be updated. |
617 | * |
618 | * Links with the DL_FLAG_STATELESS flag set are ignored. |
619 | */ |
620 | static void __device_links_no_driver(struct device *dev) |
621 | { |
622 | struct device_link *link, *ln; |
623 | |
624 | list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { |
625 | if (link->flags & DL_FLAG_STATELESS) |
626 | continue; |
627 | |
628 | if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) |
629 | __device_link_del(&link->kref); |
630 | else if (link->status == DL_STATE_CONSUMER_PROBE || |
631 | link->status == DL_STATE_ACTIVE) |
632 | WRITE_ONCE(link->status, DL_STATE_AVAILABLE); |
633 | } |
634 | |
635 | dev->links.status = DL_DEV_NO_DRIVER; |
636 | } |
637 | |
638 | /** |
639 | * device_links_no_driver - Update links after failing driver probe. |
640 | * @dev: Device whose driver has just failed to probe. |
641 | * |
642 | * Clean up leftover links to consumers for @dev and invoke |
643 | * %__device_links_no_driver() to update links to suppliers for it as |
644 | * appropriate. |
645 | * |
646 | * Links with the DL_FLAG_STATELESS flag set are ignored. |
647 | */ |
648 | void device_links_no_driver(struct device *dev) |
649 | { |
650 | struct device_link *link; |
651 | |
652 | device_links_write_lock(); |
653 | |
654 | list_for_each_entry(link, &dev->links.consumers, s_node) { |
655 | if (link->flags & DL_FLAG_STATELESS) |
656 | continue; |
657 | |
658 | /* |
659 | * The probe has failed, so if the status of the link is |
660 | * "consumer probe" or "active", it must have been added by |
661 | * a probing consumer while this device was still probing. |
662 | * Change its state to "dormant", as it represents a valid |
663 | * relationship, but it is not functionally meaningful. |
664 | */ |
665 | if (link->status == DL_STATE_CONSUMER_PROBE || |
666 | link->status == DL_STATE_ACTIVE) |
667 | WRITE_ONCE(link->status, DL_STATE_DORMANT); |
668 | } |
669 | |
670 | __device_links_no_driver(dev); |
671 | |
672 | device_links_write_unlock(); |
673 | } |
674 | |
675 | /** |
676 | * device_links_driver_cleanup - Update links after driver removal. |
677 | * @dev: Device whose driver has just gone away. |
678 | * |
679 | * Update links to consumers for @dev by changing their status to "dormant" and |
680 | * invoke %__device_links_no_driver() to update links to suppliers for it as |
681 | * appropriate. |
682 | * |
683 | * Links with the DL_FLAG_STATELESS flag set are ignored. |
684 | */ |
685 | void device_links_driver_cleanup(struct device *dev) |
686 | { |
687 | struct device_link *link, *ln; |
688 | |
689 | device_links_write_lock(); |
690 | |
691 | list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { |
692 | if (link->flags & DL_FLAG_STATELESS) |
693 | continue; |
694 | |
695 | WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); |
696 | WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); |
697 | |
698 | /* |
699 | * autoremove the links between this @dev and its consumer |
700 | * devices that are not active, i.e. where the link state |
701 | * has moved to DL_STATE_SUPPLIER_UNBIND. |
702 | */ |
703 | if (link->status == DL_STATE_SUPPLIER_UNBIND && |
704 | link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) |
705 | __device_link_del(&link->kref); |
706 | |
707 | WRITE_ONCE(link->status, DL_STATE_DORMANT); |
708 | } |
709 | |
710 | __device_links_no_driver(dev); |
711 | |
712 | device_links_write_unlock(); |
713 | } |
714 | |
715 | /** |
716 | * device_links_busy - Check if there are any busy links to consumers. |
717 | * @dev: Device to check. |
718 | * |
719 | * Check each consumer of the device and return 'true' if its link's status |
720 | * is one of "consumer probe" or "active" (meaning that the given consumer is |
721 | * probing right now or its driver is present). Otherwise, change the link |
722 | * state to "supplier unbind" to prevent the consumer from being probed |
723 | * successfully going forward. |
724 | * |
725 | * Return 'false' if there are no probing or active consumers. |
726 | * |
727 | * Links with the DL_FLAG_STATELESS flag set are ignored. |
728 | */ |
729 | bool device_links_busy(struct device *dev) |
730 | { |
731 | struct device_link *link; |
732 | bool ret = false; |
733 | |
734 | device_links_write_lock(); |
735 | |
736 | list_for_each_entry(link, &dev->links.consumers, s_node) { |
737 | if (link->flags & DL_FLAG_STATELESS) |
738 | continue; |
739 | |
740 | if (link->status == DL_STATE_CONSUMER_PROBE |
741 | || link->status == DL_STATE_ACTIVE) { |
742 | ret = true; |
743 | break; |
744 | } |
745 | WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); |
746 | } |
747 | |
748 | dev->links.status = DL_DEV_UNBINDING; |
749 | |
750 | device_links_write_unlock(); |
751 | return ret; |
752 | } |
753 | |
754 | /** |
755 | * device_links_unbind_consumers - Force unbind consumers of the given device. |
756 | * @dev: Device to unbind the consumers of. |
757 | * |
758 | * Walk the list of links to consumers for @dev and if any of them is in the |
759 | * "consumer probe" state, wait for all device probes in progress to complete |
760 | * and start over. |
761 | * |
762 | * If that's not the case, change the status of the link to "supplier unbind" |
763 | * and check if the link was in the "active" state. If so, force the consumer |
764 | * driver to unbind and start over (the consumer will not re-probe as we have |
765 | * changed the state of the link already). |
766 | * |
767 | * Links with the DL_FLAG_STATELESS flag set are ignored. |
768 | */ |
769 | void device_links_unbind_consumers(struct device *dev) |
770 | { |
771 | struct device_link *link; |
772 | |
773 | start: |
774 | device_links_write_lock(); |
775 | |
776 | list_for_each_entry(link, &dev->links.consumers, s_node) { |
777 | enum device_link_state status; |
778 | |
779 | if (link->flags & DL_FLAG_STATELESS) |
780 | continue; |
781 | |
782 | status = link->status; |
783 | if (status == DL_STATE_CONSUMER_PROBE) { |
784 | device_links_write_unlock(); |
785 | |
786 | wait_for_device_probe(); |
787 | goto start; |
788 | } |
789 | WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); |
790 | if (status == DL_STATE_ACTIVE) { |
791 | struct device *consumer = link->consumer; |
792 | |
793 | get_device(consumer); |
794 | |
795 | device_links_write_unlock(); |
796 | |
797 | device_release_driver_internal(consumer, NULL, |
798 | consumer->parent); |
799 | put_device(consumer); |
800 | goto start; |
801 | } |
802 | } |
803 | |
804 | device_links_write_unlock(); |
805 | } |
806 | |
807 | /** |
808 | * device_links_purge - Delete existing links to other devices. |
809 | * @dev: Target device. |
810 | */ |
811 | static void device_links_purge(struct device *dev) |
812 | { |
813 | struct device_link *link, *ln; |
814 | |
815 | /* |
816 | * Delete all of the remaining links from this device to any other |
817 | * devices (either consumers or suppliers). |
818 | */ |
819 | device_links_write_lock(); |
820 | |
821 | list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { |
822 | WARN_ON(link->status == DL_STATE_ACTIVE); |
823 | __device_link_del(&link->kref); |
824 | } |
825 | |
826 | list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { |
827 | WARN_ON(link->status != DL_STATE_DORMANT && |
828 | link->status != DL_STATE_NONE); |
829 | __device_link_del(&link->kref); |
830 | } |
831 | |
832 | device_links_write_unlock(); |
833 | } |
834 | |
835 | /* Device links support end. */ |
836 | |
837 | int (*platform_notify)(struct device *dev) = NULL; |
838 | int (*platform_notify_remove)(struct device *dev) = NULL; |
839 | static struct kobject *dev_kobj; |
840 | struct kobject *sysfs_dev_char_kobj; |
841 | struct kobject *sysfs_dev_block_kobj; |
842 | |
843 | static DEFINE_MUTEX(device_hotplug_lock); |
844 | |
845 | void lock_device_hotplug(void) |
846 | { |
847 | mutex_lock(&device_hotplug_lock); |
848 | } |
849 | |
850 | void unlock_device_hotplug(void) |
851 | { |
852 | mutex_unlock(&device_hotplug_lock); |
853 | } |
854 | |
855 | int lock_device_hotplug_sysfs(void) |
856 | { |
857 | if (mutex_trylock(&device_hotplug_lock)) |
858 | return 0; |
859 | |
860 | /* Avoid busy looping (5 ms of sleep should do). */ |
861 | msleep(5); |
862 | return restart_syscall(); |
863 | } |
864 | |
865 | #ifdef CONFIG_BLOCK |
866 | static inline int device_is_not_partition(struct device *dev) |
867 | { |
868 | return !(dev->type == &part_type); |
869 | } |
870 | #else |
871 | static inline int device_is_not_partition(struct device *dev) |
872 | { |
873 | return 1; |
874 | } |
875 | #endif |
876 | |
877 | static int |
878 | device_platform_notify(struct device *dev, enum kobject_action action) |
879 | { |
880 | int ret; |
881 | |
882 | ret = acpi_platform_notify(dev, action); |
883 | if (ret) |
884 | return ret; |
885 | |
886 | ret = software_node_notify(dev, action); |
887 | if (ret) |
888 | return ret; |
889 | |
890 | if (platform_notify && action == KOBJ_ADD) |
891 | platform_notify(dev); |
892 | else if (platform_notify_remove && action == KOBJ_REMOVE) |
893 | platform_notify_remove(dev); |
894 | return 0; |
895 | } |
896 | |
897 | /** |
898 | * dev_driver_string - Return a device's driver name, if at all possible |
899 | * @dev: struct device to get the name of |
900 | * |
 * Will return the device's driver's name if the device is bound to a driver. If
902 | * the device is not bound to a driver, it will return the name of the bus |
903 | * it is attached to. If it is not attached to a bus either, an empty |
904 | * string will be returned. |
905 | */ |
906 | const char *dev_driver_string(const struct device *dev) |
907 | { |
908 | struct device_driver *drv; |
909 | |
910 | /* dev->driver can change to NULL underneath us because of unbinding, |
911 | * so be careful about accessing it. dev->bus and dev->class should |
912 | * never change once they are set, so they don't need special care. |
913 | */ |
914 | drv = READ_ONCE(dev->driver); |
915 | return drv ? drv->name : |
916 | (dev->bus ? dev->bus->name : |
917 | (dev->class ? dev->class->name : "" )); |
918 | } |
919 | EXPORT_SYMBOL(dev_driver_string); |
920 | |
921 | #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) |
922 | |
923 | static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, |
924 | char *buf) |
925 | { |
926 | struct device_attribute *dev_attr = to_dev_attr(attr); |
927 | struct device *dev = kobj_to_dev(kobj); |
928 | ssize_t ret = -EIO; |
929 | |
930 | if (dev_attr->show) |
931 | ret = dev_attr->show(dev, dev_attr, buf); |
932 | if (ret >= (ssize_t)PAGE_SIZE) { |
933 | printk("dev_attr_show: %pS returned bad count\n" , |
934 | dev_attr->show); |
935 | } |
936 | return ret; |
937 | } |
938 | |
939 | static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, |
940 | const char *buf, size_t count) |
941 | { |
942 | struct device_attribute *dev_attr = to_dev_attr(attr); |
943 | struct device *dev = kobj_to_dev(kobj); |
944 | ssize_t ret = -EIO; |
945 | |
946 | if (dev_attr->store) |
947 | ret = dev_attr->store(dev, dev_attr, buf, count); |
948 | return ret; |
949 | } |
950 | |
951 | static const struct sysfs_ops dev_sysfs_ops = { |
952 | .show = dev_attr_show, |
953 | .store = dev_attr_store, |
954 | }; |
955 | |
956 | #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) |
957 | |
958 | ssize_t device_store_ulong(struct device *dev, |
959 | struct device_attribute *attr, |
960 | const char *buf, size_t size) |
961 | { |
962 | struct dev_ext_attribute *ea = to_ext_attr(attr); |
963 | int ret; |
964 | unsigned long new; |
965 | |
966 | ret = kstrtoul(buf, 0, &new); |
967 | if (ret) |
968 | return ret; |
969 | *(unsigned long *)(ea->var) = new; |
970 | /* Always return full write size even if we didn't consume all */ |
971 | return size; |
972 | } |
973 | EXPORT_SYMBOL_GPL(device_store_ulong); |
974 | |
975 | ssize_t device_show_ulong(struct device *dev, |
976 | struct device_attribute *attr, |
977 | char *buf) |
978 | { |
979 | struct dev_ext_attribute *ea = to_ext_attr(attr); |
980 | return snprintf(buf, PAGE_SIZE, "%lx\n" , *(unsigned long *)(ea->var)); |
981 | } |
982 | EXPORT_SYMBOL_GPL(device_show_ulong); |
983 | |
984 | ssize_t device_store_int(struct device *dev, |
985 | struct device_attribute *attr, |
986 | const char *buf, size_t size) |
987 | { |
988 | struct dev_ext_attribute *ea = to_ext_attr(attr); |
989 | int ret; |
990 | long new; |
991 | |
992 | ret = kstrtol(buf, 0, &new); |
993 | if (ret) |
994 | return ret; |
995 | |
996 | if (new > INT_MAX || new < INT_MIN) |
997 | return -EINVAL; |
998 | *(int *)(ea->var) = new; |
999 | /* Always return full write size even if we didn't consume all */ |
1000 | return size; |
1001 | } |
1002 | EXPORT_SYMBOL_GPL(device_store_int); |
1003 | |
1004 | ssize_t device_show_int(struct device *dev, |
1005 | struct device_attribute *attr, |
1006 | char *buf) |
1007 | { |
1008 | struct dev_ext_attribute *ea = to_ext_attr(attr); |
1009 | |
1010 | return snprintf(buf, PAGE_SIZE, "%d\n" , *(int *)(ea->var)); |
1011 | } |
1012 | EXPORT_SYMBOL_GPL(device_show_int); |
1013 | |
1014 | ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, |
1015 | const char *buf, size_t size) |
1016 | { |
1017 | struct dev_ext_attribute *ea = to_ext_attr(attr); |
1018 | |
1019 | if (strtobool(buf, ea->var) < 0) |
1020 | return -EINVAL; |
1021 | |
1022 | return size; |
1023 | } |
1024 | EXPORT_SYMBOL_GPL(device_store_bool); |
1025 | |
1026 | ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, |
1027 | char *buf) |
1028 | { |
1029 | struct dev_ext_attribute *ea = to_ext_attr(attr); |
1030 | |
1031 | return snprintf(buf, PAGE_SIZE, "%d\n" , *(bool *)(ea->var)); |
1032 | } |
1033 | EXPORT_SYMBOL_GPL(device_show_bool); |
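
/*
 * The device_show_*()/device_store_*() helpers above are usually not called
 * directly; drivers bind a variable to them through the DEVICE_ULONG_ATTR(),
 * DEVICE_INT_ATTR() and DEVICE_BOOL_ATTR() wrappers from <linux/device.h>.
 * A hypothetical sketch:
 *
 *	static unsigned long poll_interval = 100;
 *	static DEVICE_ULONG_ATTR(poll_interval, 0644, poll_interval);
 *
 * and then, typically from probe():
 *
 *	ret = device_create_file(dev, &dev_attr_poll_interval.attr);
 */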
1034 | |
1035 | /** |
1036 | * device_release - free device structure. |
1037 | * @kobj: device's kobject. |
1038 | * |
1039 | * This is called once the reference count for the object |
1040 | * reaches 0. We forward the call to the device's release |
1041 | * method, which should handle actually freeing the structure. |
1042 | */ |
1043 | static void device_release(struct kobject *kobj) |
1044 | { |
1045 | struct device *dev = kobj_to_dev(kobj); |
1046 | struct device_private *p = dev->p; |
1047 | |
1048 | /* |
1049 | * Some platform devices are driven without driver attached |
1050 | * and managed resources may have been acquired. Make sure |
1051 | * all resources are released. |
1052 | * |
1053 | * Drivers still can add resources into device after device |
1054 | * is deleted but alive, so release devres here to avoid |
1055 | * possible memory leak. |
1056 | */ |
1057 | devres_release_all(dev); |
1058 | |
1059 | if (dev->release) |
1060 | dev->release(dev); |
1061 | else if (dev->type && dev->type->release) |
1062 | dev->type->release(dev); |
1063 | else if (dev->class && dev->class->dev_release) |
1064 | dev->class->dev_release(dev); |
1065 | else |
1066 | WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n" , |
1067 | dev_name(dev)); |
1068 | kfree(p); |
1069 | } |
1070 | |
1071 | static const void *device_namespace(struct kobject *kobj) |
1072 | { |
1073 | struct device *dev = kobj_to_dev(kobj); |
1074 | const void *ns = NULL; |
1075 | |
1076 | if (dev->class && dev->class->ns_type) |
1077 | ns = dev->class->namespace(dev); |
1078 | |
1079 | return ns; |
1080 | } |
1081 | |
1082 | static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) |
1083 | { |
1084 | struct device *dev = kobj_to_dev(kobj); |
1085 | |
1086 | if (dev->class && dev->class->get_ownership) |
1087 | dev->class->get_ownership(dev, uid, gid); |
1088 | } |
1089 | |
1090 | static struct kobj_type device_ktype = { |
1091 | .release = device_release, |
1092 | .sysfs_ops = &dev_sysfs_ops, |
1093 | .namespace = device_namespace, |
1094 | .get_ownership = device_get_ownership, |
1095 | }; |
1096 | |
1097 | |
1098 | static int dev_uevent_filter(struct kset *kset, struct kobject *kobj) |
1099 | { |
1100 | struct kobj_type *ktype = get_ktype(kobj); |
1101 | |
1102 | if (ktype == &device_ktype) { |
1103 | struct device *dev = kobj_to_dev(kobj); |
1104 | if (dev->bus) |
1105 | return 1; |
1106 | if (dev->class) |
1107 | return 1; |
1108 | } |
1109 | return 0; |
1110 | } |
1111 | |
1112 | static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) |
1113 | { |
1114 | struct device *dev = kobj_to_dev(kobj); |
1115 | |
1116 | if (dev->bus) |
1117 | return dev->bus->name; |
1118 | if (dev->class) |
1119 | return dev->class->name; |
1120 | return NULL; |
1121 | } |
1122 | |
1123 | static int dev_uevent(struct kset *kset, struct kobject *kobj, |
1124 | struct kobj_uevent_env *env) |
1125 | { |
1126 | struct device *dev = kobj_to_dev(kobj); |
1127 | int retval = 0; |
1128 | |
1129 | /* add device node properties if present */ |
1130 | if (MAJOR(dev->devt)) { |
1131 | const char *tmp; |
1132 | const char *name; |
1133 | umode_t mode = 0; |
1134 | kuid_t uid = GLOBAL_ROOT_UID; |
1135 | kgid_t gid = GLOBAL_ROOT_GID; |
1136 | |
1137 | add_uevent_var(env, "MAJOR=%u" , MAJOR(dev->devt)); |
1138 | add_uevent_var(env, "MINOR=%u" , MINOR(dev->devt)); |
1139 | name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); |
1140 | if (name) { |
1141 | add_uevent_var(env, "DEVNAME=%s" , name); |
1142 | if (mode) |
1143 | add_uevent_var(env, "DEVMODE=%#o" , mode & 0777); |
1144 | if (!uid_eq(uid, GLOBAL_ROOT_UID)) |
1145 | add_uevent_var(env, "DEVUID=%u" , from_kuid(&init_user_ns, uid)); |
1146 | if (!gid_eq(gid, GLOBAL_ROOT_GID)) |
1147 | add_uevent_var(env, "DEVGID=%u" , from_kgid(&init_user_ns, gid)); |
1148 | kfree(tmp); |
1149 | } |
1150 | } |
1151 | |
	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);
1157 | |
1158 | /* Add common DT information about the device */ |
1159 | of_device_uevent(dev, env); |
1160 | |
1161 | /* have the bus specific function add its stuff */ |
1162 | if (dev->bus && dev->bus->uevent) { |
1163 | retval = dev->bus->uevent(dev, env); |
1164 | if (retval) |
1165 | pr_debug("device: '%s': %s: bus uevent() returned %d\n" , |
1166 | dev_name(dev), __func__, retval); |
1167 | } |
1168 | |
1169 | /* have the class specific function add its stuff */ |
1170 | if (dev->class && dev->class->dev_uevent) { |
1171 | retval = dev->class->dev_uevent(dev, env); |
1172 | if (retval) |
1173 | pr_debug("device: '%s': %s: class uevent() " |
1174 | "returned %d\n" , dev_name(dev), |
1175 | __func__, retval); |
1176 | } |
1177 | |
1178 | /* have the device type specific function add its stuff */ |
1179 | if (dev->type && dev->type->uevent) { |
1180 | retval = dev->type->uevent(dev, env); |
1181 | if (retval) |
1182 | pr_debug("device: '%s': %s: dev_type uevent() " |
1183 | "returned %d\n" , dev_name(dev), |
1184 | __func__, retval); |
1185 | } |
1186 | |
1187 | return retval; |
1188 | } |
1189 | |
1190 | static const struct kset_uevent_ops device_uevent_ops = { |
1191 | .filter = dev_uevent_filter, |
1192 | .name = dev_uevent_name, |
1193 | .uevent = dev_uevent, |
1194 | }; |
1195 | |
1196 | static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, |
1197 | char *buf) |
1198 | { |
1199 | struct kobject *top_kobj; |
1200 | struct kset *kset; |
1201 | struct kobj_uevent_env *env = NULL; |
1202 | int i; |
1203 | size_t count = 0; |
1204 | int retval; |
1205 | |
	/* search the kset the device belongs to */
1207 | top_kobj = &dev->kobj; |
1208 | while (!top_kobj->kset && top_kobj->parent) |
1209 | top_kobj = top_kobj->parent; |
1210 | if (!top_kobj->kset) |
1211 | goto out; |
1212 | |
1213 | kset = top_kobj->kset; |
1214 | if (!kset->uevent_ops || !kset->uevent_ops->uevent) |
1215 | goto out; |
1216 | |
1217 | /* respect filter */ |
1218 | if (kset->uevent_ops && kset->uevent_ops->filter) |
1219 | if (!kset->uevent_ops->filter(kset, &dev->kobj)) |
1220 | goto out; |
1221 | |
1222 | env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); |
1223 | if (!env) |
1224 | return -ENOMEM; |
1225 | |
1226 | /* let the kset specific function add its keys */ |
1227 | retval = kset->uevent_ops->uevent(kset, &dev->kobj, env); |
1228 | if (retval) |
1229 | goto out; |
1230 | |
1231 | /* copy keys to file */ |
1232 | for (i = 0; i < env->envp_idx; i++) |
1233 | count += sprintf(&buf[count], "%s\n" , env->envp[i]); |
1234 | out: |
1235 | kfree(env); |
1236 | return count; |
1237 | } |
1238 | |
1239 | static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, |
1240 | const char *buf, size_t count) |
1241 | { |
1242 | int rc; |
1243 | |
1244 | rc = kobject_synth_uevent(&dev->kobj, buf, count); |
1245 | |
1246 | if (rc) { |
1247 | dev_err(dev, "uevent: failed to send synthetic uevent\n" ); |
1248 | return rc; |
1249 | } |
1250 | |
1251 | return count; |
1252 | } |
1253 | static DEVICE_ATTR_RW(uevent); |
1254 | |
1255 | static ssize_t online_show(struct device *dev, struct device_attribute *attr, |
1256 | char *buf) |
1257 | { |
1258 | bool val; |
1259 | |
1260 | device_lock(dev); |
1261 | val = !dev->offline; |
1262 | device_unlock(dev); |
1263 | return sprintf(buf, "%u\n" , val); |
1264 | } |
1265 | |
1266 | static ssize_t online_store(struct device *dev, struct device_attribute *attr, |
1267 | const char *buf, size_t count) |
1268 | { |
1269 | bool val; |
1270 | int ret; |
1271 | |
1272 | ret = strtobool(buf, &val); |
1273 | if (ret < 0) |
1274 | return ret; |
1275 | |
1276 | ret = lock_device_hotplug_sysfs(); |
1277 | if (ret) |
1278 | return ret; |
1279 | |
1280 | ret = val ? device_online(dev) : device_offline(dev); |
1281 | unlock_device_hotplug(); |
1282 | return ret < 0 ? ret : count; |
1283 | } |
1284 | static DEVICE_ATTR_RW(online); |
1285 | |
1286 | int device_add_groups(struct device *dev, const struct attribute_group **groups) |
1287 | { |
1288 | return sysfs_create_groups(&dev->kobj, groups); |
1289 | } |
1290 | EXPORT_SYMBOL_GPL(device_add_groups); |
1291 | |
1292 | void device_remove_groups(struct device *dev, |
1293 | const struct attribute_group **groups) |
1294 | { |
1295 | sysfs_remove_groups(&dev->kobj, groups); |
1296 | } |
1297 | EXPORT_SYMBOL_GPL(device_remove_groups); |
1298 | |
1299 | union device_attr_group_devres { |
1300 | const struct attribute_group *group; |
1301 | const struct attribute_group **groups; |
1302 | }; |
1303 | |
1304 | static int devm_attr_group_match(struct device *dev, void *res, void *data) |
1305 | { |
1306 | return ((union device_attr_group_devres *)res)->group == data; |
1307 | } |
1308 | |
1309 | static void devm_attr_group_remove(struct device *dev, void *res) |
1310 | { |
1311 | union device_attr_group_devres *devres = res; |
1312 | const struct attribute_group *group = devres->group; |
1313 | |
1314 | dev_dbg(dev, "%s: removing group %p\n" , __func__, group); |
1315 | sysfs_remove_group(&dev->kobj, group); |
1316 | } |
1317 | |
1318 | static void devm_attr_groups_remove(struct device *dev, void *res) |
1319 | { |
1320 | union device_attr_group_devres *devres = res; |
1321 | const struct attribute_group **groups = devres->groups; |
1322 | |
1323 | dev_dbg(dev, "%s: removing groups %p\n" , __func__, groups); |
1324 | sysfs_remove_groups(&dev->kobj, groups); |
1325 | } |
1326 | |
1327 | /** |
1328 | * devm_device_add_group - given a device, create a managed attribute group |
1329 | * @dev: The device to create the group for |
1330 | * @grp: The attribute group to create |
1331 | * |
1332 | * This function creates a group for the first time. It will explicitly |
1333 | * warn and error if any of the attribute files being created already exist. |
1334 | * |
1335 | * Returns 0 on success or error code on failure. |
1336 | */ |
1337 | int devm_device_add_group(struct device *dev, const struct attribute_group *grp) |
1338 | { |
1339 | union device_attr_group_devres *devres; |
1340 | int error; |
1341 | |
1342 | devres = devres_alloc(devm_attr_group_remove, |
1343 | sizeof(*devres), GFP_KERNEL); |
1344 | if (!devres) |
1345 | return -ENOMEM; |
1346 | |
1347 | error = sysfs_create_group(&dev->kobj, grp); |
1348 | if (error) { |
1349 | devres_free(devres); |
1350 | return error; |
1351 | } |
1352 | |
1353 | devres->group = grp; |
1354 | devres_add(dev, devres); |
1355 | return 0; |
1356 | } |
1357 | EXPORT_SYMBOL_GPL(devm_device_add_group); |
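
/*
 * Hypothetical probe-path sketch for devm_device_add_group(): the group is
 * removed automatically when the device is unbound, so no matching call is
 * needed in the remove path.  "dev_attr_bar" stands for any attribute
 * defined with DEVICE_ATTR_*().
 *
 *	static struct attribute *foo_attrs[] = {
 *		&dev_attr_bar.attr,
 *		NULL
 *	};
 *	static const struct attribute_group foo_group = {
 *		.attrs = foo_attrs,
 *	};
 *
 *	ret = devm_device_add_group(dev, &foo_group);
 *	if (ret)
 *		return ret;
 */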
1358 | |
1359 | /** |
 * devm_device_remove_group - remove a managed group from a device
1361 | * @dev: device to remove the group from |
1362 | * @grp: group to remove |
1363 | * |
1364 | * This function removes a group of attributes from a device. The attributes |
1365 | * previously have to have been created for this group, otherwise it will fail. |
1366 | */ |
1367 | void devm_device_remove_group(struct device *dev, |
1368 | const struct attribute_group *grp) |
1369 | { |
1370 | WARN_ON(devres_release(dev, devm_attr_group_remove, |
1371 | devm_attr_group_match, |
1372 | /* cast away const */ (void *)grp)); |
1373 | } |
1374 | EXPORT_SYMBOL_GPL(devm_device_remove_group); |
1375 | |
1376 | /** |
1377 | * devm_device_add_groups - create a bunch of managed attribute groups |
1378 | * @dev: The device to create the group for |
1379 | * @groups: The attribute groups to create, NULL terminated |
1380 | * |
1381 | * This function creates a bunch of managed attribute groups. If an error |
1382 | * occurs when creating a group, all previously created groups will be |
1383 | * removed, unwinding everything back to the original state when this |
1384 | * function was called. It will explicitly warn and error if any of the |
1385 | * attribute files being created already exist. |
1386 | * |
1387 | * Returns 0 on success or error code from sysfs_create_group on failure. |
1388 | */ |
1389 | int devm_device_add_groups(struct device *dev, |
1390 | const struct attribute_group **groups) |
1391 | { |
1392 | union device_attr_group_devres *devres; |
1393 | int error; |
1394 | |
1395 | devres = devres_alloc(devm_attr_groups_remove, |
1396 | sizeof(*devres), GFP_KERNEL); |
1397 | if (!devres) |
1398 | return -ENOMEM; |
1399 | |
1400 | error = sysfs_create_groups(&dev->kobj, groups); |
1401 | if (error) { |
1402 | devres_free(devres); |
1403 | return error; |
1404 | } |
1405 | |
1406 | devres->groups = groups; |
1407 | devres_add(dev, devres); |
1408 | return 0; |
1409 | } |
1410 | EXPORT_SYMBOL_GPL(devm_device_add_groups); |
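
/*
 * devm_device_add_groups() takes a NULL-terminated array of group pointers.
 * Building on the hypothetical foo_attrs[] array from the sketch above, the
 * ATTRIBUTE_GROUPS() helper from <linux/sysfs.h> can generate that array:
 *
 *	ATTRIBUTE_GROUPS(foo);
 *
 *	ret = devm_device_add_groups(dev, foo_groups);
 */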
1411 | |
1412 | /** |
1413 | * devm_device_remove_groups - remove a list of managed groups |
1414 | * |
1415 | * @dev: The device for the groups to be removed from |
1416 | * @groups: NULL terminated list of groups to be removed |
1417 | * |
1418 | * If groups is not NULL, remove the specified groups from the device. |
1419 | */ |
1420 | void devm_device_remove_groups(struct device *dev, |
1421 | const struct attribute_group **groups) |
1422 | { |
1423 | WARN_ON(devres_release(dev, devm_attr_groups_remove, |
1424 | devm_attr_group_match, |
1425 | /* cast away const */ (void *)groups)); |
1426 | } |
1427 | EXPORT_SYMBOL_GPL(devm_device_remove_groups); |
1428 | |
1429 | static int device_add_attrs(struct device *dev) |
1430 | { |
1431 | struct class *class = dev->class; |
1432 | const struct device_type *type = dev->type; |
1433 | int error; |
1434 | |
1435 | if (class) { |
1436 | error = device_add_groups(dev, class->dev_groups); |
1437 | if (error) |
1438 | return error; |
1439 | } |
1440 | |
1441 | if (type) { |
1442 | error = device_add_groups(dev, type->groups); |
1443 | if (error) |
1444 | goto err_remove_class_groups; |
1445 | } |
1446 | |
1447 | error = device_add_groups(dev, dev->groups); |
1448 | if (error) |
1449 | goto err_remove_type_groups; |
1450 | |
1451 | if (device_supports_offline(dev) && !dev->offline_disabled) { |
1452 | error = device_create_file(dev, &dev_attr_online); |
1453 | if (error) |
1454 | goto err_remove_dev_groups; |
1455 | } |
1456 | |
1457 | return 0; |
1458 | |
1459 | err_remove_dev_groups: |
1460 | device_remove_groups(dev, dev->groups); |
1461 | err_remove_type_groups: |
1462 | if (type) |
1463 | device_remove_groups(dev, type->groups); |
1464 | err_remove_class_groups: |
1465 | if (class) |
1466 | device_remove_groups(dev, class->dev_groups); |
1467 | |
1468 | return error; |
1469 | } |
1470 | |
1471 | static void device_remove_attrs(struct device *dev) |
1472 | { |
1473 | struct class *class = dev->class; |
1474 | const struct device_type *type = dev->type; |
1475 | |
1476 | device_remove_file(dev, &dev_attr_online); |
1477 | device_remove_groups(dev, dev->groups); |
1478 | |
1479 | if (type) |
1480 | device_remove_groups(dev, type->groups); |
1481 | |
1482 | if (class) |
1483 | device_remove_groups(dev, class->dev_groups); |
1484 | } |
1485 | |
1486 | static ssize_t dev_show(struct device *dev, struct device_attribute *attr, |
1487 | char *buf) |
1488 | { |
1489 | return print_dev_t(buf, dev->devt); |
1490 | } |
1491 | static DEVICE_ATTR_RO(dev); |
1492 | |
1493 | /* /sys/devices/ */ |
1494 | struct kset *devices_kset; |
1495 | |
1496 | /** |
1497 | * devices_kset_move_before - Move device in the devices_kset's list. |
1498 | * @deva: Device to move. |
1499 | * @devb: Device @deva should come before. |
1500 | */ |
1501 | static void devices_kset_move_before(struct device *deva, struct device *devb) |
1502 | { |
1503 | if (!devices_kset) |
1504 | return; |
1505 | pr_debug("devices_kset: Moving %s before %s\n" , |
1506 | dev_name(deva), dev_name(devb)); |
1507 | spin_lock(&devices_kset->list_lock); |
1508 | list_move_tail(&deva->kobj.entry, &devb->kobj.entry); |
1509 | spin_unlock(&devices_kset->list_lock); |
1510 | } |
1511 | |
1512 | /** |
1513 | * devices_kset_move_after - Move device in the devices_kset's list. |
1514 | * @deva: Device to move |
1515 | * @devb: Device @deva should come after. |
1516 | */ |
1517 | static void devices_kset_move_after(struct device *deva, struct device *devb) |
1518 | { |
1519 | if (!devices_kset) |
1520 | return; |
1521 | pr_debug("devices_kset: Moving %s after %s\n" , |
1522 | dev_name(deva), dev_name(devb)); |
1523 | spin_lock(&devices_kset->list_lock); |
1524 | list_move(&deva->kobj.entry, &devb->kobj.entry); |
1525 | spin_unlock(&devices_kset->list_lock); |
1526 | } |
1527 | |
1528 | /** |
1529 | * devices_kset_move_last - move the device to the end of devices_kset's list. |
1530 | * @dev: device to move |
1531 | */ |
1532 | void devices_kset_move_last(struct device *dev) |
1533 | { |
1534 | if (!devices_kset) |
1535 | return; |
1536 | pr_debug("devices_kset: Moving %s to end of list\n" , dev_name(dev)); |
1537 | spin_lock(&devices_kset->list_lock); |
1538 | list_move_tail(&dev->kobj.entry, &devices_kset->list); |
1539 | spin_unlock(&devices_kset->list_lock); |
1540 | } |
1541 | |
1542 | /** |
1543 | * device_create_file - create sysfs attribute file for device. |
1544 | * @dev: device. |
1545 | * @attr: device attribute descriptor. |
1546 | */ |
1547 | int device_create_file(struct device *dev, |
1548 | const struct device_attribute *attr) |
1549 | { |
1550 | int error = 0; |
1551 | |
1552 | if (dev) { |
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
		     "Attribute %s: write permission without 'store'\n",
		     attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
		     "Attribute %s: read permission without 'show'\n",
		     attr->attr.name);
1559 | error = sysfs_create_file(&dev->kobj, &attr->attr); |
1560 | } |
1561 | |
1562 | return error; |
1563 | } |
1564 | EXPORT_SYMBOL_GPL(device_create_file); |
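
/*
 * Sketch of wiring a hypothetical "level" attribute to device_create_file()
 * (get_level() is a stand-in for the driver's own accessor).  Most drivers
 * prefer dev->groups or the devm_* group helpers so that removal and error
 * handling are taken care of for them.
 *
 *	static ssize_t level_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", get_level(dev));
 *	}
 *	static DEVICE_ATTR_RO(level);
 *
 *	error = device_create_file(dev, &dev_attr_level);
 *	if (error)
 *		return error;
 */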
1565 | |
1566 | /** |
1567 | * device_remove_file - remove sysfs attribute file. |
1568 | * @dev: device. |
1569 | * @attr: device attribute descriptor. |
1570 | */ |
1571 | void device_remove_file(struct device *dev, |
1572 | const struct device_attribute *attr) |
1573 | { |
1574 | if (dev) |
1575 | sysfs_remove_file(&dev->kobj, &attr->attr); |
1576 | } |
1577 | EXPORT_SYMBOL_GPL(device_remove_file); |
1578 | |
1579 | /** |
1580 | * device_remove_file_self - remove sysfs attribute file from its own method. |
1581 | * @dev: device. |
1582 | * @attr: device attribute descriptor. |
1583 | * |
1584 | * See kernfs_remove_self() for details. |
1585 | */ |
1586 | bool device_remove_file_self(struct device *dev, |
1587 | const struct device_attribute *attr) |
1588 | { |
1589 | if (dev) |
1590 | return sysfs_remove_file_self(&dev->kobj, &attr->attr); |
1591 | else |
1592 | return false; |
1593 | } |
1594 | EXPORT_SYMBOL_GPL(device_remove_file_self); |
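
/*
 * Sketch of the self-removal pattern this helper supports: a hypothetical
 * "delete" attribute whose store callback removes its own sysfs file before
 * unregistering the device, so the teardown cannot deadlock on the still
 * active attribute (see kernfs_remove_self() for the underlying mechanism).
 *
 *	static ssize_t delete_store(struct device *dev,
 *				    struct device_attribute *attr,
 *				    const char *buf, size_t count)
 *	{
 *		if (device_remove_file_self(dev, attr))
 *			device_unregister(dev);
 *		return count;
 *	}
 *	static DEVICE_ATTR_WO(delete);
 */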
1595 | |
1596 | /** |
1597 | * device_create_bin_file - create sysfs binary attribute file for device. |
1598 | * @dev: device. |
1599 | * @attr: device binary attribute descriptor. |
1600 | */ |
1601 | int device_create_bin_file(struct device *dev, |
1602 | const struct bin_attribute *attr) |
1603 | { |
1604 | int error = -EINVAL; |
1605 | if (dev) |
1606 | error = sysfs_create_bin_file(&dev->kobj, attr); |
1607 | return error; |
1608 | } |
1609 | EXPORT_SYMBOL_GPL(device_create_bin_file); |
1610 | |
1611 | /** |
1612 | * device_remove_bin_file - remove sysfs binary attribute file |
1613 | * @dev: device. |
1614 | * @attr: device binary attribute descriptor. |
1615 | */ |
1616 | void device_remove_bin_file(struct device *dev, |
1617 | const struct bin_attribute *attr) |
1618 | { |
1619 | if (dev) |
1620 | sysfs_remove_bin_file(&dev->kobj, attr); |
1621 | } |
1622 | EXPORT_SYMBOL_GPL(device_remove_bin_file); |
1623 | |
1624 | static void klist_children_get(struct klist_node *n) |
1625 | { |
1626 | struct device_private *p = to_device_private_parent(n); |
1627 | struct device *dev = p->device; |
1628 | |
1629 | get_device(dev); |
1630 | } |
1631 | |
1632 | static void klist_children_put(struct klist_node *n) |
1633 | { |
1634 | struct device_private *p = to_device_private_parent(n); |
1635 | struct device *dev = p->device; |
1636 | |
1637 | put_device(dev); |
1638 | } |
1639 | |
1640 | /** |
1641 | * device_initialize - init device structure. |
1642 | * @dev: device. |
1643 | * |
1644 | * This prepares the device for use by other layers by initializing |
1645 | * its fields. |
1646 | * It is the first half of device_register(), if called by |
1647 | * that function, though it can also be called separately, so one |
1648 | * may use @dev's fields. In particular, get_device()/put_device() |
1649 | * may be used for reference counting of @dev after calling this |
1650 | * function. |
1651 | * |
1652 | * All fields in @dev must be initialized by the caller to 0, except |
1653 | * for those explicitly set to some other value. The simplest |
1654 | * approach is to use kzalloc() to allocate the structure containing |
1655 | * @dev. |
1656 | * |
1657 | * NOTE: Use put_device() to give up your reference instead of freeing |
1658 | * @dev directly once you have called this function. |
1659 | */ |
1660 | void device_initialize(struct device *dev) |
1661 | { |
1662 | dev->kobj.kset = devices_kset; |
1663 | kobject_init(&dev->kobj, &device_ktype); |
1664 | INIT_LIST_HEAD(&dev->dma_pools); |
1665 | mutex_init(&dev->mutex); |
1666 | lockdep_set_novalidate_class(&dev->mutex); |
1667 | spin_lock_init(&dev->devres_lock); |
1668 | INIT_LIST_HEAD(&dev->devres_head); |
1669 | device_pm_init(dev); |
1670 | set_dev_node(dev, -1); |
1671 | #ifdef CONFIG_GENERIC_MSI_IRQ |
1672 | INIT_LIST_HEAD(&dev->msi_list); |
1673 | #endif |
1674 | INIT_LIST_HEAD(&dev->links.consumers); |
1675 | INIT_LIST_HEAD(&dev->links.suppliers); |
1676 | dev->links.status = DL_DEV_NO_DRIVER; |
1677 | } |
1678 | EXPORT_SYMBOL_GPL(device_initialize); |
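
/*
 * Example: a minimal sketch of the two-step pattern described above, using
 * a hypothetical "foo" driver that is not part of this file.  The embedded
 * struct device comes from kzalloc() and is therefore zeroed; once
 * device_initialize() has run, the structure may only be released through
 * put_device(), which ends up in the release callback.
 *
 *	struct foo_device {
 *		struct device dev;
 *	};
 *
 *	static void foo_release(struct device *dev)
 *	{
 *		kfree(container_of(dev, struct foo_device, dev));
 *	}
 *
 *	struct foo_device *foo;
 *
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	device_initialize(&foo->dev);
 *	foo->dev.release = foo_release;
 *	dev_set_name(&foo->dev, "foo0");
 *	if (device_add(&foo->dev)) {
 *		put_device(&foo->dev);	// frees foo via foo_release()
 *		return -ENODEV;
 *	}
 */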
1679 | |
1680 | struct kobject *virtual_device_parent(struct device *dev) |
1681 | { |
1682 | static struct kobject *virtual_dir = NULL; |
1683 | |
1684 | if (!virtual_dir) |
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);
1687 | |
1688 | return virtual_dir; |
1689 | } |
1690 | |
1691 | struct class_dir { |
1692 | struct kobject kobj; |
1693 | struct class *class; |
1694 | }; |
1695 | |
1696 | #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) |
1697 | |
1698 | static void class_dir_release(struct kobject *kobj) |
1699 | { |
1700 | struct class_dir *dir = to_class_dir(kobj); |
1701 | kfree(dir); |
1702 | } |
1703 | |
1704 | static const |
1705 | struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) |
1706 | { |
1707 | struct class_dir *dir = to_class_dir(kobj); |
1708 | return dir->class->ns_type; |
1709 | } |
1710 | |
1711 | static struct kobj_type class_dir_ktype = { |
1712 | .release = class_dir_release, |
1713 | .sysfs_ops = &kobj_sysfs_ops, |
1714 | .child_ns_type = class_dir_child_ns_type |
1715 | }; |
1716 | |
1717 | static struct kobject * |
1718 | class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) |
1719 | { |
1720 | struct class_dir *dir; |
1721 | int retval; |
1722 | |
1723 | dir = kzalloc(sizeof(*dir), GFP_KERNEL); |
1724 | if (!dir) |
1725 | return ERR_PTR(-ENOMEM); |
1726 | |
1727 | dir->class = class; |
1728 | kobject_init(&dir->kobj, &class_dir_ktype); |
1729 | |
1730 | dir->kobj.kset = &class->p->glue_dirs; |
1731 | |
	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
1733 | if (retval < 0) { |
1734 | kobject_put(&dir->kobj); |
1735 | return ERR_PTR(retval); |
1736 | } |
1737 | return &dir->kobj; |
1738 | } |
1739 | |
1740 | static DEFINE_MUTEX(gdp_mutex); |
1741 | |
1742 | static struct kobject *get_device_parent(struct device *dev, |
1743 | struct device *parent) |
1744 | { |
1745 | if (dev->class) { |
1746 | struct kobject *kobj = NULL; |
1747 | struct kobject *parent_kobj; |
1748 | struct kobject *k; |
1749 | |
1750 | #ifdef CONFIG_BLOCK |
1751 | /* block disks show up in /sys/block */ |
1752 | if (sysfs_deprecated && dev->class == &block_class) { |
1753 | if (parent && parent->class == &block_class) |
1754 | return &parent->kobj; |
1755 | return &block_class.p->subsys.kobj; |
1756 | } |
1757 | #endif |
1758 | |
1759 | /* |
1760 | * If we have no parent, we live in "virtual". |
	 * Class-devices with a non class-device as parent live
	 * in a "glue" directory to prevent namespace collisions.
1763 | */ |
1764 | if (parent == NULL) |
1765 | parent_kobj = virtual_device_parent(dev); |
1766 | else if (parent->class && !dev->class->ns_type) |
1767 | return &parent->kobj; |
1768 | else |
1769 | parent_kobj = &parent->kobj; |
1770 | |
1771 | mutex_lock(&gdp_mutex); |
1772 | |
1773 | /* find our class-directory at the parent and reference it */ |
1774 | spin_lock(&dev->class->p->glue_dirs.list_lock); |
1775 | list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) |
1776 | if (k->parent == parent_kobj) { |
1777 | kobj = kobject_get(k); |
1778 | break; |
1779 | } |
1780 | spin_unlock(&dev->class->p->glue_dirs.list_lock); |
1781 | if (kobj) { |
1782 | mutex_unlock(&gdp_mutex); |
1783 | return kobj; |
1784 | } |
1785 | |
1786 | /* or create a new class-directory at the parent device */ |
1787 | k = class_dir_create_and_add(dev->class, parent_kobj); |
1788 | /* do not emit an uevent for this simple "glue" directory */ |
1789 | mutex_unlock(&gdp_mutex); |
1790 | return k; |
1791 | } |
1792 | |
1793 | /* subsystems can specify a default root directory for their devices */ |
1794 | if (!parent && dev->bus && dev->bus->dev_root) |
1795 | return &dev->bus->dev_root->kobj; |
1796 | |
1797 | if (parent) |
1798 | return &parent->kobj; |
1799 | return NULL; |
1800 | } |
1801 | |
1802 | static inline bool live_in_glue_dir(struct kobject *kobj, |
1803 | struct device *dev) |
1804 | { |
1805 | if (!kobj || !dev->class || |
1806 | kobj->kset != &dev->class->p->glue_dirs) |
1807 | return false; |
1808 | return true; |
1809 | } |
1810 | |
1811 | static inline struct kobject *get_glue_dir(struct device *dev) |
1812 | { |
1813 | return dev->kobj.parent; |
1814 | } |
1815 | |
/*
 * Make sure cleaning up the glue dir is the last step: the kobject's
 * .release handler must run while the global gdp_mutex is held.
 */
1821 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) |
1822 | { |
1823 | /* see if we live in a "glue" directory */ |
1824 | if (!live_in_glue_dir(glue_dir, dev)) |
1825 | return; |
1826 | |
1827 | mutex_lock(&gdp_mutex); |
1828 | if (!kobject_has_children(glue_dir)) |
1829 | kobject_del(glue_dir); |
1830 | kobject_put(glue_dir); |
1831 | mutex_unlock(&gdp_mutex); |
1832 | } |
1833 | |
1834 | static int device_add_class_symlinks(struct device *dev) |
1835 | { |
1836 | struct device_node *of_node = dev_of_node(dev); |
1837 | int error; |
1838 | |
1839 | if (of_node) { |
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n", error);
1843 | /* An error here doesn't warrant bringing down the device */ |
1844 | } |
1845 | |
1846 | if (!dev->class) |
1847 | return 0; |
1848 | |
	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
1852 | if (error) |
1853 | goto out_devnode; |
1854 | |
1855 | if (dev->parent && device_is_not_partition(dev)) { |
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
1858 | if (error) |
1859 | goto out_subsys; |
1860 | } |
1861 | |
1862 | #ifdef CONFIG_BLOCK |
1863 | /* /sys/block has directories and does not need symlinks */ |
1864 | if (sysfs_deprecated && dev->class == &block_class) |
1865 | return 0; |
1866 | #endif |
1867 | |
1868 | /* link in the class directory pointing to the device */ |
1869 | error = sysfs_create_link(&dev->class->p->subsys.kobj, |
1870 | &dev->kobj, dev_name(dev)); |
1871 | if (error) |
1872 | goto out_device; |
1873 | |
1874 | return 0; |
1875 | |
1876 | out_device: |
	sysfs_remove_link(&dev->kobj, "device");

 out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
 out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
1883 | return error; |
1884 | } |
1885 | |
1886 | static void device_remove_class_symlinks(struct device *dev) |
1887 | { |
1888 | if (dev_of_node(dev)) |
		sysfs_remove_link(&dev->kobj, "of_node");
1890 | |
1891 | if (!dev->class) |
1892 | return; |
1893 | |
1894 | if (dev->parent && device_is_not_partition(dev)) |
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
1897 | #ifdef CONFIG_BLOCK |
1898 | if (sysfs_deprecated && dev->class == &block_class) |
1899 | return; |
1900 | #endif |
1901 | sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev)); |
1902 | } |
1903 | |
1904 | /** |
1905 | * dev_set_name - set a device name |
1906 | * @dev: device |
1907 | * @fmt: format string for the device's name |
1908 | */ |
1909 | int dev_set_name(struct device *dev, const char *fmt, ...) |
1910 | { |
1911 | va_list vargs; |
1912 | int err; |
1913 | |
1914 | va_start(vargs, fmt); |
1915 | err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); |
1916 | va_end(vargs); |
1917 | return err; |
1918 | } |
1919 | EXPORT_SYMBOL_GPL(dev_set_name); |
1920 | |
1921 | /** |
1922 | * device_to_dev_kobj - select a /sys/dev/ directory for the device |
1923 | * @dev: device |
1924 | * |
 * By default we select char/ for new entries.  Setting class->dev_kobj
 * to NULL prevents an entry from being created.  class->dev_kobj must
 * be set (or cleared) before any devices are registered to the class,
 * otherwise device_create_sys_dev_entry() and
 * device_remove_sys_dev_entry() will disagree about the presence of
 * the link.
1931 | */ |
1932 | static struct kobject *device_to_dev_kobj(struct device *dev) |
1933 | { |
1934 | struct kobject *kobj; |
1935 | |
1936 | if (dev->class) |
1937 | kobj = dev->class->dev_kobj; |
1938 | else |
1939 | kobj = sysfs_dev_char_kobj; |
1940 | |
1941 | return kobj; |
1942 | } |
1943 | |
1944 | static int device_create_sys_dev_entry(struct device *dev) |
1945 | { |
1946 | struct kobject *kobj = device_to_dev_kobj(dev); |
1947 | int error = 0; |
1948 | char devt_str[15]; |
1949 | |
1950 | if (kobj) { |
1951 | format_dev_t(devt_str, dev->devt); |
1952 | error = sysfs_create_link(kobj, &dev->kobj, devt_str); |
1953 | } |
1954 | |
1955 | return error; |
1956 | } |
1957 | |
1958 | static void device_remove_sys_dev_entry(struct device *dev) |
1959 | { |
1960 | struct kobject *kobj = device_to_dev_kobj(dev); |
1961 | char devt_str[15]; |
1962 | |
1963 | if (kobj) { |
1964 | format_dev_t(devt_str, dev->devt); |
1965 | sysfs_remove_link(kobj, devt_str); |
1966 | } |
1967 | } |
1968 | |
1969 | static int device_private_init(struct device *dev) |
1970 | { |
1971 | dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); |
1972 | if (!dev->p) |
1973 | return -ENOMEM; |
1974 | dev->p->device = dev; |
1975 | klist_init(&dev->p->klist_children, klist_children_get, |
1976 | klist_children_put); |
1977 | INIT_LIST_HEAD(&dev->p->deferred_probe); |
1978 | return 0; |
1979 | } |
1980 | |
1981 | /** |
1982 | * device_add - add device to device hierarchy. |
1983 | * @dev: device. |
1984 | * |
1985 | * This is part 2 of device_register(), though may be called |
1986 | * separately _iff_ device_initialize() has been called separately. |
1987 | * |
1988 | * This adds @dev to the kobject hierarchy via kobject_add(), adds it |
1989 | * to the global and sibling lists for the device, then |
1990 | * adds it to the other relevant subsystems of the driver model. |
1991 | * |
1992 | * Do not call this routine or device_register() more than once for |
1993 | * any device structure. The driver model core is not designed to work |
1994 | * with devices that get unregistered and then spring back to life. |
1995 | * (Among other things, it's very hard to guarantee that all references |
1996 | * to the previous incarnation of @dev have been dropped.) Allocate |
1997 | * and register a fresh new struct device instead. |
1998 | * |
1999 | * NOTE: _Never_ directly free @dev after calling this function, even |
2000 | * if it returned an error! Always use put_device() to give up your |
2001 | * reference instead. |
2002 | */ |
2003 | int device_add(struct device *dev) |
2004 | { |
2005 | struct device *parent; |
2006 | struct kobject *kobj; |
2007 | struct class_interface *class_intf; |
2008 | int error = -EINVAL; |
2009 | struct kobject *glue_dir = NULL; |
2010 | |
2011 | dev = get_device(dev); |
2012 | if (!dev) |
2013 | goto done; |
2014 | |
2015 | if (!dev->p) { |
2016 | error = device_private_init(dev); |
2017 | if (error) |
2018 | goto done; |
2019 | } |
2020 | |
2021 | /* |
2022 | * for statically allocated devices, which should all be converted |
2023 | * some day, we need to initialize the name. We prevent reading back |
2024 | * the name, and force the use of dev_name() |
2025 | */ |
2026 | if (dev->init_name) { |
		dev_set_name(dev, "%s", dev->init_name);
2028 | dev->init_name = NULL; |
2029 | } |
2030 | |
2031 | /* subsystems can specify simple device enumeration */ |
2032 | if (!dev_name(dev) && dev->bus && dev->bus->dev_name) |
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
2034 | |
2035 | if (!dev_name(dev)) { |
2036 | error = -EINVAL; |
2037 | goto name_error; |
2038 | } |
2039 | |
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2041 | |
2042 | parent = get_device(dev->parent); |
2043 | kobj = get_device_parent(dev, parent); |
2044 | if (IS_ERR(kobj)) { |
2045 | error = PTR_ERR(kobj); |
2046 | goto parent_error; |
2047 | } |
2048 | if (kobj) |
2049 | dev->kobj.parent = kobj; |
2050 | |
2051 | /* use parent numa_node */ |
2052 | if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) |
2053 | set_dev_node(dev, dev_to_node(parent)); |
2054 | |
2055 | /* first, register with generic layer. */ |
2056 | /* we require the name to be set before, and pass NULL */ |
2057 | error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); |
2058 | if (error) { |
2059 | glue_dir = get_glue_dir(dev); |
2060 | goto Error; |
2061 | } |
2062 | |
2063 | /* notify platform of device entry */ |
2064 | error = device_platform_notify(dev, KOBJ_ADD); |
2065 | if (error) |
2066 | goto platform_error; |
2067 | |
2068 | error = device_create_file(dev, &dev_attr_uevent); |
2069 | if (error) |
2070 | goto attrError; |
2071 | |
2072 | error = device_add_class_symlinks(dev); |
2073 | if (error) |
2074 | goto SymlinkError; |
2075 | error = device_add_attrs(dev); |
2076 | if (error) |
2077 | goto AttrsError; |
2078 | error = bus_add_device(dev); |
2079 | if (error) |
2080 | goto BusError; |
2081 | error = dpm_sysfs_add(dev); |
2082 | if (error) |
2083 | goto DPMError; |
2084 | device_pm_add(dev); |
2085 | |
2086 | if (MAJOR(dev->devt)) { |
2087 | error = device_create_file(dev, &dev_attr_dev); |
2088 | if (error) |
2089 | goto DevAttrError; |
2090 | |
2091 | error = device_create_sys_dev_entry(dev); |
2092 | if (error) |
2093 | goto SysEntryError; |
2094 | |
2095 | devtmpfs_create_node(dev); |
2096 | } |
2097 | |
2098 | /* Notify clients of device addition. This call must come |
2099 | * after dpm_sysfs_add() and before kobject_uevent(). |
2100 | */ |
2101 | if (dev->bus) |
2102 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
2103 | BUS_NOTIFY_ADD_DEVICE, dev); |
2104 | |
2105 | kobject_uevent(&dev->kobj, KOBJ_ADD); |
2106 | bus_probe_device(dev); |
2107 | if (parent) |
2108 | klist_add_tail(&dev->p->knode_parent, |
2109 | &parent->p->klist_children); |
2110 | |
2111 | if (dev->class) { |
2112 | mutex_lock(&dev->class->p->mutex); |
2113 | /* tie the class to the device */ |
2114 | klist_add_tail(&dev->p->knode_class, |
2115 | &dev->class->p->klist_devices); |
2116 | |
2117 | /* notify any interfaces that the device is here */ |
2118 | list_for_each_entry(class_intf, |
2119 | &dev->class->p->interfaces, node) |
2120 | if (class_intf->add_dev) |
2121 | class_intf->add_dev(dev, class_intf); |
2122 | mutex_unlock(&dev->class->p->mutex); |
2123 | } |
2124 | done: |
2125 | put_device(dev); |
2126 | return error; |
2127 | SysEntryError: |
2128 | if (MAJOR(dev->devt)) |
2129 | device_remove_file(dev, &dev_attr_dev); |
2130 | DevAttrError: |
2131 | device_pm_remove(dev); |
2132 | dpm_sysfs_remove(dev); |
2133 | DPMError: |
2134 | bus_remove_device(dev); |
2135 | BusError: |
2136 | device_remove_attrs(dev); |
2137 | AttrsError: |
2138 | device_remove_class_symlinks(dev); |
2139 | SymlinkError: |
2140 | device_remove_file(dev, &dev_attr_uevent); |
2141 | attrError: |
2142 | device_platform_notify(dev, KOBJ_REMOVE); |
2143 | platform_error: |
2144 | kobject_uevent(&dev->kobj, KOBJ_REMOVE); |
2145 | glue_dir = get_glue_dir(dev); |
2146 | kobject_del(&dev->kobj); |
2147 | Error: |
2148 | cleanup_glue_dir(dev, glue_dir); |
2149 | parent_error: |
2150 | put_device(parent); |
2151 | name_error: |
2152 | kfree(dev->p); |
2153 | dev->p = NULL; |
2154 | goto done; |
2155 | } |
2156 | EXPORT_SYMBOL_GPL(device_add); |
2157 | |
2158 | /** |
2159 | * device_register - register a device with the system. |
2160 | * @dev: pointer to the device structure |
2161 | * |
2162 | * This happens in two clean steps - initialize the device |
2163 | * and add it to the system. The two steps can be called |
2164 | * separately, but this is the easiest and most common. |
 * I.e. you should only call the two helpers separately if
 * you have a clearly defined need to use and refcount the
 * device before it is added to the hierarchy.
2168 | * |
2169 | * For more information, see the kerneldoc for device_initialize() |
2170 | * and device_add(). |
2171 | * |
2172 | * NOTE: _Never_ directly free @dev after calling this function, even |
2173 | * if it returned an error! Always use put_device() to give up the |
2174 | * reference initialized in this function instead. |
2175 | */ |
2176 | int device_register(struct device *dev) |
2177 | { |
2178 | device_initialize(dev); |
2179 | return device_add(dev); |
2180 | } |
2181 | EXPORT_SYMBOL_GPL(device_register); |
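
/*
 * Example: a minimal sketch of the common one-step path, reusing the
 * hypothetical foo_device from the device_initialize() example above.
 * Even when device_register() fails, the reference it took must be
 * dropped with put_device() rather than kfree().
 *
 *	err = device_register(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);
 *		return err;
 *	}
 *	...
 *	device_unregister(&foo->dev);	// device_del() + put_device()
 */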
2182 | |
2183 | /** |
2184 | * get_device - increment reference count for device. |
2185 | * @dev: device. |
2186 | * |
2187 | * This simply forwards the call to kobject_get(), though |
2188 | * we do take care to provide for the case that we get a NULL |
2189 | * pointer passed in. |
2190 | */ |
2191 | struct device *get_device(struct device *dev) |
2192 | { |
2193 | return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL; |
2194 | } |
2195 | EXPORT_SYMBOL_GPL(get_device); |
2196 | |
2197 | /** |
2198 | * put_device - decrement reference count. |
2199 | * @dev: device in question. |
2200 | */ |
2201 | void put_device(struct device *dev) |
2202 | { |
2203 | /* might_sleep(); */ |
2204 | if (dev) |
2205 | kobject_put(&dev->kobj); |
2206 | } |
2207 | EXPORT_SYMBOL_GPL(put_device); |
2208 | |
2209 | /** |
2210 | * device_del - delete device from system. |
2211 | * @dev: device. |
2212 | * |
2213 | * This is the first part of the device unregistration |
2214 | * sequence. This removes the device from the lists we control |
2215 | * from here, has it removed from the other driver model |
2216 | * subsystems it was added to in device_add(), and removes it |
2217 | * from the kobject hierarchy. |
2218 | * |
2219 | * NOTE: this should be called manually _iff_ device_add() was |
2220 | * also called manually. |
2221 | */ |
2222 | void device_del(struct device *dev) |
2223 | { |
2224 | struct device *parent = dev->parent; |
2225 | struct kobject *glue_dir = NULL; |
2226 | struct class_interface *class_intf; |
2227 | |
2228 | /* |
2229 | * Hold the device lock and set the "dead" flag to guarantee that |
2230 | * the update behavior is consistent with the other bitfields near |
2231 | * it and that we cannot have an asynchronous probe routine trying |
2232 | * to run while we are tearing out the bus/class/sysfs from |
2233 | * underneath the device. |
2234 | */ |
2235 | device_lock(dev); |
2236 | dev->p->dead = true; |
2237 | device_unlock(dev); |
2238 | |
2239 | /* Notify clients of device removal. This call must come |
2240 | * before dpm_sysfs_remove(). |
2241 | */ |
2242 | if (dev->bus) |
2243 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
2244 | BUS_NOTIFY_DEL_DEVICE, dev); |
2245 | |
2246 | dpm_sysfs_remove(dev); |
2247 | if (parent) |
2248 | klist_del(&dev->p->knode_parent); |
2249 | if (MAJOR(dev->devt)) { |
2250 | devtmpfs_delete_node(dev); |
2251 | device_remove_sys_dev_entry(dev); |
2252 | device_remove_file(dev, &dev_attr_dev); |
2253 | } |
2254 | if (dev->class) { |
2255 | device_remove_class_symlinks(dev); |
2256 | |
2257 | mutex_lock(&dev->class->p->mutex); |
2258 | /* notify any interfaces that the device is now gone */ |
2259 | list_for_each_entry(class_intf, |
2260 | &dev->class->p->interfaces, node) |
2261 | if (class_intf->remove_dev) |
2262 | class_intf->remove_dev(dev, class_intf); |
2263 | /* remove the device from the class list */ |
2264 | klist_del(&dev->p->knode_class); |
2265 | mutex_unlock(&dev->class->p->mutex); |
2266 | } |
2267 | device_remove_file(dev, &dev_attr_uevent); |
2268 | device_remove_attrs(dev); |
2269 | bus_remove_device(dev); |
2270 | device_pm_remove(dev); |
2271 | driver_deferred_probe_del(dev); |
2272 | device_platform_notify(dev, KOBJ_REMOVE); |
2273 | device_remove_properties(dev); |
2274 | device_links_purge(dev); |
2275 | |
2276 | if (dev->bus) |
2277 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
2278 | BUS_NOTIFY_REMOVED_DEVICE, dev); |
2279 | kobject_uevent(&dev->kobj, KOBJ_REMOVE); |
2280 | glue_dir = get_glue_dir(dev); |
2281 | kobject_del(&dev->kobj); |
2282 | cleanup_glue_dir(dev, glue_dir); |
2283 | put_device(parent); |
2284 | } |
2285 | EXPORT_SYMBOL_GPL(device_del); |
2286 | |
2287 | /** |
2288 | * device_unregister - unregister device from system. |
2289 | * @dev: device going away. |
2290 | * |
2291 | * We do this in two parts, like we do device_register(). First, |
2292 | * we remove it from all the subsystems with device_del(), then |
2293 | * we decrement the reference count via put_device(). If that |
2294 | * is the final reference count, the device will be cleaned up |
2295 | * via device_release() above. Otherwise, the structure will |
2296 | * stick around until the final reference to the device is dropped. |
2297 | */ |
2298 | void device_unregister(struct device *dev) |
2299 | { |
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2301 | device_del(dev); |
2302 | put_device(dev); |
2303 | } |
2304 | EXPORT_SYMBOL_GPL(device_unregister); |
2305 | |
2306 | static struct device *prev_device(struct klist_iter *i) |
2307 | { |
2308 | struct klist_node *n = klist_prev(i); |
2309 | struct device *dev = NULL; |
2310 | struct device_private *p; |
2311 | |
2312 | if (n) { |
2313 | p = to_device_private_parent(n); |
2314 | dev = p->device; |
2315 | } |
2316 | return dev; |
2317 | } |
2318 | |
2319 | static struct device *next_device(struct klist_iter *i) |
2320 | { |
2321 | struct klist_node *n = klist_next(i); |
2322 | struct device *dev = NULL; |
2323 | struct device_private *p; |
2324 | |
2325 | if (n) { |
2326 | p = to_device_private_parent(n); |
2327 | dev = p->device; |
2328 | } |
2329 | return dev; |
2330 | } |
2331 | |
2332 | /** |
2333 | * device_get_devnode - path of device node file |
2334 | * @dev: device |
2335 | * @mode: returned file access mode |
2336 | * @uid: returned file owner |
2337 | * @gid: returned file group |
2338 | * @tmp: possibly allocated string |
2339 | * |
2340 | * Return the relative path of a possible device node. |
 * Non-default names may need to allocate memory to compose a name.
 * This memory is returned in @tmp and must be freed by the caller.
2344 | */ |
2345 | const char *device_get_devnode(struct device *dev, |
2346 | umode_t *mode, kuid_t *uid, kgid_t *gid, |
2347 | const char **tmp) |
2348 | { |
2349 | char *s; |
2350 | |
2351 | *tmp = NULL; |
2352 | |
2353 | /* the device type may provide a specific name */ |
2354 | if (dev->type && dev->type->devnode) |
2355 | *tmp = dev->type->devnode(dev, mode, uid, gid); |
2356 | if (*tmp) |
2357 | return *tmp; |
2358 | |
2359 | /* the class may provide a specific name */ |
2360 | if (dev->class && dev->class->devnode) |
2361 | *tmp = dev->class->devnode(dev, mode); |
2362 | if (*tmp) |
2363 | return *tmp; |
2364 | |
2365 | /* return name without allocation, tmp == NULL */ |
2366 | if (strchr(dev_name(dev), '!') == NULL) |
2367 | return dev_name(dev); |
2368 | |
2369 | /* replace '!' in the name with '/' */ |
2370 | s = kstrdup(dev_name(dev), GFP_KERNEL); |
2371 | if (!s) |
2372 | return NULL; |
2373 | strreplace(s, '!', '/'); |
2374 | return *tmp = s; |
2375 | } |
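
/*
 * Example: a block device registered as "cciss!c0d0" would yield the node
 * path "cciss/c0d0" here; the duplicated string is handed back through
 * @tmp and must be freed by the caller, while a name without '!' is
 * returned directly with *tmp left NULL.
 */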
2376 | |
2377 | /** |
2378 | * device_for_each_child - device child iterator. |
2379 | * @parent: parent struct device. |
2380 | * @fn: function to be called for each device. |
2381 | * @data: data for the callback. |
2382 | * |
2383 | * Iterate over @parent's child devices, and call @fn for each, |
2384 | * passing it @data. |
2385 | * |
2386 | * We check the return of @fn each time. If it returns anything |
2387 | * other than 0, we break out and return that value. |
2388 | */ |
2389 | int device_for_each_child(struct device *parent, void *data, |
2390 | int (*fn)(struct device *dev, void *data)) |
2391 | { |
2392 | struct klist_iter i; |
2393 | struct device *child; |
2394 | int error = 0; |
2395 | |
2396 | if (!parent->p) |
2397 | return 0; |
2398 | |
2399 | klist_iter_init(&parent->p->klist_children, &i); |
2400 | while (!error && (child = next_device(&i))) |
2401 | error = fn(child, data); |
2402 | klist_iter_exit(&i); |
2403 | return error; |
2404 | } |
2405 | EXPORT_SYMBOL_GPL(device_for_each_child); |
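
/*
 * Example: a minimal sketch (hypothetical helper, not part of this file)
 * that counts the children of a device.  Returning 0 from the callback
 * keeps the iteration going over every child.
 *
 *	static int foo_count_child(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	device_for_each_child(parent, &count, foo_count_child);
 */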
2406 | |
2407 | /** |
2408 | * device_for_each_child_reverse - device child iterator in reversed order. |
2409 | * @parent: parent struct device. |
2410 | * @fn: function to be called for each device. |
2411 | * @data: data for the callback. |
2412 | * |
2413 | * Iterate over @parent's child devices, and call @fn for each, |
2414 | * passing it @data. |
2415 | * |
2416 | * We check the return of @fn each time. If it returns anything |
2417 | * other than 0, we break out and return that value. |
2418 | */ |
2419 | int device_for_each_child_reverse(struct device *parent, void *data, |
2420 | int (*fn)(struct device *dev, void *data)) |
2421 | { |
2422 | struct klist_iter i; |
2423 | struct device *child; |
2424 | int error = 0; |
2425 | |
2426 | if (!parent->p) |
2427 | return 0; |
2428 | |
2429 | klist_iter_init(&parent->p->klist_children, &i); |
2430 | while ((child = prev_device(&i)) && !error) |
2431 | error = fn(child, data); |
2432 | klist_iter_exit(&i); |
2433 | return error; |
2434 | } |
2435 | EXPORT_SYMBOL_GPL(device_for_each_child_reverse); |
2436 | |
2437 | /** |
2438 | * device_find_child - device iterator for locating a particular device. |
2439 | * @parent: parent struct device |
2440 | * @match: Callback function to check device |
2441 | * @data: Data to pass to match function |
2442 | * |
2443 | * This is similar to the device_for_each_child() function above, but it |
2444 | * returns a reference to a device that is 'found' for later use, as |
2445 | * determined by the @match callback. |
2446 | * |
2447 | * The callback should return 0 if the device doesn't match and non-zero |
2448 | * if it does. If the callback returns non-zero and a reference to the |
2449 | * current device can be obtained, this function will return to the caller |
2450 | * and not iterate over any more devices. |
2451 | * |
2452 | * NOTE: you will need to drop the reference with put_device() after use. |
2453 | */ |
2454 | struct device *device_find_child(struct device *parent, void *data, |
2455 | int (*match)(struct device *dev, void *data)) |
2456 | { |
2457 | struct klist_iter i; |
2458 | struct device *child; |
2459 | |
2460 | if (!parent) |
2461 | return NULL; |
2462 | |
2463 | klist_iter_init(&parent->p->klist_children, &i); |
2464 | while ((child = next_device(&i))) |
2465 | if (match(child, data) && get_device(child)) |
2466 | break; |
2467 | klist_iter_exit(&i); |
2468 | return child; |
2469 | } |
2470 | EXPORT_SYMBOL_GPL(device_find_child); |
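
/*
 * Example: a minimal sketch (hypothetical helper) that looks up a child by
 * name.  A non-NULL result carries a reference that must be dropped with
 * put_device() once the caller is done with it.
 *
 *	static int foo_match_name(struct device *dev, void *name)
 *	{
 *		return sysfs_streq(dev_name(dev), name);
 *	}
 *
 *	struct device *child;
 *
 *	child = device_find_child(parent, (void *)"foo0", foo_match_name);
 *	if (child) {
 *		...
 *		put_device(child);
 *	}
 */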
2471 | |
2472 | int __init devices_init(void) |
2473 | { |
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
2484 | if (!sysfs_dev_char_kobj) |
2485 | goto char_kobj_err; |
2486 | |
2487 | return 0; |
2488 | |
2489 | char_kobj_err: |
2490 | kobject_put(sysfs_dev_block_kobj); |
2491 | block_kobj_err: |
2492 | kobject_put(dev_kobj); |
2493 | dev_kobj_err: |
2494 | kset_unregister(devices_kset); |
2495 | return -ENOMEM; |
2496 | } |
2497 | |
2498 | static int device_check_offline(struct device *dev, void *not_used) |
2499 | { |
2500 | int ret; |
2501 | |
2502 | ret = device_for_each_child(dev, NULL, device_check_offline); |
2503 | if (ret) |
2504 | return ret; |
2505 | |
2506 | return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; |
2507 | } |
2508 | |
2509 | /** |
2510 | * device_offline - Prepare the device for hot-removal. |
2511 | * @dev: Device to be put offline. |
2512 | * |
2513 | * Execute the device bus type's .offline() callback, if present, to prepare |
2514 | * the device for a subsequent hot-removal. If that succeeds, the device must |
2515 | * not be used until either it is removed or its bus type's .online() callback |
2516 | * is executed. |
2517 | * |
2518 | * Call under device_hotplug_lock. |
2519 | */ |
2520 | int device_offline(struct device *dev) |
2521 | { |
2522 | int ret; |
2523 | |
2524 | if (dev->offline_disabled) |
2525 | return -EPERM; |
2526 | |
2527 | ret = device_for_each_child(dev, NULL, device_check_offline); |
2528 | if (ret) |
2529 | return ret; |
2530 | |
2531 | device_lock(dev); |
2532 | if (device_supports_offline(dev)) { |
2533 | if (dev->offline) { |
2534 | ret = 1; |
2535 | } else { |
2536 | ret = dev->bus->offline(dev); |
2537 | if (!ret) { |
2538 | kobject_uevent(&dev->kobj, KOBJ_OFFLINE); |
2539 | dev->offline = true; |
2540 | } |
2541 | } |
2542 | } |
2543 | device_unlock(dev); |
2544 | |
2545 | return ret; |
2546 | } |
2547 | |
2548 | /** |
2549 | * device_online - Put the device back online after successful device_offline(). |
2550 | * @dev: Device to be put back online. |
2551 | * |
2552 | * If device_offline() has been successfully executed for @dev, but the device |
2553 | * has not been removed subsequently, execute its bus type's .online() callback |
2554 | * to indicate that the device can be used again. |
2555 | * |
2556 | * Call under device_hotplug_lock. |
2557 | */ |
2558 | int device_online(struct device *dev) |
2559 | { |
2560 | int ret = 0; |
2561 | |
2562 | device_lock(dev); |
2563 | if (device_supports_offline(dev)) { |
2564 | if (dev->offline) { |
2565 | ret = dev->bus->online(dev); |
2566 | if (!ret) { |
2567 | kobject_uevent(&dev->kobj, KOBJ_ONLINE); |
2568 | dev->offline = false; |
2569 | } |
2570 | } else { |
2571 | ret = 1; |
2572 | } |
2573 | } |
2574 | device_unlock(dev); |
2575 | |
2576 | return ret; |
2577 | } |
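
/*
 * Example: a minimal sketch of offlining a device and bringing it back.
 * Both helpers must run under device_hotplug_lock, taken here through
 * lock_device_hotplug()/unlock_device_hotplug().
 *
 *	lock_device_hotplug();
 *	ret = device_offline(dev);	// 0, 1 if already offline, -errno on failure
 *	if (ret == 0) {
 *		...
 *		device_online(dev);
 *	}
 *	unlock_device_hotplug();
 */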
2578 | |
2579 | struct root_device { |
2580 | struct device dev; |
2581 | struct module *owner; |
2582 | }; |
2583 | |
2584 | static inline struct root_device *to_root_device(struct device *d) |
2585 | { |
2586 | return container_of(d, struct root_device, dev); |
2587 | } |
2588 | |
2589 | static void root_device_release(struct device *dev) |
2590 | { |
2591 | kfree(to_root_device(dev)); |
2592 | } |
2593 | |
2594 | /** |
2595 | * __root_device_register - allocate and register a root device |
2596 | * @name: root device name |
2597 | * @owner: owner module of the root device, usually THIS_MODULE |
2598 | * |
2599 | * This function allocates a root device and registers it |
2600 | * using device_register(). In order to free the returned |
2601 | * device, use root_device_unregister(). |
2602 | * |
2603 | * Root devices are dummy devices which allow other devices |
2604 | * to be grouped under /sys/devices. Use this function to |
2605 | * allocate a root device and then use it as the parent of |
2606 | * any device which should appear under /sys/devices/{name} |
2607 | * |
2608 | * The /sys/devices/{name} directory will also contain a |
2609 | * 'module' symlink which points to the @owner directory |
2610 | * in sysfs. |
2611 | * |
2612 | * Returns &struct device pointer on success, or ERR_PTR() on error. |
2613 | * |
2614 | * Note: You probably want to use root_device_register(). |
2615 | */ |
2616 | struct device *__root_device_register(const char *name, struct module *owner) |
2617 | { |
2618 | struct root_device *root; |
2619 | int err = -ENOMEM; |
2620 | |
2621 | root = kzalloc(sizeof(struct root_device), GFP_KERNEL); |
2622 | if (!root) |
2623 | return ERR_PTR(err); |
2624 | |
	err = dev_set_name(&root->dev, "%s", name);
2626 | if (err) { |
2627 | kfree(root); |
2628 | return ERR_PTR(err); |
2629 | } |
2630 | |
2631 | root->dev.release = root_device_release; |
2632 | |
2633 | err = device_register(&root->dev); |
2634 | if (err) { |
2635 | put_device(&root->dev); |
2636 | return ERR_PTR(err); |
2637 | } |
2638 | |
2639 | #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ |
2640 | if (owner) { |
2641 | struct module_kobject *mk = &owner->mkobj; |
2642 | |
		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
2644 | if (err) { |
2645 | device_unregister(&root->dev); |
2646 | return ERR_PTR(err); |
2647 | } |
2648 | root->owner = owner; |
2649 | } |
2650 | #endif |
2651 | |
2652 | return &root->dev; |
2653 | } |
2654 | EXPORT_SYMBOL_GPL(__root_device_register); |
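
/*
 * Example: a minimal sketch that groups devices under
 * /sys/devices/my_platform (the name is hypothetical).
 * root_device_register() is the usual wrapper that supplies THIS_MODULE
 * as @owner.
 *
 *	struct device *root;
 *
 *	root = root_device_register("my_platform");
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	// register child devices with .parent = root ...
 *	root_device_unregister(root);
 */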
2655 | |
2656 | /** |
2657 | * root_device_unregister - unregister and free a root device |
2658 | * @dev: device going away |
2659 | * |
2660 | * This function unregisters and cleans up a device that was created by |
2661 | * root_device_register(). |
2662 | */ |
2663 | void root_device_unregister(struct device *dev) |
2664 | { |
2665 | struct root_device *root = to_root_device(dev); |
2666 | |
2667 | if (root->owner) |
		sysfs_remove_link(&root->dev.kobj, "module");
2669 | |
2670 | device_unregister(dev); |
2671 | } |
2672 | EXPORT_SYMBOL_GPL(root_device_unregister); |
2673 | |
2674 | |
2675 | static void device_create_release(struct device *dev) |
2676 | { |
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2678 | kfree(dev); |
2679 | } |
2680 | |
2681 | static __printf(6, 0) struct device * |
2682 | device_create_groups_vargs(struct class *class, struct device *parent, |
2683 | dev_t devt, void *drvdata, |
2684 | const struct attribute_group **groups, |
2685 | const char *fmt, va_list args) |
2686 | { |
2687 | struct device *dev = NULL; |
2688 | int retval = -ENODEV; |
2689 | |
2690 | if (class == NULL || IS_ERR(class)) |
2691 | goto error; |
2692 | |
2693 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
2694 | if (!dev) { |
2695 | retval = -ENOMEM; |
2696 | goto error; |
2697 | } |
2698 | |
2699 | device_initialize(dev); |
2700 | dev->devt = devt; |
2701 | dev->class = class; |
2702 | dev->parent = parent; |
2703 | dev->groups = groups; |
2704 | dev->release = device_create_release; |
2705 | dev_set_drvdata(dev, drvdata); |
2706 | |
2707 | retval = kobject_set_name_vargs(&dev->kobj, fmt, args); |
2708 | if (retval) |
2709 | goto error; |
2710 | |
2711 | retval = device_add(dev); |
2712 | if (retval) |
2713 | goto error; |
2714 | |
2715 | return dev; |
2716 | |
2717 | error: |
2718 | put_device(dev); |
2719 | return ERR_PTR(retval); |
2720 | } |
2721 | |
2722 | /** |
2723 | * device_create_vargs - creates a device and registers it with sysfs |
2724 | * @class: pointer to the struct class that this device should be registered to |
2725 | * @parent: pointer to the parent struct device of this new device, if any |
2726 | * @devt: the dev_t for the char device to be added |
2727 | * @drvdata: the data to be added to the device for callbacks |
2728 | * @fmt: string for the device's name |
2729 | * @args: va_list for the device's name |
2730 | * |
2731 | * This function can be used by char device classes. A struct device |
2732 | * will be created in sysfs, registered to the specified class. |
2733 | * |
2734 | * A "dev" file will be created, showing the dev_t for the device, if |
2735 | * the dev_t is not 0,0. |
2736 | * If a pointer to a parent struct device is passed in, the newly created |
2737 | * struct device will be a child of that device in sysfs. |
2738 | * The pointer to the struct device will be returned from the call. |
2739 | * Any further sysfs files that might be required can be created using this |
2740 | * pointer. |
2741 | * |
2742 | * Returns &struct device pointer on success, or ERR_PTR() on error. |
2743 | * |
2744 | * Note: the struct class passed to this function must have previously |
2745 | * been created with a call to class_create(). |
2746 | */ |
2747 | struct device *device_create_vargs(struct class *class, struct device *parent, |
2748 | dev_t devt, void *drvdata, const char *fmt, |
2749 | va_list args) |
2750 | { |
2751 | return device_create_groups_vargs(class, parent, devt, drvdata, NULL, |
2752 | fmt, args); |
2753 | } |
2754 | EXPORT_SYMBOL_GPL(device_create_vargs); |
2755 | |
2756 | /** |
2757 | * device_create - creates a device and registers it with sysfs |
2758 | * @class: pointer to the struct class that this device should be registered to |
2759 | * @parent: pointer to the parent struct device of this new device, if any |
2760 | * @devt: the dev_t for the char device to be added |
2761 | * @drvdata: the data to be added to the device for callbacks |
2762 | * @fmt: string for the device's name |
2763 | * |
2764 | * This function can be used by char device classes. A struct device |
2765 | * will be created in sysfs, registered to the specified class. |
2766 | * |
2767 | * A "dev" file will be created, showing the dev_t for the device, if |
2768 | * the dev_t is not 0,0. |
2769 | * If a pointer to a parent struct device is passed in, the newly created |
2770 | * struct device will be a child of that device in sysfs. |
2771 | * The pointer to the struct device will be returned from the call. |
2772 | * Any further sysfs files that might be required can be created using this |
2773 | * pointer. |
2774 | * |
2775 | * Returns &struct device pointer on success, or ERR_PTR() on error. |
2776 | * |
2777 | * Note: the struct class passed to this function must have previously |
2778 | * been created with a call to class_create(). |
2779 | */ |
2780 | struct device *device_create(struct class *class, struct device *parent, |
2781 | dev_t devt, void *drvdata, const char *fmt, ...) |
2782 | { |
2783 | va_list vargs; |
2784 | struct device *dev; |
2785 | |
2786 | va_start(vargs, fmt); |
2787 | dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs); |
2788 | va_end(vargs); |
2789 | return dev; |
2790 | } |
2791 | EXPORT_SYMBOL_GPL(device_create); |
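
/*
 * Example: a minimal sketch of a char device class pairing device_create()
 * with device_destroy().  The class name "foo" and foo_major are
 * hypothetical; the class itself must come from class_create().
 *
 *	struct class *cls;
 *	struct device *dev;
 *
 *	cls = class_create(THIS_MODULE, "foo");
 *	if (IS_ERR(cls))
 *		return PTR_ERR(cls);
 *
 *	dev = device_create(cls, NULL, MKDEV(foo_major, 0), NULL, "foo%d", 0);
 *	if (IS_ERR(dev)) {
 *		class_destroy(cls);
 *		return PTR_ERR(dev);
 *	}
 *	...
 *	device_destroy(cls, MKDEV(foo_major, 0));
 *	class_destroy(cls);
 */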
2792 | |
2793 | /** |
2794 | * device_create_with_groups - creates a device and registers it with sysfs |
2795 | * @class: pointer to the struct class that this device should be registered to |
2796 | * @parent: pointer to the parent struct device of this new device, if any |
2797 | * @devt: the dev_t for the char device to be added |
2798 | * @drvdata: the data to be added to the device for callbacks |
2799 | * @groups: NULL-terminated list of attribute groups to be created |
2800 | * @fmt: string for the device's name |
2801 | * |
2802 | * This function can be used by char device classes. A struct device |
2803 | * will be created in sysfs, registered to the specified class. |
2804 | * Additional attributes specified in the groups parameter will also |
2805 | * be created automatically. |
2806 | * |
2807 | * A "dev" file will be created, showing the dev_t for the device, if |
2808 | * the dev_t is not 0,0. |
2809 | * If a pointer to a parent struct device is passed in, the newly created |
2810 | * struct device will be a child of that device in sysfs. |
2811 | * The pointer to the struct device will be returned from the call. |
2812 | * Any further sysfs files that might be required can be created using this |
2813 | * pointer. |
2814 | * |
2815 | * Returns &struct device pointer on success, or ERR_PTR() on error. |
2816 | * |
2817 | * Note: the struct class passed to this function must have previously |
2818 | * been created with a call to class_create(). |
2819 | */ |
2820 | struct device *device_create_with_groups(struct class *class, |
2821 | struct device *parent, dev_t devt, |
2822 | void *drvdata, |
2823 | const struct attribute_group **groups, |
2824 | const char *fmt, ...) |
2825 | { |
2826 | va_list vargs; |
2827 | struct device *dev; |
2828 | |
2829 | va_start(vargs, fmt); |
2830 | dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, |
2831 | fmt, vargs); |
2832 | va_end(vargs); |
2833 | return dev; |
2834 | } |
2835 | EXPORT_SYMBOL_GPL(device_create_with_groups); |
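
/*
 * Example: a minimal sketch passing a NULL-terminated groups array so the
 * extra attribute exists before the KOBJ_ADD uevent is emitted, instead of
 * being added racily after device_create().  A matching state_show() is
 * assumed for DEVICE_ATTR_RO().
 *
 *	static DEVICE_ATTR_RO(state);
 *
 *	static struct attribute *foo_attrs[] = {
 *		&dev_attr_state.attr,
 *		NULL,
 *	};
 *	ATTRIBUTE_GROUPS(foo);		// provides foo_groups
 *
 *	dev = device_create_with_groups(cls, NULL, devt, NULL,
 *					foo_groups, "foo%d", 0);
 */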
2836 | |
2837 | static int __match_devt(struct device *dev, const void *data) |
2838 | { |
2839 | const dev_t *devt = data; |
2840 | |
2841 | return dev->devt == *devt; |
2842 | } |
2843 | |
2844 | /** |
2845 | * device_destroy - removes a device that was created with device_create() |
2846 | * @class: pointer to the struct class that this device was registered with |
2847 | * @devt: the dev_t of the device that was previously registered |
2848 | * |
2849 | * This call unregisters and cleans up a device that was created with a |
2850 | * call to device_create(). |
2851 | */ |
2852 | void device_destroy(struct class *class, dev_t devt) |
2853 | { |
2854 | struct device *dev; |
2855 | |
2856 | dev = class_find_device(class, NULL, &devt, __match_devt); |
2857 | if (dev) { |
2858 | put_device(dev); |
2859 | device_unregister(dev); |
2860 | } |
2861 | } |
2862 | EXPORT_SYMBOL_GPL(device_destroy); |
2863 | |
2864 | /** |
2865 | * device_rename - renames a device |
2866 | * @dev: the pointer to the struct device to be renamed |
2867 | * @new_name: the new name of the device |
2868 | * |
2869 | * It is the responsibility of the caller to provide mutual |
2870 | * exclusion between two different calls of device_rename |
2871 | * on the same device to ensure that new_name is valid and |
2872 | * won't conflict with other devices. |
2873 | * |
2874 | * Note: Don't call this function. Currently, the networking layer calls this |
2875 | * function, but that will change. The following text from Kay Sievers offers |
2876 | * some insight: |
2877 | * |
2878 | * Renaming devices is racy at many levels, symlinks and other stuff are not |
2879 | * replaced atomically, and you get a "move" uevent, but it's not easy to |
2880 | * connect the event to the old and new device. Device nodes are not renamed at |
2881 | * all, there isn't even support for that in the kernel now. |
2882 | * |
2883 | * In the meantime, during renaming, your target name might be taken by another |
2884 | * driver, creating conflicts. Or the old name is taken directly after you |
2885 | * renamed it -- then you get events for the same DEVPATH, before you even see |
2886 | * the "move" event. It's just a mess, and nothing new should ever rely on |
2887 | * kernel device renaming. Besides that, it's not even implemented now for |
2888 | * other things than (driver-core wise very simple) network devices. |
2889 | * |
2890 | * We are currently about to change network renaming in udev to completely |
2891 | * disallow renaming of devices in the same namespace as the kernel uses, |
2892 | * because we can't solve the problems properly, that arise with swapping names |
2893 | * of multiple interfaces without races. Means, renaming of eth[0-9]* will only |
2894 | * be allowed to some other name than eth[0-9]*, for the aforementioned |
2895 | * reasons. |
2896 | * |
2897 | * Make up a "real" name in the driver before you register anything, or add |
2898 | * some other attributes for userspace to find the device, or use udev to add |
2899 | * symlinks -- but never rename kernel devices later, it's a complete mess. We |
2900 | * don't even want to get into that and try to implement the missing pieces in |
2901 | * the core. We really have other pieces to fix in the driver core mess. :) |
2902 | */ |
2903 | int device_rename(struct device *dev, const char *new_name) |
2904 | { |
2905 | struct kobject *kobj = &dev->kobj; |
2906 | char *old_device_name = NULL; |
2907 | int error; |
2908 | |
2909 | dev = get_device(dev); |
2910 | if (!dev) |
2911 | return -EINVAL; |
2912 | |
	dev_dbg(dev, "renaming to %s\n", new_name);
2914 | |
2915 | old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); |
2916 | if (!old_device_name) { |
2917 | error = -ENOMEM; |
2918 | goto out; |
2919 | } |
2920 | |
2921 | if (dev->class) { |
2922 | error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, |
2923 | kobj, old_device_name, |
2924 | new_name, kobject_namespace(kobj)); |
2925 | if (error) |
2926 | goto out; |
2927 | } |
2928 | |
2929 | error = kobject_rename(kobj, new_name); |
2930 | if (error) |
2931 | goto out; |
2932 | |
2933 | out: |
2934 | put_device(dev); |
2935 | |
2936 | kfree(old_device_name); |
2937 | |
2938 | return error; |
2939 | } |
2940 | EXPORT_SYMBOL_GPL(device_rename); |
2941 | |
2942 | static int device_move_class_links(struct device *dev, |
2943 | struct device *old_parent, |
2944 | struct device *new_parent) |
2945 | { |
2946 | int error = 0; |
2947 | |
2948 | if (old_parent) |
		sysfs_remove_link(&dev->kobj, "device");
	if (new_parent)
		error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
					  "device");
2953 | return error; |
2954 | } |
2955 | |
2956 | /** |
2957 | * device_move - moves a device to a new parent |
2958 | * @dev: the pointer to the struct device to be moved |
2959 | * @new_parent: the new parent of the device (can be NULL) |
2960 | * @dpm_order: how to reorder the dpm_list |
2961 | */ |
2962 | int device_move(struct device *dev, struct device *new_parent, |
2963 | enum dpm_order dpm_order) |
2964 | { |
2965 | int error; |
2966 | struct device *old_parent; |
2967 | struct kobject *new_parent_kobj; |
2968 | |
2969 | dev = get_device(dev); |
2970 | if (!dev) |
2971 | return -EINVAL; |
2972 | |
2973 | device_pm_lock(); |
2974 | new_parent = get_device(new_parent); |
2975 | new_parent_kobj = get_device_parent(dev, new_parent); |
2976 | if (IS_ERR(new_parent_kobj)) { |
2977 | error = PTR_ERR(new_parent_kobj); |
2978 | put_device(new_parent); |
2979 | goto out; |
2980 | } |
2981 | |
	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
2984 | error = kobject_move(&dev->kobj, new_parent_kobj); |
2985 | if (error) { |
2986 | cleanup_glue_dir(dev, new_parent_kobj); |
2987 | put_device(new_parent); |
2988 | goto out; |
2989 | } |
2990 | old_parent = dev->parent; |
2991 | dev->parent = new_parent; |
2992 | if (old_parent) |
2993 | klist_remove(&dev->p->knode_parent); |
2994 | if (new_parent) { |
2995 | klist_add_tail(&dev->p->knode_parent, |
2996 | &new_parent->p->klist_children); |
2997 | set_dev_node(dev, dev_to_node(new_parent)); |
2998 | } |
2999 | |
3000 | if (dev->class) { |
3001 | error = device_move_class_links(dev, old_parent, new_parent); |
3002 | if (error) { |
3003 | /* We ignore errors on cleanup since we're hosed anyway... */ |
3004 | device_move_class_links(dev, new_parent, old_parent); |
3005 | if (!kobject_move(&dev->kobj, &old_parent->kobj)) { |
3006 | if (new_parent) |
3007 | klist_remove(&dev->p->knode_parent); |
3008 | dev->parent = old_parent; |
3009 | if (old_parent) { |
3010 | klist_add_tail(&dev->p->knode_parent, |
3011 | &old_parent->p->klist_children); |
3012 | set_dev_node(dev, dev_to_node(old_parent)); |
3013 | } |
3014 | } |
3015 | cleanup_glue_dir(dev, new_parent_kobj); |
3016 | put_device(new_parent); |
3017 | goto out; |
3018 | } |
3019 | } |
3020 | switch (dpm_order) { |
3021 | case DPM_ORDER_NONE: |
3022 | break; |
3023 | case DPM_ORDER_DEV_AFTER_PARENT: |
3024 | device_pm_move_after(dev, new_parent); |
3025 | devices_kset_move_after(dev, new_parent); |
3026 | break; |
3027 | case DPM_ORDER_PARENT_BEFORE_DEV: |
3028 | device_pm_move_before(new_parent, dev); |
3029 | devices_kset_move_before(new_parent, dev); |
3030 | break; |
3031 | case DPM_ORDER_DEV_LAST: |
3032 | device_pm_move_last(dev); |
3033 | devices_kset_move_last(dev); |
3034 | break; |
3035 | } |
3036 | |
3037 | put_device(old_parent); |
3038 | out: |
3039 | device_pm_unlock(); |
3040 | put_device(dev); |
3041 | return error; |
3042 | } |
3043 | EXPORT_SYMBOL_GPL(device_move); |
3044 | |
3045 | /** |
3046 | * device_shutdown - call ->shutdown() on each device to shutdown. |
3047 | */ |
3048 | void device_shutdown(void) |
3049 | { |
3050 | struct device *dev, *parent; |
3051 | |
3052 | wait_for_device_probe(); |
3053 | device_block_probing(); |
3054 | |
3055 | spin_lock(&devices_kset->list_lock); |
3056 | /* |
3057 | * Walk the devices list backward, shutting down each in turn. |
3058 | * Beware that device unplug events may also start pulling |
3059 | * devices offline, even as the system is shutting down. |
3060 | */ |
3061 | while (!list_empty(&devices_kset->list)) { |
3062 | dev = list_entry(devices_kset->list.prev, struct device, |
3063 | kobj.entry); |
3064 | |
3065 | /* |
3066 | * hold reference count of device's parent to |
3067 | * prevent it from being freed because parent's |
3068 | * lock is to be held |
3069 | */ |
3070 | parent = get_device(dev->parent); |
3071 | get_device(dev); |
3072 | /* |
3073 | * Make sure the device is off the kset list, in the |
3074 | * event that dev->*->shutdown() doesn't remove it. |
3075 | */ |
3076 | list_del_init(&dev->kobj.entry); |
3077 | spin_unlock(&devices_kset->list_lock); |
3078 | |
3079 | /* hold lock to avoid race with probe/release */ |
3080 | if (parent) |
3081 | device_lock(parent); |
3082 | device_lock(dev); |
3083 | |
3084 | /* Don't allow any more runtime suspends */ |
3085 | pm_runtime_get_noresume(dev); |
3086 | pm_runtime_barrier(dev); |
3087 | |
3088 | if (dev->class && dev->class->shutdown_pre) { |
3089 | if (initcall_debug) |
				dev_info(dev, "shutdown_pre\n");
3091 | dev->class->shutdown_pre(dev); |
3092 | } |
3093 | if (dev->bus && dev->bus->shutdown) { |
3094 | if (initcall_debug) |
				dev_info(dev, "shutdown\n");
3096 | dev->bus->shutdown(dev); |
3097 | } else if (dev->driver && dev->driver->shutdown) { |
3098 | if (initcall_debug) |
				dev_info(dev, "shutdown\n");
3100 | dev->driver->shutdown(dev); |
3101 | } |
3102 | |
3103 | device_unlock(dev); |
3104 | if (parent) |
3105 | device_unlock(parent); |
3106 | |
3107 | put_device(dev); |
3108 | put_device(parent); |
3109 | |
3110 | spin_lock(&devices_kset->list_lock); |
3111 | } |
3112 | spin_unlock(&devices_kset->list_lock); |
3113 | } |
3114 | |
3115 | /* |
3116 | * Device logging functions |
3117 | */ |
3118 | |
3119 | #ifdef CONFIG_PRINTK |
static int
create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
3122 | { |
3123 | const char *subsys; |
3124 | size_t pos = 0; |
3125 | |
3126 | if (dev->class) |
3127 | subsys = dev->class->name; |
3128 | else if (dev->bus) |
3129 | subsys = dev->bus->name; |
3130 | else |
3131 | return 0; |
3132 | |
	pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
3134 | if (pos >= hdrlen) |
3135 | goto overflow; |
3136 | |
3137 | /* |
3138 | * Add device identifier DEVICE=: |
3139 | * b12:8 block dev_t |
3140 | * c127:3 char dev_t |
3141 | * n8 netdev ifindex |
3142 | * +sound:card0 subsystem:devname |
3143 | */ |
3144 | if (MAJOR(dev->devt)) { |
3145 | char c; |
3146 | |
		if (strcmp(subsys, "block") == 0)
3148 | c = 'b'; |
3149 | else |
3150 | c = 'c'; |
3151 | pos++; |
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=%c%u:%u",
				c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
3156 | struct net_device *net = to_net_dev(dev); |
3157 | |
3158 | pos++; |
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=n%u", net->ifindex);
3161 | } else { |
3162 | pos++; |
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=+%s:%s", subsys, dev_name(dev));
3165 | } |
3166 | |
3167 | if (pos >= hdrlen) |
3168 | goto overflow; |
3169 | |
3170 | return pos; |
3171 | |
3172 | overflow: |
	dev_WARN(dev, "device/subsystem name too long");
3174 | return 0; |
3175 | } |
3176 | |
3177 | int dev_vprintk_emit(int level, const struct device *dev, |
3178 | const char *fmt, va_list args) |
3179 | { |
3180 | char hdr[128]; |
3181 | size_t hdrlen; |
3182 | |
3183 | hdrlen = create_syslog_header(dev, hdr, sizeof(hdr)); |
3184 | |
3185 | return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args); |
3186 | } |
3187 | EXPORT_SYMBOL(dev_vprintk_emit); |
3188 | |
3189 | int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) |
3190 | { |
3191 | va_list args; |
3192 | int r; |
3193 | |
3194 | va_start(args, fmt); |
3195 | |
3196 | r = dev_vprintk_emit(level, dev, fmt, args); |
3197 | |
3198 | va_end(args); |
3199 | |
3200 | return r; |
3201 | } |
3202 | EXPORT_SYMBOL(dev_printk_emit); |
3203 | |
3204 | static void __dev_printk(const char *level, const struct device *dev, |
3205 | struct va_format *vaf) |
3206 | { |
3207 | if (dev) |
		dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
3209 | dev_driver_string(dev), dev_name(dev), vaf); |
3210 | else |
		printk("%s(NULL device *): %pV", level, vaf);
3212 | } |
3213 | |
3214 | void dev_printk(const char *level, const struct device *dev, |
3215 | const char *fmt, ...) |
3216 | { |
3217 | struct va_format vaf; |
3218 | va_list args; |
3219 | |
3220 | va_start(args, fmt); |
3221 | |
3222 | vaf.fmt = fmt; |
3223 | vaf.va = &args; |
3224 | |
3225 | __dev_printk(level, dev, &vaf); |
3226 | |
3227 | va_end(args); |
3228 | } |
3229 | EXPORT_SYMBOL(dev_printk); |
3230 | |
3231 | #define define_dev_printk_level(func, kern_level) \ |
3232 | void func(const struct device *dev, const char *fmt, ...) \ |
3233 | { \ |
3234 | struct va_format vaf; \ |
3235 | va_list args; \ |
3236 | \ |
3237 | va_start(args, fmt); \ |
3238 | \ |
3239 | vaf.fmt = fmt; \ |
3240 | vaf.va = &args; \ |
3241 | \ |
3242 | __dev_printk(kern_level, dev, &vaf); \ |
3243 | \ |
3244 | va_end(args); \ |
3245 | } \ |
3246 | EXPORT_SYMBOL(func); |
3247 | |
3248 | define_dev_printk_level(_dev_emerg, KERN_EMERG); |
3249 | define_dev_printk_level(_dev_alert, KERN_ALERT); |
3250 | define_dev_printk_level(_dev_crit, KERN_CRIT); |
3251 | define_dev_printk_level(_dev_err, KERN_ERR); |
3252 | define_dev_printk_level(_dev_warn, KERN_WARNING); |
3253 | define_dev_printk_level(_dev_notice, KERN_NOTICE); |
3254 | define_dev_printk_level(_dev_info, KERN_INFO); |
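
/*
 * The wrappers generated above back the dev_emerg()/dev_err()/dev_info()
 * etc. macros in <linux/device.h>.  A driver would typically log along the
 * lines of (values hypothetical):
 *
 *	dev_info(dev, "firmware version %u\n", fw_ver);
 *	dev_err(dev, "reset failed: %d\n", err);
 *
 * which prefixes each message with the driver and device name and attaches
 * the SUBSYSTEM=/DEVICE= header built by create_syslog_header().
 */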
3255 | |
3256 | #endif |
3257 | |
3258 | static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) |
3259 | { |
	return fwnode && !IS_ERR_OR_NULL(fwnode->secondary);
3261 | } |
3262 | |
3263 | /** |
3264 | * set_primary_fwnode - Change the primary firmware node of a given device. |
3265 | * @dev: Device to handle. |
3266 | * @fwnode: New primary firmware node of the device. |
3267 | * |
3268 | * Set the device's firmware node pointer to @fwnode, but if a secondary |
3269 | * firmware node of the device is present, preserve it. |
3270 | */ |
3271 | void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) |
3272 | { |
3273 | if (fwnode) { |
3274 | struct fwnode_handle *fn = dev->fwnode; |
3275 | |
3276 | if (fwnode_is_primary(fn)) |
3277 | fn = fn->secondary; |
3278 | |
3279 | if (fn) { |
3280 | WARN_ON(fwnode->secondary); |
3281 | fwnode->secondary = fn; |
3282 | } |
3283 | dev->fwnode = fwnode; |
3284 | } else { |
3285 | dev->fwnode = fwnode_is_primary(dev->fwnode) ? |
3286 | dev->fwnode->secondary : NULL; |
3287 | } |
3288 | } |
3289 | EXPORT_SYMBOL_GPL(set_primary_fwnode); |
3290 | |
3291 | /** |
3292 | * set_secondary_fwnode - Change the secondary firmware node of a given device. |
3293 | * @dev: Device to handle. |
3294 | * @fwnode: New secondary firmware node of the device. |
3295 | * |
3296 | * If a primary firmware node of the device is present, set its secondary |
3297 | * pointer to @fwnode. Otherwise, set the device's firmware node pointer to |
3298 | * @fwnode. |
3299 | */ |
3300 | void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) |
3301 | { |
3302 | if (fwnode) |
3303 | fwnode->secondary = ERR_PTR(-ENODEV); |
3304 | |
3305 | if (fwnode_is_primary(dev->fwnode)) |
3306 | dev->fwnode->secondary = fwnode; |
3307 | else |
3308 | dev->fwnode = fwnode; |
3309 | } |
3310 | |
3311 | /** |
3312 | * device_set_of_node_from_dev - reuse device-tree node of another device |
3313 | * @dev: device whose device-tree node is being set |
3314 | * @dev2: device whose device-tree node is being reused |
3315 | * |
3316 | * Takes another reference to the new device-tree node after first dropping |
3317 | * any reference held to the old node. |
3318 | */ |
3319 | void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) |
3320 | { |
3321 | of_node_put(dev->of_node); |
3322 | dev->of_node = of_node_get(dev2->of_node); |
3323 | dev->of_node_reused = true; |
3324 | } |
3325 | EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); |
3326 | |