// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	void *priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs = nvmem_bin_rw_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0444,
	},
	.read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs = nvmem_bin_ro_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0600,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs = nvmem_bin_rw_root_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0400,
	},
	.read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs = nvmem_bin_ro_root_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/*
 * nvmem_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
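
/*
 * Illustrative sketch of a notifier consumer (the foo_* names are
 * hypothetical, not part of the framework).  For NVMEM_ADD/NVMEM_REMOVE
 * the notifier data is the struct nvmem_device; for NVMEM_CELL_ADD/
 * NVMEM_CELL_REMOVE it is the struct nvmem_cell:
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem provider registered\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nvmem_nb);
 */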

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
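
/*
 * Illustrative provider sketch (the foo_* names and sizes are hypothetical).
 * A minimal read-only provider fills in a struct nvmem_config and calls
 * nvmem_register() from its probe path:
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = &pdev->dev,
 *		.name = "foo-efuse",
 *		.read_only = true,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 1,
 *		.reg_read = foo_reg_read,
 *		.priv = foo,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */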

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device for a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem device will be released automatically once the
 * consumer device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
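
/*
 * Illustrative device tree fragment for the binding parsed above; the node
 * and cell names are hypothetical:
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		mac_addr: mac-address@90 {
 *			reg = <0x90 0x6>;
 *		};
 *	};
 *
 *	ethernet {
 *		nvmem-cells = <&mac_addr>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 * A consumer would then call of_nvmem_cell_get(np, "mac-address").
 */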
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

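/*
 * Worked example (illustrative): a cell with bit_offset = 2 and nbits = 10
 * occupies DIV_ROUND_UP(2 + 10, 8) = 2 bytes.  After the raw read, the
 * value sits in bits [11:2] of the buffer; the helper below shifts it down
 * into bits [9:0] and the final GENMASK() clears the leftover high bits of
 * the last byte.
 */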
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
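
/*
 * Illustrative consumer usage ("calibration" is a hypothetical cell name):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	consume data[0..len - 1], then kfree(data);
 */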

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

/**
 * nvmem_cell_read_u32() - Read a cell value as an u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
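
/*
 * Illustrative shorthand for 32-bit cells ("tsens-calib" is a hypothetical
 * cell name); get, read, length check and put are all handled internally:
 *
 *	u32 calib;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "tsens-calib", &calib);
 *	if (ret)
 *		return ret;
 */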

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
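
/*
 * Illustrative sketch (hypothetical names): a provider or machine code can
 * attach cells to the nvmem device registered under the name "foo-efuse0":
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name = "mac-address",
 *			.offset = 0x90,
 *			.bytes = 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-efuse0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */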

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
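
/*
 * Illustrative sketch (hypothetical names): route the cell "mac-address"
 * of "foo-efuse0" to the consumer device "foo-eth.0", which can then call
 * nvmem_cell_get(dev, "mac-address") on non-DT systems:
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-efuse0",
 *			.cell_name = "mac-address",
 *			.dev_id = "foo-eth.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */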

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");