1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // |
3 | // Register cache access API |
4 | // |
5 | // Copyright 2011 Wolfson Microelectronics plc |
6 | // |
7 | // Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> |
8 | |
9 | #include <linux/bsearch.h> |
10 | #include <linux/device.h> |
11 | #include <linux/export.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/sort.h> |
14 | |
15 | #include "trace.h" |
16 | #include "internal.h" |
17 | |
18 | static const struct regcache_ops *cache_types[] = { |
19 | ®cache_rbtree_ops, |
20 | ®cache_maple_ops, |
21 | ®cache_flat_ops, |
22 | }; |
23 | |
24 | static int regcache_hw_init(struct regmap *map) |
25 | { |
26 | int i, j; |
27 | int ret; |
28 | int count; |
29 | unsigned int reg, val; |
30 | void *tmp_buf; |
31 | |
32 | if (!map->num_reg_defaults_raw) |
33 | return -EINVAL; |
34 | |
35 | /* calculate the size of reg_defaults */ |
36 | for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) |
37 | if (regmap_readable(map, reg: i * map->reg_stride) && |
38 | !regmap_volatile(map, reg: i * map->reg_stride)) |
39 | count++; |
40 | |
41 | /* all registers are unreadable or volatile, so just bypass */ |
42 | if (!count) { |
43 | map->cache_bypass = true; |
44 | return 0; |
45 | } |
46 | |
47 | map->num_reg_defaults = count; |
48 | map->reg_defaults = kmalloc_array(n: count, size: sizeof(struct reg_default), |
49 | GFP_KERNEL); |
50 | if (!map->reg_defaults) |
51 | return -ENOMEM; |
52 | |
53 | if (!map->reg_defaults_raw) { |
54 | bool cache_bypass = map->cache_bypass; |
55 | dev_warn(map->dev, "No cache defaults, reading back from HW\n" ); |
56 | |
57 | /* Bypass the cache access till data read from HW */ |
58 | map->cache_bypass = true; |
59 | tmp_buf = kmalloc(size: map->cache_size_raw, GFP_KERNEL); |
60 | if (!tmp_buf) { |
61 | ret = -ENOMEM; |
62 | goto err_free; |
63 | } |
64 | ret = regmap_raw_read(map, reg: 0, val: tmp_buf, |
65 | val_len: map->cache_size_raw); |
66 | map->cache_bypass = cache_bypass; |
67 | if (ret == 0) { |
68 | map->reg_defaults_raw = tmp_buf; |
69 | map->cache_free = true; |
70 | } else { |
71 | kfree(objp: tmp_buf); |
72 | } |
73 | } |
74 | |
75 | /* fill the reg_defaults */ |
76 | for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { |
77 | reg = i * map->reg_stride; |
78 | |
79 | if (!regmap_readable(map, reg)) |
80 | continue; |
81 | |
82 | if (regmap_volatile(map, reg)) |
83 | continue; |
84 | |
85 | if (map->reg_defaults_raw) { |
86 | val = regcache_get_val(map, base: map->reg_defaults_raw, idx: i); |
87 | } else { |
88 | bool cache_bypass = map->cache_bypass; |
89 | |
90 | map->cache_bypass = true; |
91 | ret = regmap_read(map, reg, val: &val); |
92 | map->cache_bypass = cache_bypass; |
93 | if (ret != 0) { |
94 | dev_err(map->dev, "Failed to read %d: %d\n" , |
95 | reg, ret); |
96 | goto err_free; |
97 | } |
98 | } |
99 | |
100 | map->reg_defaults[j].reg = reg; |
101 | map->reg_defaults[j].def = val; |
102 | j++; |
103 | } |
104 | |
105 | return 0; |
106 | |
107 | err_free: |
108 | kfree(objp: map->reg_defaults); |
109 | |
110 | return ret; |
111 | } |
112 | |
113 | int regcache_init(struct regmap *map, const struct regmap_config *config) |
114 | { |
115 | int ret; |
116 | int i; |
117 | void *tmp_buf; |
118 | |
119 | if (map->cache_type == REGCACHE_NONE) { |
120 | if (config->reg_defaults || config->num_reg_defaults_raw) |
121 | dev_warn(map->dev, |
122 | "No cache used with register defaults set!\n" ); |
123 | |
124 | map->cache_bypass = true; |
125 | return 0; |
126 | } |
127 | |
128 | if (config->reg_defaults && !config->num_reg_defaults) { |
129 | dev_err(map->dev, |
130 | "Register defaults are set without the number!\n" ); |
131 | return -EINVAL; |
132 | } |
133 | |
134 | if (config->num_reg_defaults && !config->reg_defaults) { |
135 | dev_err(map->dev, |
136 | "Register defaults number are set without the reg!\n" ); |
137 | return -EINVAL; |
138 | } |
139 | |
140 | for (i = 0; i < config->num_reg_defaults; i++) |
141 | if (config->reg_defaults[i].reg % map->reg_stride) |
142 | return -EINVAL; |
143 | |
144 | for (i = 0; i < ARRAY_SIZE(cache_types); i++) |
145 | if (cache_types[i]->type == map->cache_type) |
146 | break; |
147 | |
148 | if (i == ARRAY_SIZE(cache_types)) { |
149 | dev_err(map->dev, "Could not match cache type: %d\n" , |
150 | map->cache_type); |
151 | return -EINVAL; |
152 | } |
153 | |
154 | map->num_reg_defaults = config->num_reg_defaults; |
155 | map->num_reg_defaults_raw = config->num_reg_defaults_raw; |
156 | map->reg_defaults_raw = config->reg_defaults_raw; |
157 | map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); |
158 | map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; |
159 | |
160 | map->cache = NULL; |
161 | map->cache_ops = cache_types[i]; |
162 | |
163 | if (!map->cache_ops->read || |
164 | !map->cache_ops->write || |
165 | !map->cache_ops->name) |
166 | return -EINVAL; |
167 | |
168 | /* We still need to ensure that the reg_defaults |
169 | * won't vanish from under us. We'll need to make |
170 | * a copy of it. |
171 | */ |
172 | if (config->reg_defaults) { |
173 | tmp_buf = kmemdup(p: config->reg_defaults, size: map->num_reg_defaults * |
174 | sizeof(struct reg_default), GFP_KERNEL); |
175 | if (!tmp_buf) |
176 | return -ENOMEM; |
177 | map->reg_defaults = tmp_buf; |
178 | } else if (map->num_reg_defaults_raw) { |
179 | /* Some devices such as PMICs don't have cache defaults, |
180 | * we cope with this by reading back the HW registers and |
181 | * crafting the cache defaults by hand. |
182 | */ |
183 | ret = regcache_hw_init(map); |
184 | if (ret < 0) |
185 | return ret; |
186 | if (map->cache_bypass) |
187 | return 0; |
188 | } |
189 | |
190 | if (!map->max_register && map->num_reg_defaults_raw) |
191 | map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride; |
192 | |
193 | if (map->cache_ops->init) { |
194 | dev_dbg(map->dev, "Initializing %s cache\n" , |
195 | map->cache_ops->name); |
196 | ret = map->cache_ops->init(map); |
197 | if (ret) |
198 | goto err_free; |
199 | } |
200 | return 0; |
201 | |
202 | err_free: |
203 | kfree(objp: map->reg_defaults); |
204 | if (map->cache_free) |
205 | kfree(objp: map->reg_defaults_raw); |
206 | |
207 | return ret; |
208 | } |
209 | |
210 | void regcache_exit(struct regmap *map) |
211 | { |
212 | if (map->cache_type == REGCACHE_NONE) |
213 | return; |
214 | |
215 | BUG_ON(!map->cache_ops); |
216 | |
217 | kfree(objp: map->reg_defaults); |
218 | if (map->cache_free) |
219 | kfree(objp: map->reg_defaults_raw); |
220 | |
221 | if (map->cache_ops->exit) { |
222 | dev_dbg(map->dev, "Destroying %s cache\n" , |
223 | map->cache_ops->name); |
224 | map->cache_ops->exit(map); |
225 | } |
226 | } |
227 | |
228 | /** |
229 | * regcache_read - Fetch the value of a given register from the cache. |
230 | * |
231 | * @map: map to configure. |
232 | * @reg: The register index. |
233 | * @value: The value to be returned. |
234 | * |
235 | * Return a negative value on failure, 0 on success. |
236 | */ |
237 | int regcache_read(struct regmap *map, |
238 | unsigned int reg, unsigned int *value) |
239 | { |
240 | int ret; |
241 | |
242 | if (map->cache_type == REGCACHE_NONE) |
243 | return -EINVAL; |
244 | |
245 | BUG_ON(!map->cache_ops); |
246 | |
247 | if (!regmap_volatile(map, reg)) { |
248 | ret = map->cache_ops->read(map, reg, value); |
249 | |
250 | if (ret == 0) |
251 | trace_regmap_reg_read_cache(map, reg, val: *value); |
252 | |
253 | return ret; |
254 | } |
255 | |
256 | return -EINVAL; |
257 | } |
258 | |
259 | /** |
260 | * regcache_write - Set the value of a given register in the cache. |
261 | * |
262 | * @map: map to configure. |
263 | * @reg: The register index. |
264 | * @value: The new register value. |
265 | * |
266 | * Return a negative value on failure, 0 on success. |
267 | */ |
268 | int regcache_write(struct regmap *map, |
269 | unsigned int reg, unsigned int value) |
270 | { |
271 | if (map->cache_type == REGCACHE_NONE) |
272 | return 0; |
273 | |
274 | BUG_ON(!map->cache_ops); |
275 | |
276 | if (!regmap_volatile(map, reg)) |
277 | return map->cache_ops->write(map, reg, value); |
278 | |
279 | return 0; |
280 | } |
281 | |
282 | bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg, |
283 | unsigned int val) |
284 | { |
285 | int ret; |
286 | |
287 | if (!regmap_writeable(map, reg)) |
288 | return false; |
289 | |
290 | /* If we don't know the chip just got reset, then sync everything. */ |
291 | if (!map->no_sync_defaults) |
292 | return true; |
293 | |
294 | /* Is this the hardware default? If so skip. */ |
295 | ret = regcache_lookup_reg(map, reg); |
296 | if (ret >= 0 && val == map->reg_defaults[ret].def) |
297 | return false; |
298 | return true; |
299 | } |
300 | |
301 | static int regcache_default_sync(struct regmap *map, unsigned int min, |
302 | unsigned int max) |
303 | { |
304 | unsigned int reg; |
305 | |
306 | for (reg = min; reg <= max; reg += map->reg_stride) { |
307 | unsigned int val; |
308 | int ret; |
309 | |
310 | if (regmap_volatile(map, reg) || |
311 | !regmap_writeable(map, reg)) |
312 | continue; |
313 | |
314 | ret = regcache_read(map, reg, value: &val); |
315 | if (ret == -ENOENT) |
316 | continue; |
317 | if (ret) |
318 | return ret; |
319 | |
320 | if (!regcache_reg_needs_sync(map, reg, val)) |
321 | continue; |
322 | |
323 | map->cache_bypass = true; |
324 | ret = _regmap_write(map, reg, val); |
325 | map->cache_bypass = false; |
326 | if (ret) { |
327 | dev_err(map->dev, "Unable to sync register %#x. %d\n" , |
328 | reg, ret); |
329 | return ret; |
330 | } |
331 | dev_dbg(map->dev, "Synced register %#x, value %#x\n" , reg, val); |
332 | } |
333 | |
334 | return 0; |
335 | } |
336 | |
/* Comparison callback that matches every node; used with rb_for_each()
 * so the walk in regcache_sync() visits the entire range tree. */
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}
341 | |
342 | /** |
343 | * regcache_sync - Sync the register cache with the hardware. |
344 | * |
345 | * @map: map to configure. |
346 | * |
347 | * Any registers that should not be synced should be marked as |
348 | * volatile. In general drivers can choose not to use the provided |
349 | * syncing functionality if they so require. |
350 | * |
351 | * Return a negative value on failure, 0 on success. |
352 | */ |
353 | int regcache_sync(struct regmap *map) |
354 | { |
355 | int ret = 0; |
356 | unsigned int i; |
357 | const char *name; |
358 | bool bypass; |
359 | struct rb_node *node; |
360 | |
361 | if (WARN_ON(map->cache_type == REGCACHE_NONE)) |
362 | return -EINVAL; |
363 | |
364 | BUG_ON(!map->cache_ops); |
365 | |
366 | map->lock(map->lock_arg); |
367 | /* Remember the initial bypass state */ |
368 | bypass = map->cache_bypass; |
369 | dev_dbg(map->dev, "Syncing %s cache\n" , |
370 | map->cache_ops->name); |
371 | name = map->cache_ops->name; |
372 | trace_regcache_sync(map, type: name, status: "start" ); |
373 | |
374 | if (!map->cache_dirty) |
375 | goto out; |
376 | |
377 | /* Apply any patch first */ |
378 | map->cache_bypass = true; |
379 | for (i = 0; i < map->patch_regs; i++) { |
380 | ret = _regmap_write(map, reg: map->patch[i].reg, val: map->patch[i].def); |
381 | if (ret != 0) { |
382 | dev_err(map->dev, "Failed to write %x = %x: %d\n" , |
383 | map->patch[i].reg, map->patch[i].def, ret); |
384 | goto out; |
385 | } |
386 | } |
387 | map->cache_bypass = false; |
388 | |
389 | if (map->cache_ops->sync) |
390 | ret = map->cache_ops->sync(map, 0, map->max_register); |
391 | else |
392 | ret = regcache_default_sync(map, min: 0, max: map->max_register); |
393 | |
394 | if (ret == 0) |
395 | map->cache_dirty = false; |
396 | |
397 | out: |
398 | /* Restore the bypass state */ |
399 | map->cache_bypass = bypass; |
400 | map->no_sync_defaults = false; |
401 | |
402 | /* |
403 | * If we did any paging with cache bypassed and a cached |
404 | * paging register then the register and cache state might |
405 | * have gone out of sync, force writes of all the paging |
406 | * registers. |
407 | */ |
408 | rb_for_each(node, 0, &map->range_tree, rbtree_all) { |
409 | struct regmap_range_node *this = |
410 | rb_entry(node, struct regmap_range_node, node); |
411 | |
412 | /* If there's nothing in the cache there's nothing to sync */ |
413 | ret = regcache_read(map, reg: this->selector_reg, value: &i); |
414 | if (ret != 0) |
415 | continue; |
416 | |
417 | ret = _regmap_write(map, reg: this->selector_reg, val: i); |
418 | if (ret != 0) { |
419 | dev_err(map->dev, "Failed to write %x = %x: %d\n" , |
420 | this->selector_reg, i, ret); |
421 | break; |
422 | } |
423 | } |
424 | |
425 | map->unlock(map->lock_arg); |
426 | |
427 | regmap_async_complete(map); |
428 | |
429 | trace_regcache_sync(map, type: name, status: "stop" ); |
430 | |
431 | return ret; |
432 | } |
433 | EXPORT_SYMBOL_GPL(regcache_sync); |
434 | |
435 | /** |
436 | * regcache_sync_region - Sync part of the register cache with the hardware. |
437 | * |
438 | * @map: map to sync. |
439 | * @min: first register to sync |
440 | * @max: last register to sync |
441 | * |
442 | * Write all non-default register values in the specified region to |
443 | * the hardware. |
444 | * |
445 | * Return a negative value on failure, 0 on success. |
446 | */ |
447 | int regcache_sync_region(struct regmap *map, unsigned int min, |
448 | unsigned int max) |
449 | { |
450 | int ret = 0; |
451 | const char *name; |
452 | bool bypass; |
453 | |
454 | if (WARN_ON(map->cache_type == REGCACHE_NONE)) |
455 | return -EINVAL; |
456 | |
457 | BUG_ON(!map->cache_ops); |
458 | |
459 | map->lock(map->lock_arg); |
460 | |
461 | /* Remember the initial bypass state */ |
462 | bypass = map->cache_bypass; |
463 | |
464 | name = map->cache_ops->name; |
465 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n" , name, min, max); |
466 | |
467 | trace_regcache_sync(map, type: name, status: "start region" ); |
468 | |
469 | if (!map->cache_dirty) |
470 | goto out; |
471 | |
472 | map->async = true; |
473 | |
474 | if (map->cache_ops->sync) |
475 | ret = map->cache_ops->sync(map, min, max); |
476 | else |
477 | ret = regcache_default_sync(map, min, max); |
478 | |
479 | out: |
480 | /* Restore the bypass state */ |
481 | map->cache_bypass = bypass; |
482 | map->async = false; |
483 | map->no_sync_defaults = false; |
484 | map->unlock(map->lock_arg); |
485 | |
486 | regmap_async_complete(map); |
487 | |
488 | trace_regcache_sync(map, type: name, status: "stop region" ); |
489 | |
490 | return ret; |
491 | } |
492 | EXPORT_SYMBOL_GPL(regcache_sync_region); |
493 | |
494 | /** |
495 | * regcache_drop_region - Discard part of the register cache |
496 | * |
497 | * @map: map to operate on |
498 | * @min: first register to discard |
499 | * @max: last register to discard |
500 | * |
501 | * Discard part of the register cache. |
502 | * |
503 | * Return a negative value on failure, 0 on success. |
504 | */ |
505 | int regcache_drop_region(struct regmap *map, unsigned int min, |
506 | unsigned int max) |
507 | { |
508 | int ret = 0; |
509 | |
510 | if (!map->cache_ops || !map->cache_ops->drop) |
511 | return -EINVAL; |
512 | |
513 | map->lock(map->lock_arg); |
514 | |
515 | trace_regcache_drop_region(map, from: min, to: max); |
516 | |
517 | ret = map->cache_ops->drop(map, min, max); |
518 | |
519 | map->unlock(map->lock_arg); |
520 | |
521 | return ret; |
522 | } |
523 | EXPORT_SYMBOL_GPL(regcache_drop_region); |
524 | |
525 | /** |
526 | * regcache_cache_only - Put a register map into cache only mode |
527 | * |
528 | * @map: map to configure |
529 | * @enable: flag if changes should be written to the hardware |
530 | * |
531 | * When a register map is marked as cache only writes to the register |
532 | * map API will only update the register cache, they will not cause |
533 | * any hardware changes. This is useful for allowing portions of |
534 | * drivers to act as though the device were functioning as normal when |
535 | * it is disabled for power saving reasons. |
536 | */ |
537 | void regcache_cache_only(struct regmap *map, bool enable) |
538 | { |
539 | map->lock(map->lock_arg); |
540 | WARN_ON(map->cache_type != REGCACHE_NONE && |
541 | map->cache_bypass && enable); |
542 | map->cache_only = enable; |
543 | trace_regmap_cache_only(map, flag: enable); |
544 | map->unlock(map->lock_arg); |
545 | } |
546 | EXPORT_SYMBOL_GPL(regcache_cache_only); |
547 | |
548 | /** |
549 | * regcache_mark_dirty - Indicate that HW registers were reset to default values |
550 | * |
551 | * @map: map to mark |
552 | * |
553 | * Inform regcache that the device has been powered down or reset, so that |
554 | * on resume, regcache_sync() knows to write out all non-default values |
555 | * stored in the cache. |
556 | * |
557 | * If this function is not called, regcache_sync() will assume that |
558 | * the hardware state still matches the cache state, modulo any writes that |
559 | * happened when cache_only was true. |
560 | */ |
561 | void regcache_mark_dirty(struct regmap *map) |
562 | { |
563 | map->lock(map->lock_arg); |
564 | map->cache_dirty = true; |
565 | map->no_sync_defaults = true; |
566 | map->unlock(map->lock_arg); |
567 | } |
568 | EXPORT_SYMBOL_GPL(regcache_mark_dirty); |
569 | |
570 | /** |
571 | * regcache_cache_bypass - Put a register map into cache bypass mode |
572 | * |
573 | * @map: map to configure |
574 | * @enable: flag if changes should not be written to the cache |
575 | * |
576 | * When a register map is marked with the cache bypass option, writes |
577 | * to the register map API will only update the hardware and not |
578 | * the cache directly. This is useful when syncing the cache back to |
579 | * the hardware. |
580 | */ |
581 | void regcache_cache_bypass(struct regmap *map, bool enable) |
582 | { |
583 | map->lock(map->lock_arg); |
584 | WARN_ON(map->cache_only && enable); |
585 | map->cache_bypass = enable; |
586 | trace_regmap_cache_bypass(map, flag: enable); |
587 | map->unlock(map->lock_arg); |
588 | } |
589 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); |
590 | |
591 | /** |
592 | * regcache_reg_cached - Check if a register is cached |
593 | * |
594 | * @map: map to check |
595 | * @reg: register to check |
596 | * |
597 | * Reports if a register is cached. |
598 | */ |
599 | bool regcache_reg_cached(struct regmap *map, unsigned int reg) |
600 | { |
601 | unsigned int val; |
602 | int ret; |
603 | |
604 | map->lock(map->lock_arg); |
605 | |
606 | ret = regcache_read(map, reg, value: &val); |
607 | |
608 | map->unlock(map->lock_arg); |
609 | |
610 | return ret == 0; |
611 | } |
612 | EXPORT_SYMBOL_GPL(regcache_reg_cached); |
613 | |
614 | void regcache_set_val(struct regmap *map, void *base, unsigned int idx, |
615 | unsigned int val) |
616 | { |
617 | /* Use device native format if possible */ |
618 | if (map->format.format_val) { |
619 | map->format.format_val(base + (map->cache_word_size * idx), |
620 | val, 0); |
621 | return; |
622 | } |
623 | |
624 | switch (map->cache_word_size) { |
625 | case 1: { |
626 | u8 *cache = base; |
627 | |
628 | cache[idx] = val; |
629 | break; |
630 | } |
631 | case 2: { |
632 | u16 *cache = base; |
633 | |
634 | cache[idx] = val; |
635 | break; |
636 | } |
637 | case 4: { |
638 | u32 *cache = base; |
639 | |
640 | cache[idx] = val; |
641 | break; |
642 | } |
643 | default: |
644 | BUG(); |
645 | } |
646 | } |
647 | |
648 | unsigned int regcache_get_val(struct regmap *map, const void *base, |
649 | unsigned int idx) |
650 | { |
651 | if (!base) |
652 | return -EINVAL; |
653 | |
654 | /* Use device native format if possible */ |
655 | if (map->format.parse_val) |
656 | return map->format.parse_val(regcache_get_val_addr(map, base, |
657 | idx)); |
658 | |
659 | switch (map->cache_word_size) { |
660 | case 1: { |
661 | const u8 *cache = base; |
662 | |
663 | return cache[idx]; |
664 | } |
665 | case 2: { |
666 | const u16 *cache = base; |
667 | |
668 | return cache[idx]; |
669 | } |
670 | case 4: { |
671 | const u32 *cache = base; |
672 | |
673 | return cache[idx]; |
674 | } |
675 | default: |
676 | BUG(); |
677 | } |
678 | /* unreachable */ |
679 | return -1; |
680 | } |
681 | |
682 | static int regcache_default_cmp(const void *a, const void *b) |
683 | { |
684 | const struct reg_default *_a = a; |
685 | const struct reg_default *_b = b; |
686 | |
687 | return _a->reg - _b->reg; |
688 | } |
689 | |
690 | int regcache_lookup_reg(struct regmap *map, unsigned int reg) |
691 | { |
692 | struct reg_default key; |
693 | struct reg_default *r; |
694 | |
695 | key.reg = reg; |
696 | key.def = 0; |
697 | |
698 | r = bsearch(key: &key, base: map->reg_defaults, num: map->num_reg_defaults, |
699 | size: sizeof(struct reg_default), cmp: regcache_default_cmp); |
700 | |
701 | if (r) |
702 | return r - map->reg_defaults; |
703 | else |
704 | return -ENOENT; |
705 | } |
706 | |
707 | static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) |
708 | { |
709 | if (!cache_present) |
710 | return true; |
711 | |
712 | return test_bit(idx, cache_present); |
713 | } |
714 | |
715 | int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val) |
716 | { |
717 | int ret; |
718 | |
719 | if (!regcache_reg_needs_sync(map, reg, val)) |
720 | return 0; |
721 | |
722 | map->cache_bypass = true; |
723 | |
724 | ret = _regmap_write(map, reg, val); |
725 | |
726 | map->cache_bypass = false; |
727 | |
728 | if (ret != 0) { |
729 | dev_err(map->dev, "Unable to sync register %#x. %d\n" , |
730 | reg, ret); |
731 | return ret; |
732 | } |
733 | dev_dbg(map->dev, "Synced register %#x, value %#x\n" , |
734 | reg, val); |
735 | |
736 | return 0; |
737 | } |
738 | |
739 | static int regcache_sync_block_single(struct regmap *map, void *block, |
740 | unsigned long *cache_present, |
741 | unsigned int block_base, |
742 | unsigned int start, unsigned int end) |
743 | { |
744 | unsigned int i, regtmp, val; |
745 | int ret; |
746 | |
747 | for (i = start; i < end; i++) { |
748 | regtmp = block_base + (i * map->reg_stride); |
749 | |
750 | if (!regcache_reg_present(cache_present, idx: i) || |
751 | !regmap_writeable(map, reg: regtmp)) |
752 | continue; |
753 | |
754 | val = regcache_get_val(map, base: block, idx: i); |
755 | ret = regcache_sync_val(map, reg: regtmp, val); |
756 | if (ret != 0) |
757 | return ret; |
758 | } |
759 | |
760 | return 0; |
761 | } |
762 | |
763 | static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, |
764 | unsigned int base, unsigned int cur) |
765 | { |
766 | size_t val_bytes = map->format.val_bytes; |
767 | int ret, count; |
768 | |
769 | if (*data == NULL) |
770 | return 0; |
771 | |
772 | count = (cur - base) / map->reg_stride; |
773 | |
774 | dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n" , |
775 | count * val_bytes, count, base, cur - map->reg_stride); |
776 | |
777 | map->cache_bypass = true; |
778 | |
779 | ret = _regmap_raw_write(map, reg: base, val: *data, val_len: count * val_bytes, noinc: false); |
780 | if (ret) |
781 | dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n" , |
782 | base, cur - map->reg_stride, ret); |
783 | |
784 | map->cache_bypass = false; |
785 | |
786 | *data = NULL; |
787 | |
788 | return ret; |
789 | } |
790 | |
791 | static int regcache_sync_block_raw(struct regmap *map, void *block, |
792 | unsigned long *cache_present, |
793 | unsigned int block_base, unsigned int start, |
794 | unsigned int end) |
795 | { |
796 | unsigned int i, val; |
797 | unsigned int regtmp = 0; |
798 | unsigned int base = 0; |
799 | const void *data = NULL; |
800 | int ret; |
801 | |
802 | for (i = start; i < end; i++) { |
803 | regtmp = block_base + (i * map->reg_stride); |
804 | |
805 | if (!regcache_reg_present(cache_present, idx: i) || |
806 | !regmap_writeable(map, reg: regtmp)) { |
807 | ret = regcache_sync_block_raw_flush(map, data: &data, |
808 | base, cur: regtmp); |
809 | if (ret != 0) |
810 | return ret; |
811 | continue; |
812 | } |
813 | |
814 | val = regcache_get_val(map, base: block, idx: i); |
815 | if (!regcache_reg_needs_sync(map, reg: regtmp, val)) { |
816 | ret = regcache_sync_block_raw_flush(map, data: &data, |
817 | base, cur: regtmp); |
818 | if (ret != 0) |
819 | return ret; |
820 | continue; |
821 | } |
822 | |
823 | if (!data) { |
824 | data = regcache_get_val_addr(map, base: block, idx: i); |
825 | base = regtmp; |
826 | } |
827 | } |
828 | |
829 | return regcache_sync_block_raw_flush(map, data: &data, base, cur: regtmp + |
830 | map->reg_stride); |
831 | } |
832 | |
833 | int regcache_sync_block(struct regmap *map, void *block, |
834 | unsigned long *cache_present, |
835 | unsigned int block_base, unsigned int start, |
836 | unsigned int end) |
837 | { |
838 | if (regmap_can_raw_write(map) && !map->use_single_write) |
839 | return regcache_sync_block_raw(map, block, cache_present, |
840 | block_base, start, end); |
841 | else |
842 | return regcache_sync_block_single(map, block, cache_present, |
843 | block_base, start, end); |
844 | } |
845 | |