// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"
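/*
 * The maple tree stores each block of contiguous registers as a
 * single entry: a kmalloc()ed array of unsigned long values covering
 * the range [mas.index, mas.last], so the value for register r
 * within an entry lives at entry[r - mas.index].  Readers run under
 * RCU; writers rely on the regmap lock for mutual exclusion.
 */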
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

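/*
 * Writing a register that is not yet cached allocates a new array
 * covering the register plus any entries immediately adjacent at
 * reg - 1 and reg + 1, copies the neighbours' values around the new
 * one, and stores the merged array over the combined range so that
 * contiguous registers stay in a single entry.
 */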
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
			map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}

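/*
 * Dropping a register range erases every entry overlapping
 * [min, max].  An entry that extends beyond the range on either side
 * is split: the surviving portions are duplicated with kmemdup() and
 * stored back as new, smaller entries.
 */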
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	unsigned long lower_index, lower_last;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup(entry, ((min - mas.index) *
						sizeof(unsigned long)),
					map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup(&entry[max - mas.index + 1],
					((mas.last - max) *
					 sizeof(unsigned long)),
					map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

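/*
 * Write one contiguous block of cached values back to the device.
 * The maple state is paused and the RCU read lock dropped around the
 * bus I/O, which may sleep; the caller's iteration resumes safely
 * afterwards.
 */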
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overheads.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}

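/*
 * Sync walks the cached entries in [min, max] and batches runs of
 * consecutive registers that need syncing into single blocks, so
 * regcache_maple_sync_block() can use one raw write per run where
 * the device supports it.
 */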
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

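/*
 * Tear down the cache: free every value array stored in the tree,
 * then destroy and free the tree itself.
 */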
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

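/*
 * Insert one block of contiguous register defaults; first and last
 * are indices into map->reg_defaults.
 */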
static int regcache_maple_insert_block(struct regmap *map, int first,
				       int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kcalloc(last - first + 1, sizeof(unsigned long),
			map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}

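/*
 * Initialise the cache from the register defaults, scanning for runs
 * of consecutive register numbers and inserting each run as a single
 * block.
 */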
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;
	int range_start;

	mt = kmalloc(sizeof(*mt), GFP_KERNEL);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!map->num_reg_defaults)
		return 0;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				goto err;

			range_start = i;
		}
	}

	/* Add the last block */
	ret = regcache_maple_insert_block(map, range_start,
					  map->num_reg_defaults - 1);
	if (ret != 0)
		goto err;

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}

struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};
