// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/page.h>

#include "ram_internal.h"

/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * @sig: Signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start: First valid byte in the buffer.
 * @size: Number of valid bytes in the buffer.
 * @data: The contents of the buffer.
 */
struct persistent_ram_buffer {
	uint32_t sig;
	atomic_t start;
	atomic_t size;
	uint8_t data[];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

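/* Encode one block of data and copy the resulting parity bytes to @ecc. */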
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Initialize the parity buffer */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = prz->ecc_info.par[i];
}

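/*
 * Decode (and correct, where possible) one block of data against the
 * parity bytes in @ecc. Returns the number of corrected symbols, or a
 * negative value if the block is uncorrectable.
 */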
static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		prz->ecc_info.par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
			  NULL, 0, NULL, 0, NULL);
}

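/*
 * Re-encode the parity for every ECC block touched by a write of
 * @count bytes starting at offset @start in the data area.
 */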
static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}

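/* Refresh the parity that covers the buffer header (sig/start/size). */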
static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}

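/*
 * Run ECC over the entire old record, correcting what can be corrected
 * and accumulating corrected-byte and bad-block counts as it goes.
 */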
static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;

		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}

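/*
 * Set up Reed-Solomon ECC for the zone: fill in defaults, carve the
 * parity area out of the tail of the buffer, build the RS codec, and
 * verify the header of any pre-existing record.
 */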
static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
					  sizeof(*prz->ecc_info.par),
					  GFP_KERNEL);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info_ratelimited("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}

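/* Format a human-readable summary of the zone's ECC statistics into @str. */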
ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	ssize_t ret;

	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		ret = snprintf(str, len,
			"\nECC: %d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
	else
		ret = snprintf(str, len, "\nECC: No errors detected\n");

	return ret;
}

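/* Copy @count bytes into the data area at @start and refresh their ECC. */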
static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}

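/* As persistent_ram_update(), but sourcing the bytes from user space. */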
static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ?
		-EFAULT : 0;

	persistent_ram_update_ecc(prz, start, count);
	return ret;
}

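/*
 * Unwrap the circular buffer into a linear kernel allocation
 * (prz->old_log) so the previous record can still be read back
 * after the zone is reused.
 */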
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kvzalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

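/*
 * Append @count bytes to the circular buffer. Writes larger than the
 * buffer keep only their final buffer_size bytes; returns the
 * original @count.
 */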
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}

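/*
 * As persistent_ram_write(), but copying from user space. Returns
 * @count on success or -EFAULT if the user copy faults.
 */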
int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}

size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kvfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}

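/* Discard the buffer contents by resetting start/size and the header ECC. */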
void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}

#define MEM_TYPE_WCOMBINE	0
#define MEM_TYPE_NONCACHED	1
#define MEM_TYPE_NORMAL		2

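/*
 * Map a physically contiguous range that is backed by struct pages
 * (i.e. pfn_valid() System RAM) with vmap(), honoring the requested
 * cacheability.
 */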
static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	switch (memtype) {
	case MEM_TYPE_NORMAL:
		prot = PAGE_KERNEL;
		break;
	case MEM_TYPE_NONCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case MEM_TYPE_WCOMBINE:
		prot = pgprot_writecombine(PAGE_KERNEL);
		break;
	default:
		pr_err("invalid mem_type=%d\n", memtype);
		return NULL;
	}

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;

		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}

	/*
	 * VM_IOREMAP used here to bypass this region during vread()
	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
	 */
	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
	kfree(pages);

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}

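/*
 * Map a range that is not backed by struct pages (e.g. a reserved
 * carveout) by claiming it with request_mem_region() and ioremap()ing it.
 */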
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype, char *label)
{
	void *va;

	if (!request_mem_region(start, size, label ?: "ramoops")) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
			label ?: "ramoops",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity
	 * there is no need to handle anything special like we do in the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}

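/*
 * Choose the mapping strategy for the zone: vmap() when the range is
 * part of System RAM (pfn_valid()), ioremap() otherwise.
 */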
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}

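/*
 * Validate any record left over from a previous boot: check the
 * signature, sanity-check start/size, save a readable copy of valid
 * data, and zap the buffer when it is missing, invalid, or flagged
 * single-use.
 */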
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;
	bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret) {
		pr_warn("ECC failed %s\n", prz->label);
		return ret;
	}

	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
			pr_debug("found existing empty buffer\n");
			return 0;
		}

		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz)) {
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
			zap = true;
		} else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		zap = true;
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (zap)
		persistent_ram_zap(prz);

	return 0;
}

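/*
 * Tear down a zone created by persistent_ram_new(): unmap the backing
 * memory, free the RS codec and parity workspace, and release the
 * saved old record. Safe to call on a partially initialized zone.
 */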
void persistent_ram_free(struct persistent_ram_zone **_prz)
{
	struct persistent_ram_zone *prz;

	if (!_prz)
		return;

	prz = *_prz;
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			/* We must vunmap() at page-granularity. */
			vunmap(prz->vaddr - offset_in_page(prz->paddr));
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	if (prz->rs_decoder) {
		free_rs(prz->rs_decoder);
		prz->rs_decoder = NULL;
	}
	kfree(prz->ecc_info.par);
	prz->ecc_info.par = NULL;

	persistent_ram_free_old(prz);
	kfree(prz->label);
	kfree(prz);
	*_prz = NULL;
}

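/*
 * Allocate and initialize a zone covering the physical range
 * [@start, @start + @size): map it, set up optional ECC, and recover
 * or reset any record from a previous boot. Returns the new zone, or
 * an ERR_PTR() on failure.
 */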
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
			unsigned int memtype, u32 flags, char *label)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;
	prz->label = kstrdup(label, GFP_KERNEL);
	if (!prz->label)
		goto err;

	ret = persistent_ram_buffer_map(start, size, prz, memtype);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		 prz->label, prz->size, (unsigned long long)prz->paddr,
		 sizeof(*prz->buffer), prz->buffer_size,
		 prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		 prz->ecc_info.ecc_size, prz->ecc_info.block_size);

	return prz;
err:
	persistent_ram_free(&prz);
	return ERR_PTR(ret);
}