// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <nd-core.h>
#include <linux/printk.h>
#include <linux/seq_buf.h>

#include "../watermark.h"
#include "nfit_test.h"
#include "ndtest.h"

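/*
 * Fixed test topology: every emulated DIMM backs 32M of pmem plus a 128K
 * label area, and the DIMMs are split across two platform-device
 * instances (test "buses").
 */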
enum {
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	NUM_INSTANCES = 2,
	NUM_DCR = 4,
	NDTEST_MAX_MAPPING = 6,
};

#define NDTEST_SCM_DIMM_CMD_MASK	   \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	(1ul << ND_CMD_GET_CONFIG_DATA) |  \
	(1ul << ND_CMD_SET_CONFIG_DATA) |  \
	(1ul << ND_CMD_CALL))

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)		\
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12)	\
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static DEFINE_SPINLOCK(ndtest_lock);
static struct ndtest_priv *instances[NUM_INSTANCES];

static const struct class ndtest_dimm_class = {
	.name = "nfit_test_dimm",
};

static struct gen_pool *ndtest_pool;

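/*
 * Static DIMM descriptions for each bus. The handle packs
 * node/socket/imc/chan/dimm via NFIT_DIMM_HANDLE(); the lone DIMM in
 * dimm_group2 carries PAPR health flags so the second bus can exercise
 * the unarmed/bad-shutdown reporting paths.
 */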
static struct ndtest_dimm dimm_group1[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
		.physical_id = 1,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
		.physical_id = 2,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
		.physical_id = 3,
		.num_formats = 2,
	},
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
		.physical_id = 4,
		.num_formats = 2,
	},
};

static struct ndtest_dimm dimm_group2[] = {
	{
		.size = DIMM_SIZE,
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.physical_id = 0,
		.num_formats = 1,
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			 PAPR_PMEM_HEALTH_FATAL,
	},
};

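/*
 * Interleave-set mappings: region0 spans the first half of DIMMs 0 and 1,
 * region1 spans the second half (offset SZ_16M) of DIMMs 0-3, and
 * region6 maps the whole of the single DIMM on the second bus.
 */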
static struct ndtest_mapping region0_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	}
};

static struct ndtest_mapping region1_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 2,
		.position = 2,
		.start = SZ_16M,
		.size = SZ_16M,
	},
	{
		.dimm = 3,
		.position = 3,
		.start = SZ_16M,
		.size = SZ_16M,
	},
};

static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
		.range_index = 2,
	},
};

static struct ndtest_mapping region6_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = DIMM_SIZE,
	},
};

static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
		.size = DIMM_SIZE,
		.range_index = 1,
	},
};

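/*
 * Per-bus configuration consumed by ndtest_bus_register(): bus 0 gets
 * five DIMMs and two PMEM regions, bus 1 gets one flagged DIMM backing a
 * single IO-type region.
 */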
static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	/* bus 1 */
	{
		.dimm_start = 0,
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	/* bus 2 */
	{
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};

static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct ndtest_priv, pdev);
}

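/*
 * Label-space command handlers. Reads and writes are clamped to
 * LABEL_SIZE and serviced straight from the vmalloc'd label area,
 * returning buf_len minus the number of bytes copied.
 */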
static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	hdr->status = 0;
	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);

	return buf_len - len;
}

static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);

	return buf_len - len;
}

static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
				  struct nd_cmd_get_config_size *size)
{
	size->status = 0;
	size->max_xfer = 8;
	size->config_size = dimm->config_size;

	return 0;
}

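/*
 * Single 'ndctl' entry point for the test bus: dispatch the label
 * commands above, then apply any failure injected through the fail_cmd /
 * fail_cmd_code sysfs attributes.
 */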
static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
		      struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		      unsigned int buf_len, int *cmd_rc)
{
	struct ndtest_dimm *dimm;
	int _cmd_rc;

	if (!cmd_rc)
		cmd_rc = &_cmd_rc;

	*cmd_rc = 0;

	if (!nvdimm)
		return -EINVAL;

	dimm = nvdimm_provider_data(nvdimm);
	if (!dimm)
		return -EINVAL;

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		*cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
		break;
	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = ndtest_config_get(dimm, buf_len, buf);
		break;
	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = ndtest_config_set(dimm, buf_len, buf);
		break;
	default:
		return -EINVAL;
	}

	/* Failures for a DIMM can be injected using fail_cmd and
	 * fail_cmd_code, see the device attributes below
	 */
	if ((1 << cmd) & dimm->fail_cmd)
		return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;

	return 0;
}

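/*
 * Registered via nfit_test_setup() below: translate a fake physical
 * address back to the nfit_test_resource covering it, matching either
 * the advertised resource range or the backing vmalloc buffer.
 */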
static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct ndtest_priv *t = instances[i];

		if (!t)
			continue;
		spin_lock(&ndtest_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						     + resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
				   && (addr < (unsigned long) n->buf
				       + resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&ndtest_lock);
		if (nfit_res)
			return nfit_res;
	}

	pr_warn("Failed to get resource\n");

	return NULL;
}

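/*
 * Backing-store management: allocations of DIMM_SIZE or larger get their
 * fake physical address from the 128M-aligned gen_pool, smaller ones just
 * reuse the vmalloc address. Each resource is torn down by a devm action
 * bound to the owning platform device.
 */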
static void ndtest_release_resource(void *data)
{
	struct nfit_test_resource *res = data;

	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);

	if (resource_size(&res->res) >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, res->res.start,
			      resource_size(&res->res));
	vfree(res->buf);
	kfree(res);
}

static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
				   dma_addr_t *dma)
{
	dma_addr_t __dma;
	void *buf;
	struct nfit_test_resource *res;
	struct genpool_data_align data = {
		.align = SZ_128M,
	};

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	buf = vmalloc(size);
	if (size >= DIMM_SIZE)
		__dma = gen_pool_alloc_algo(ndtest_pool, size,
					    gen_pool_first_fit_align, &data);
	else
		__dma = (unsigned long) buf;

	if (!__dma)
		goto buf_err;

	INIT_LIST_HEAD(&res->list);
	res->dev = &p->pdev.dev;
	res->buf = buf;
	res->res.start = __dma;
	res->res.end = __dma + size - 1;
	res->res.name = "NFIT";
	spin_lock_init(&res->lock);
	INIT_LIST_HEAD(&res->requests);
	spin_lock(&ndtest_lock);
	list_add(&res->list, &p->resources);
	spin_unlock(&ndtest_lock);

	if (dma)
		*dma = __dma;

	if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
		return res->buf;

buf_err:
	if (__dma && size >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, __dma, size);
	if (buf)
		vfree(buf);
	kfree(res);

	return NULL;
}

static ssize_t range_index_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct ndtest_region *region = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", region->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.name = "papr",
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};

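/*
 * Turn a static ndtest_region description into a real libnvdimm region:
 * allocate backing store, derive the interleave-set cookies from the
 * first DIMM's UUID, fill in the per-DIMM mappings and register a PMEM
 * region on the test bus.
 */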
static int ndtest_create_region(struct ndtest_priv *p,
				struct ndtest_region *region)
{
	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
	struct nd_region_desc *ndr_desc, _ndr_desc;
	struct nd_interleave_set *nd_set;
	struct resource res;
	int i, ndimm = region->mapping[0].dimm;
	u64 uuid[2];

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
	ndr_desc = &_ndr_desc;

	if (!ndtest_alloc_resource(p, region->size, &res.start))
		return -ENOMEM;

	res.end = res.start + region->size - 1;
	ndr_desc->mapping = mappings;
	ndr_desc->res = &res;
	ndr_desc->provider_data = region;
	ndr_desc->attr_groups = ndtest_region_attribute_groups;

	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
		pr_err("failed to parse UUID\n");
		return -ENXIO;
	}

	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	nd_set->cookie1 = cpu_to_le64(uuid[0]);
	nd_set->cookie2 = cpu_to_le64(uuid[1]);
	nd_set->altcookie = nd_set->cookie1;
	ndr_desc->nd_set = nd_set;

	for (i = 0; i < region->num_mappings; i++) {
		ndimm = region->mapping[i].dimm;
		mappings[i].start = region->mapping[i].start;
		mappings[i].size = region->mapping[i].size;
		mappings[i].position = region->mapping[i].position;
		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
	}

	ndr_desc->num_mappings = region->num_mappings;
	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);

	if (!region->region) {
		dev_err(&p->pdev.dev, "Error registering region %pR\n",
			ndr_desc->res);
		return -ENXIO;
	}

	return 0;
}

static int ndtest_init_regions(struct ndtest_priv *p)
{
	int i, ret = 0;

	for (i = 0; i < p->config->num_regions; i++) {
		ret = ndtest_create_region(p, &p->config->regions[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static void put_dimms(void *data)
{
	struct ndtest_priv *p = data;
	int i;

	for (i = 0; i < p->config->dimm_count; i++)
		if (p->config->dimms[i].dev) {
			device_unregister(p->config->dimms[i].dev);
			p->config->dimms[i].dev = NULL;
		}
}

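/*
 * Attributes for the companion "nfit_test_dimm" class device created in
 * ndtest_dimm_register(): 'handle' is read-only, while 'fail_cmd' and
 * 'fail_cmd_code' let tests inject failures handled in ndtest_ctl().
 */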
static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->fail_cmd);
}

static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	unsigned long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd = val;

	return size;
}
static DEVICE_ATTR_RW(fail_cmd);

static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
}

static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	unsigned long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd_code = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd_code);

static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};

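/*
 * Attributes exposed in the nvdimm's "papr" sysfs group: fixed identity
 * values (vendor, id, subsystem_vendor, dirty_shutdown) plus the format
 * descriptors, with 'format1' hidden when a DIMM advertises only one
 * format.
 */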
static ssize_t phys_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x1234567\n");
}
static DEVICE_ATTR_RO(vendor);

static ssize_t id_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
		       0xa, 2016, ~(dimm->handle));
}
static DEVICE_ATTR_RO(id);

static ssize_t nvdimm_handle_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->handle);
}

static struct device_attribute dev_attr_nvdimm_show_handle = {
	.attr = { .name = "handle", .mode = 0444 },
	.show = nvdimm_handle_show,
};

static ssize_t subsystem_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%04x\n", 0);
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static ssize_t formats_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", dimm->num_formats);
}
static DEVICE_ATTR_RO(formats);

static ssize_t format_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (dimm->num_formats > 1)
		return sprintf(buf, "0x201\n");

	return sprintf(buf, "0x101\n");
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x301\n");
}
static DEVICE_ATTR_RO(format1);

static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
		return 0;

	return a->mode;
}

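/*
 * Decode the PAPR health bits into a space-separated keyword list
 * ("not_armed", "flush_fail", ...) reported through the papr/flags
 * attribute.
 */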
static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
	struct seq_buf s;
	u64 flags;

	flags = dimm->flags;

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (flags & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (flags & PAPR_PMEM_SAVE_MASK)
		seq_buf_printf(&s, "save_fail ");

	if (flags & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
static DEVICE_ATTR_RO(flags);

static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.name = "papr",
	.attrs = ndtest_nvdimm_attributes,
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};

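/*
 * Register one emulated DIMM: create the libnvdimm nvdimm object (with
 * NDD_LABELING/NDD_UNARMED derived from the test flags) plus a companion
 * class device that carries the failure-injection attributes.
 */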
static int ndtest_dimm_register(struct ndtest_priv *priv,
				struct ndtest_dimm *dimm, int id)
{
	struct device *dev = &priv->pdev.dev;
	unsigned long dimm_flags = dimm->flags;

	if (dimm->num_formats > 1)
		set_bit(NDD_LABELING, &dimm_flags);

	if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
				     ndtest_nvdimm_attribute_groups, dimm_flags,
				     NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!dimm->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
		return -ENXIO;
	}

	dimm->dev = device_create_with_groups(&ndtest_dimm_class,
					      &priv->pdev.dev,
					      0, dimm, dimm_attribute_groups,
					      "test_dimm%d", id);
	if (!dimm->dev) {
		pr_err("Could not create dimm device attributes\n");
		return -ENOMEM;
	}

	return 0;
}

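/*
 * For every configured DIMM, allocate the label area, the DIMM backing
 * store and the label/DCR address ranges, then register the DIMM on the
 * bus.
 */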
static int ndtest_nvdimm_init(struct ndtest_priv *p)
{
	struct ndtest_dimm *d;
	void *res;
	int i, id;

	for (i = 0; i < p->config->dimm_count; i++) {
		d = &p->config->dimms[i];
		d->id = id = p->config->dimm_start + i;
		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
		if (!res)
			return -ENOMEM;

		d->label_area = res;
		sprintf(d->label_area, "label%d", id);
		d->config_size = LABEL_SIZE;

		if (!ndtest_alloc_resource(p, d->size,
					   &p->dimm_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->label_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->dcr_dma[id]))
			return -ENOMEM;

		d->address = p->dimm_dma[id];

		ndtest_dimm_register(p, d, id);
	}

	return 0;
}

static ssize_t compatible_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "nvdimm_test");
}
static DEVICE_ATTR_RO(compatible);

static struct attribute *of_node_attributes[] = {
	&dev_attr_compatible.attr,
	NULL
};

static const struct attribute_group of_node_attribute_group = {
	.name = "of_node",
	.attrs = of_node_attributes,
};

static const struct attribute_group *ndtest_attribute_groups[] = {
	&of_node_attribute_group,
	NULL,
};

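/*
 * Bus registration: pick the bus config by platform-device id and
 * register an nvdimm bus whose root device carries the synthetic
 * of_node/compatible ("nvdimm_test") attribute group.
 */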
static int ndtest_bus_register(struct ndtest_priv *p)
{
	p->config = &bus_configs[p->pdev.id];

	p->bus_desc.ndctl = ndtest_ctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.provider_name = NULL;
	p->bus_desc.attr_groups = ndtest_attribute_groups;

	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
	if (!p->bus) {
		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
		return -ENOMEM;
	}

	return 0;
}

static int ndtest_remove(struct platform_device *pdev)
{
	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);

	nvdimm_bus_unregister(p->bus);
	return 0;
}

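/*
 * Probe one test instance: register its bus, allocate the per-DIMM DMA
 * address arrays, then bring up DIMMs and regions. The DIMM class
 * devices are cleaned up through the put_dimms() devm action.
 */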
static int ndtest_probe(struct platform_device *pdev)
{
	struct ndtest_priv *p;
	int rc;

	p = to_ndtest_priv(&pdev->dev);
	if (ndtest_bus_register(p))
		return -ENOMEM;

	p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				  sizeof(dma_addr_t), GFP_KERNEL);
	p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				    sizeof(dma_addr_t), GFP_KERNEL);
	p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				   sizeof(dma_addr_t), GFP_KERNEL);

	rc = ndtest_nvdimm_init(p);
	if (rc)
		goto err;

	rc = ndtest_init_regions(p);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
	if (rc)
		goto err;

	platform_set_drvdata(pdev, p);

	return 0;

err:
	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
	return rc;
}

static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};

static void ndtest_release(struct device *dev)
{
	struct ndtest_priv *p = to_ndtest_priv(dev);

	kfree(p);
}

static void cleanup_devices(void)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);

	nfit_test_teardown();

	if (ndtest_pool)
		gen_pool_destroy(ndtest_pool);

	class_unregister(&ndtest_dimm_class);
}

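/*
 * Module init: run the watermark checks from ../watermark.h, hand the
 * resource-lookup hook to the nfit_test infrastructure, carve a 4G-8G
 * window of fake physical address space out of a gen_pool, then register
 * the two platform devices and the driver that binds them.
 */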
static __init int ndtest_init(void)
{
	int rc, i;

	pmem_test();
	libnvdimm_test();
	device_dax_test();
	dax_pmem_test();

	nfit_test_setup(ndtest_resource_lookup, NULL);

	rc = class_register(&ndtest_dimm_class);
	if (rc)
		goto err_register;

	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!ndtest_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	/* Each instance can be taken as a bus, which can have multiple dimms */
	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *priv;
		struct platform_device *pdev;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			rc = -ENOMEM;
			goto err_register;
		}

		INIT_LIST_HEAD(&priv->resources);
		pdev = &priv->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = ndtest_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		instances[i] = priv;
	}

	rc = platform_driver_register(&ndtest_driver);
	if (rc)
		goto err_register;

	return 0;

err_register:
	pr_err("Error registering platform device\n");
	cleanup_devices();

	return rc;
}

static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}

module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");

Source: linux/tools/testing/nvdimm/test/ndtest.c