1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. |
4 | */ |
5 | #include <linux/list_sort.h> |
6 | #include <linux/libnvdimm.h> |
7 | #include <linux/module.h> |
8 | #include <linux/nospec.h> |
9 | #include <linux/mutex.h> |
10 | #include <linux/ndctl.h> |
11 | #include <linux/sysfs.h> |
12 | #include <linux/delay.h> |
13 | #include <linux/list.h> |
14 | #include <linux/acpi.h> |
15 | #include <linux/sort.h> |
16 | #include <linux/io.h> |
17 | #include <linux/nd.h> |
18 | #include <asm/cacheflush.h> |
19 | #include <acpi/nfit.h> |
20 | #include "intel.h" |
21 | #include "nfit.h" |
22 | |
23 | /* |
24 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is |
25 | * irrelevant. |
26 | */ |
27 | #include <linux/io-64-nonatomic-hi-lo.h> |
28 | |
29 | static bool force_enable_dimms; |
30 | module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); |
31 | MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status" ); |
32 | |
33 | static bool disable_vendor_specific; |
34 | module_param(disable_vendor_specific, bool, S_IRUGO); |
35 | MODULE_PARM_DESC(disable_vendor_specific, |
36 | "Limit commands to the publicly specified set" ); |
37 | |
38 | static unsigned long override_dsm_mask; |
39 | module_param(override_dsm_mask, ulong, S_IRUGO); |
40 | MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions" ); |
41 | |
42 | static int default_dsm_family = -1; |
43 | module_param(default_dsm_family, int, S_IRUGO); |
44 | MODULE_PARM_DESC(default_dsm_family, |
45 | "Try this DSM type first when identifying NVDIMM family" ); |
46 | |
47 | static bool no_init_ars; |
48 | module_param(no_init_ars, bool, 0644); |
49 | MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time" ); |
50 | |
51 | static bool force_labels; |
52 | module_param(force_labels, bool, 0444); |
53 | MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods" ); |
54 | |
55 | LIST_HEAD(acpi_descs); |
56 | DEFINE_MUTEX(acpi_desc_lock); |
57 | |
58 | static struct workqueue_struct *nfit_wq; |
59 | |
60 | struct nfit_table_prev { |
61 | struct list_head spas; |
62 | struct list_head memdevs; |
63 | struct list_head dcrs; |
64 | struct list_head bdws; |
65 | struct list_head idts; |
66 | struct list_head flushes; |
67 | }; |
68 | |
69 | static guid_t nfit_uuid[NFIT_UUID_MAX]; |
70 | |
71 | const guid_t *to_nfit_uuid(enum nfit_uuids id) |
72 | { |
73 | return &nfit_uuid[id]; |
74 | } |
75 | EXPORT_SYMBOL(to_nfit_uuid); |
76 | |
77 | static const guid_t *to_nfit_bus_uuid(int family) |
78 | { |
79 | if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT, |
80 | "only secondary bus families can be translated\n" )) |
81 | return NULL; |
82 | /* |
83 | * The index of bus UUIDs starts immediately following the last |
84 | * NVDIMM/leaf family. |
85 | */ |
86 | return to_nfit_uuid(family + NVDIMM_FAMILY_MAX); |
87 | } |
88 | |
89 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) |
90 | { |
91 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
92 | |
93 | /* |
94 | * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct |
95 | * acpi_device. |
96 | */ |
97 | if (!nd_desc->provider_name |
98 | || strcmp(nd_desc->provider_name, "ACPI.NFIT" ) != 0) |
99 | return NULL; |
100 | |
101 | return to_acpi_device(acpi_desc->dev); |
102 | } |
103 | |
104 | static int xlat_bus_status(void *buf, unsigned int cmd, u32 status) |
105 | { |
106 | struct nd_cmd_clear_error *clear_err; |
107 | struct nd_cmd_ars_status *ars_status; |
108 | u16 flags; |
109 | |
110 | switch (cmd) { |
111 | case ND_CMD_ARS_CAP: |
112 | if ((status & 0xffff) == NFIT_ARS_CAP_NONE) |
113 | return -ENOTTY; |
114 | |
115 | /* Command failed */ |
116 | if (status & 0xffff) |
117 | return -EIO; |
118 | |
119 | /* No supported scan types for this range */ |
120 | flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; |
121 | if ((status >> 16 & flags) == 0) |
122 | return -ENOTTY; |
123 | return 0; |
124 | case ND_CMD_ARS_START: |
125 | /* ARS is in progress */ |
126 | if ((status & 0xffff) == NFIT_ARS_START_BUSY) |
127 | return -EBUSY; |
128 | |
129 | /* Command failed */ |
130 | if (status & 0xffff) |
131 | return -EIO; |
132 | return 0; |
133 | case ND_CMD_ARS_STATUS: |
134 | ars_status = buf; |
135 | /* Command failed */ |
136 | if (status & 0xffff) |
137 | return -EIO; |
138 | /* Check extended status (Upper two bytes) */ |
139 | if (status == NFIT_ARS_STATUS_DONE) |
140 | return 0; |
141 | |
142 | /* ARS is in progress */ |
143 | if (status == NFIT_ARS_STATUS_BUSY) |
144 | return -EBUSY; |
145 | |
146 | /* No ARS performed for the current boot */ |
147 | if (status == NFIT_ARS_STATUS_NONE) |
148 | return -EAGAIN; |
149 | |
150 | /* |
151 | * ARS interrupted, either we overflowed or some other |
152 | * agent wants the scan to stop. If we didn't overflow |
153 | * then just continue with the returned results. |
154 | */ |
155 | if (status == NFIT_ARS_STATUS_INTR) { |
156 | if (ars_status->out_length >= 40 && (ars_status->flags |
157 | & NFIT_ARS_F_OVERFLOW)) |
158 | return -ENOSPC; |
159 | return 0; |
160 | } |
161 | |
162 | /* Unknown status */ |
163 | if (status >> 16) |
164 | return -EIO; |
165 | return 0; |
166 | case ND_CMD_CLEAR_ERROR: |
167 | clear_err = buf; |
168 | if (status & 0xffff) |
169 | return -EIO; |
170 | if (!clear_err->cleared) |
171 | return -EIO; |
172 | if (clear_err->length > clear_err->cleared) |
173 | return clear_err->cleared; |
174 | return 0; |
175 | default: |
176 | break; |
177 | } |
178 | |
179 | /* all other non-zero status results in an error */ |
180 | if (status) |
181 | return -EIO; |
182 | return 0; |
183 | } |
184 | |
185 | #define ACPI_LABELS_LOCKED 3 |
186 | |
187 | static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, |
188 | u32 status) |
189 | { |
190 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
191 | |
192 | switch (cmd) { |
193 | case ND_CMD_GET_CONFIG_SIZE: |
194 | /* |
195 | * In the _LSI, _LSR, _LSW case the locked status is |
196 | * communicated via the read/write commands |
197 | */ |
198 | if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) |
199 | break; |
200 | |
201 | if (status >> 16 & ND_CONFIG_LOCKED) |
202 | return -EACCES; |
203 | break; |
204 | case ND_CMD_GET_CONFIG_DATA: |
205 | if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) |
206 | && status == ACPI_LABELS_LOCKED) |
207 | return -EACCES; |
208 | break; |
209 | case ND_CMD_SET_CONFIG_DATA: |
210 | if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags) |
211 | && status == ACPI_LABELS_LOCKED) |
212 | return -EACCES; |
213 | break; |
214 | default: |
215 | break; |
216 | } |
217 | |
218 | /* all other non-zero status results in an error */ |
219 | if (status) |
220 | return -EIO; |
221 | return 0; |
222 | } |
223 | |
224 | static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, |
225 | u32 status) |
226 | { |
227 | if (!nvdimm) |
228 | return xlat_bus_status(buf, cmd, status); |
229 | return xlat_nvdimm_status(nvdimm, buf, cmd, status); |
230 | } |
231 | |
232 | /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */ |
233 | static union acpi_object *pkg_to_buf(union acpi_object *pkg) |
234 | { |
235 | int i; |
236 | void *dst; |
237 | size_t size = 0; |
238 | union acpi_object *buf = NULL; |
239 | |
240 | if (pkg->type != ACPI_TYPE_PACKAGE) { |
241 | WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n" , |
242 | pkg->type); |
243 | goto err; |
244 | } |
245 | |
246 | for (i = 0; i < pkg->package.count; i++) { |
247 | union acpi_object *obj = &pkg->package.elements[i]; |
248 | |
249 | if (obj->type == ACPI_TYPE_INTEGER) |
250 | size += 4; |
251 | else if (obj->type == ACPI_TYPE_BUFFER) |
252 | size += obj->buffer.length; |
253 | else { |
254 | WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n" , |
255 | obj->type); |
256 | goto err; |
257 | } |
258 | } |
259 | |
260 | buf = ACPI_ALLOCATE(sizeof(*buf) + size); |
261 | if (!buf) |
262 | goto err; |
263 | |
264 | dst = buf + 1; |
265 | buf->type = ACPI_TYPE_BUFFER; |
266 | buf->buffer.length = size; |
267 | buf->buffer.pointer = dst; |
268 | for (i = 0; i < pkg->package.count; i++) { |
269 | union acpi_object *obj = &pkg->package.elements[i]; |
270 | |
271 | if (obj->type == ACPI_TYPE_INTEGER) { |
272 | memcpy(dst, &obj->integer.value, 4); |
273 | dst += 4; |
274 | } else if (obj->type == ACPI_TYPE_BUFFER) { |
275 | memcpy(dst, obj->buffer.pointer, obj->buffer.length); |
276 | dst += obj->buffer.length; |
277 | } |
278 | } |
279 | err: |
280 | ACPI_FREE(pkg); |
281 | return buf; |
282 | } |
283 | |
284 | static union acpi_object *int_to_buf(union acpi_object *integer) |
285 | { |
286 | union acpi_object *buf = NULL; |
287 | void *dst = NULL; |
288 | |
289 | if (integer->type != ACPI_TYPE_INTEGER) { |
290 | WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n" , |
291 | integer->type); |
292 | goto err; |
293 | } |
294 | |
295 | buf = ACPI_ALLOCATE(sizeof(*buf) + 4); |
296 | if (!buf) |
297 | goto err; |
298 | |
299 | dst = buf + 1; |
300 | buf->type = ACPI_TYPE_BUFFER; |
301 | buf->buffer.length = 4; |
302 | buf->buffer.pointer = dst; |
303 | memcpy(dst, &integer->integer.value, 4); |
304 | err: |
305 | ACPI_FREE(integer); |
306 | return buf; |
307 | } |
308 | |
309 | static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset, |
310 | u32 len, void *data) |
311 | { |
312 | acpi_status rc; |
313 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
314 | struct acpi_object_list input = { |
315 | .count = 3, |
316 | .pointer = (union acpi_object []) { |
317 | [0] = { |
318 | .integer.type = ACPI_TYPE_INTEGER, |
319 | .integer.value = offset, |
320 | }, |
321 | [1] = { |
322 | .integer.type = ACPI_TYPE_INTEGER, |
323 | .integer.value = len, |
324 | }, |
325 | [2] = { |
326 | .buffer.type = ACPI_TYPE_BUFFER, |
327 | .buffer.pointer = data, |
328 | .buffer.length = len, |
329 | }, |
330 | }, |
331 | }; |
332 | |
333 | rc = acpi_evaluate_object(object: handle, pathname: "_LSW" , parameter_objects: &input, return_object_buffer: &buf); |
334 | if (ACPI_FAILURE(rc)) |
335 | return NULL; |
336 | return int_to_buf(integer: buf.pointer); |
337 | } |
338 | |
339 | static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset, |
340 | u32 len) |
341 | { |
342 | acpi_status rc; |
343 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
344 | struct acpi_object_list input = { |
345 | .count = 2, |
346 | .pointer = (union acpi_object []) { |
347 | [0] = { |
348 | .integer.type = ACPI_TYPE_INTEGER, |
349 | .integer.value = offset, |
350 | }, |
351 | [1] = { |
352 | .integer.type = ACPI_TYPE_INTEGER, |
353 | .integer.value = len, |
354 | }, |
355 | }, |
356 | }; |
357 | |
358 | rc = acpi_evaluate_object(object: handle, pathname: "_LSR" , parameter_objects: &input, return_object_buffer: &buf); |
359 | if (ACPI_FAILURE(rc)) |
360 | return NULL; |
361 | return pkg_to_buf(pkg: buf.pointer); |
362 | } |
363 | |
364 | static union acpi_object *acpi_label_info(acpi_handle handle) |
365 | { |
366 | acpi_status rc; |
367 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
368 | |
369 | rc = acpi_evaluate_object(object: handle, pathname: "_LSI" , NULL, return_object_buffer: &buf); |
370 | if (ACPI_FAILURE(rc)) |
371 | return NULL; |
372 | return pkg_to_buf(pkg: buf.pointer); |
373 | } |
374 | |
375 | static u8 nfit_dsm_revid(unsigned family, unsigned func) |
376 | { |
377 | static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { |
378 | [NVDIMM_FAMILY_INTEL] = { |
379 | [NVDIMM_INTEL_GET_MODES ... |
380 | NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2, |
381 | }, |
382 | }; |
383 | u8 id; |
384 | |
385 | if (family > NVDIMM_FAMILY_MAX) |
386 | return 0; |
387 | if (func > NVDIMM_CMD_MAX) |
388 | return 0; |
389 | id = revid_table[family][func]; |
390 | if (id == 0) |
391 | return 1; /* default */ |
392 | return id; |
393 | } |
394 | |
395 | static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) |
396 | { |
397 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
398 | |
399 | if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL |
400 | && func >= NVDIMM_INTEL_GET_SECURITY_STATE |
401 | && func <= NVDIMM_INTEL_MASTER_SECURE_ERASE) |
402 | return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG); |
403 | return true; |
404 | } |
405 | |
406 | static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, |
407 | struct nd_cmd_pkg *call_pkg, int *family) |
408 | { |
409 | if (call_pkg) { |
410 | int i; |
411 | |
412 | if (nfit_mem && nfit_mem->family != call_pkg->nd_family) |
413 | return -ENOTTY; |
414 | |
415 | for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) |
416 | if (call_pkg->nd_reserved2[i]) |
417 | return -EINVAL; |
418 | *family = call_pkg->nd_family; |
419 | return call_pkg->nd_command; |
420 | } |
421 | |
422 | /* In the !call_pkg case, bus commands == bus functions */ |
423 | if (!nfit_mem) |
424 | return cmd; |
425 | |
426 | /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ |
427 | if (nfit_mem->family == NVDIMM_FAMILY_INTEL) |
428 | return cmd; |
429 | |
430 | /* |
431 | * Force function number validation to fail since 0 is never |
432 | * published as a valid function in dsm_mask. |
433 | */ |
434 | return 0; |
435 | } |
436 | |
437 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, |
438 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) |
439 | { |
440 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
441 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
442 | union acpi_object in_obj, in_buf, *out_obj; |
443 | const struct nd_cmd_desc *desc = NULL; |
444 | struct device *dev = acpi_desc->dev; |
445 | struct nd_cmd_pkg *call_pkg = NULL; |
446 | const char *cmd_name, *dimm_name; |
447 | unsigned long cmd_mask, dsm_mask; |
448 | u32 offset, fw_status = 0; |
449 | acpi_handle handle; |
450 | const guid_t *guid; |
451 | int func, rc, i; |
452 | int family = 0; |
453 | |
454 | if (cmd_rc) |
455 | *cmd_rc = -EINVAL; |
456 | |
457 | if (cmd == ND_CMD_CALL) |
458 | call_pkg = buf; |
459 | func = cmd_to_func(nfit_mem, cmd, call_pkg, family: &family); |
460 | if (func < 0) |
461 | return func; |
462 | |
463 | if (nvdimm) { |
464 | struct acpi_device *adev = nfit_mem->adev; |
465 | |
466 | if (!adev) |
467 | return -ENOTTY; |
468 | |
469 | dimm_name = nvdimm_name(nvdimm); |
470 | cmd_name = nvdimm_cmd_name(cmd); |
471 | cmd_mask = nvdimm_cmd_mask(nvdimm); |
472 | dsm_mask = nfit_mem->dsm_mask; |
473 | desc = nd_cmd_dimm_desc(cmd); |
474 | guid = to_nfit_uuid(nfit_mem->family); |
475 | handle = adev->handle; |
476 | } else { |
477 | struct acpi_device *adev = to_acpi_dev(acpi_desc); |
478 | |
479 | cmd_name = nvdimm_bus_cmd_name(cmd); |
480 | cmd_mask = nd_desc->cmd_mask; |
481 | if (cmd == ND_CMD_CALL && call_pkg->nd_family) { |
482 | family = call_pkg->nd_family; |
483 | if (family > NVDIMM_BUS_FAMILY_MAX || |
484 | !test_bit(family, &nd_desc->bus_family_mask)) |
485 | return -EINVAL; |
486 | family = array_index_nospec(family, |
487 | NVDIMM_BUS_FAMILY_MAX + 1); |
488 | dsm_mask = acpi_desc->family_dsm_mask[family]; |
489 | guid = to_nfit_bus_uuid(family); |
490 | } else { |
491 | dsm_mask = acpi_desc->bus_dsm_mask; |
492 | guid = to_nfit_uuid(NFIT_DEV_BUS); |
493 | } |
494 | desc = nd_cmd_bus_desc(cmd); |
495 | handle = adev->handle; |
496 | dimm_name = "bus" ; |
497 | } |
498 | |
499 | if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) |
500 | return -ENOTTY; |
501 | |
502 | /* |
503 | * Check for a valid command. For ND_CMD_CALL, we also have to |
504 | * make sure that the DSM function is supported. |
505 | */ |
506 | if (cmd == ND_CMD_CALL && |
507 | (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask))) |
508 | return -ENOTTY; |
509 | else if (!test_bit(cmd, &cmd_mask)) |
510 | return -ENOTTY; |
511 | |
512 | in_obj.type = ACPI_TYPE_PACKAGE; |
513 | in_obj.package.count = 1; |
514 | in_obj.package.elements = &in_buf; |
515 | in_buf.type = ACPI_TYPE_BUFFER; |
516 | in_buf.buffer.pointer = buf; |
517 | in_buf.buffer.length = 0; |
518 | |
519 | /* libnvdimm has already validated the input envelope */ |
520 | for (i = 0; i < desc->in_num; i++) |
521 | in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, |
522 | idx: i, buf); |
523 | |
524 | if (call_pkg) { |
525 | /* skip over package wrapper */ |
526 | in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; |
527 | in_buf.buffer.length = call_pkg->nd_size_in; |
528 | } |
529 | |
530 | dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n" , |
531 | dimm_name, cmd, family, func, in_buf.buffer.length); |
532 | if (payload_dumpable(nvdimm, func)) |
533 | print_hex_dump_debug("nvdimm in " , DUMP_PREFIX_OFFSET, 4, 4, |
534 | in_buf.buffer.pointer, |
535 | min_t(u32, 256, in_buf.buffer.length), true); |
536 | |
537 | /* call the BIOS, prefer the named methods over _DSM if available */ |
538 | if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE |
539 | && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) |
540 | out_obj = acpi_label_info(handle); |
541 | else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA |
542 | && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { |
543 | struct nd_cmd_get_config_data_hdr *p = buf; |
544 | |
545 | out_obj = acpi_label_read(handle, offset: p->in_offset, len: p->in_length); |
546 | } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA |
547 | && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) { |
548 | struct nd_cmd_set_config_hdr *p = buf; |
549 | |
550 | out_obj = acpi_label_write(handle, offset: p->in_offset, len: p->in_length, |
551 | data: p->in_buf); |
552 | } else { |
553 | u8 revid; |
554 | |
555 | if (nvdimm) |
556 | revid = nfit_dsm_revid(family: nfit_mem->family, func); |
557 | else |
558 | revid = 1; |
559 | out_obj = acpi_evaluate_dsm(handle, guid, rev: revid, func, argv4: &in_obj); |
560 | } |
561 | |
562 | if (!out_obj) { |
563 | dev_dbg(dev, "%s _DSM failed cmd: %s\n" , dimm_name, cmd_name); |
564 | return -EINVAL; |
565 | } |
566 | |
567 | if (out_obj->type != ACPI_TYPE_BUFFER) { |
568 | dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n" , |
569 | dimm_name, cmd_name, out_obj->type); |
570 | rc = -EINVAL; |
571 | goto out; |
572 | } |
573 | |
574 | dev_dbg(dev, "%s cmd: %s output length: %d\n" , dimm_name, |
575 | cmd_name, out_obj->buffer.length); |
576 | print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, |
577 | out_obj->buffer.pointer, |
578 | min_t(u32, 128, out_obj->buffer.length), true); |
579 | |
580 | if (call_pkg) { |
581 | call_pkg->nd_fw_size = out_obj->buffer.length; |
582 | memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, |
583 | out_obj->buffer.pointer, |
584 | min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); |
585 | |
586 | ACPI_FREE(out_obj); |
587 | /* |
588 | * Need to support FW function w/o known size in advance. |
589 | * Caller can determine required size based upon nd_fw_size. |
590 | * If we return an error (like elsewhere) then caller wouldn't |
591 | * be able to rely upon data returned to make calculation. |
592 | */ |
593 | if (cmd_rc) |
594 | *cmd_rc = 0; |
595 | return 0; |
596 | } |
597 | |
598 | for (i = 0, offset = 0; i < desc->out_num; i++) { |
599 | u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, idx: i, in_field: buf, |
600 | out_field: (u32 *) out_obj->buffer.pointer, |
601 | remainder: out_obj->buffer.length - offset); |
602 | |
603 | if (offset + out_size > out_obj->buffer.length) { |
604 | dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n" , |
605 | dimm_name, cmd_name, i); |
606 | break; |
607 | } |
608 | |
609 | if (in_buf.buffer.length + offset + out_size > buf_len) { |
610 | dev_dbg(dev, "%s output overrun cmd: %s field: %d\n" , |
611 | dimm_name, cmd_name, i); |
612 | rc = -ENXIO; |
613 | goto out; |
614 | } |
615 | memcpy(buf + in_buf.buffer.length + offset, |
616 | out_obj->buffer.pointer + offset, out_size); |
617 | offset += out_size; |
618 | } |
619 | |
620 | /* |
621 | * Set fw_status for all the commands with a known format to be |
622 | * later interpreted by xlat_status(). |
623 | */ |
624 | if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP |
625 | && cmd <= ND_CMD_CLEAR_ERROR) |
626 | || (nvdimm && cmd >= ND_CMD_SMART |
627 | && cmd <= ND_CMD_VENDOR))) |
628 | fw_status = *(u32 *) out_obj->buffer.pointer; |
629 | |
630 | if (offset + in_buf.buffer.length < buf_len) { |
631 | if (i >= 1) { |
632 | /* |
633 | * status valid, return the number of bytes left |
634 | * unfilled in the output buffer |
635 | */ |
636 | rc = buf_len - offset - in_buf.buffer.length; |
637 | if (cmd_rc) |
638 | *cmd_rc = xlat_status(nvdimm, buf, cmd, |
639 | status: fw_status); |
640 | } else { |
641 | dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n" , |
642 | __func__, dimm_name, cmd_name, buf_len, |
643 | offset); |
644 | rc = -ENXIO; |
645 | } |
646 | } else { |
647 | rc = 0; |
648 | if (cmd_rc) |
649 | *cmd_rc = xlat_status(nvdimm, buf, cmd, status: fw_status); |
650 | } |
651 | |
652 | out: |
653 | ACPI_FREE(out_obj); |
654 | |
655 | return rc; |
656 | } |
657 | EXPORT_SYMBOL_GPL(acpi_nfit_ctl); |
658 | |
659 | static const char *spa_type_name(u16 type) |
660 | { |
661 | static const char *to_name[] = { |
662 | [NFIT_SPA_VOLATILE] = "volatile" , |
663 | [NFIT_SPA_PM] = "pmem" , |
664 | [NFIT_SPA_DCR] = "dimm-control-region" , |
665 | [NFIT_SPA_BDW] = "block-data-window" , |
666 | [NFIT_SPA_VDISK] = "volatile-disk" , |
667 | [NFIT_SPA_VCD] = "volatile-cd" , |
668 | [NFIT_SPA_PDISK] = "persistent-disk" , |
669 | [NFIT_SPA_PCD] = "persistent-cd" , |
670 | |
671 | }; |
672 | |
673 | if (type > NFIT_SPA_PCD) |
674 | return "unknown" ; |
675 | |
676 | return to_name[type]; |
677 | } |
678 | |
679 | int nfit_spa_type(struct acpi_nfit_system_address *spa) |
680 | { |
681 | guid_t guid; |
682 | int i; |
683 | |
684 | import_guid(dst: &guid, src: spa->range_guid); |
685 | for (i = 0; i < NFIT_UUID_MAX; i++) |
686 | if (guid_equal(u1: to_nfit_uuid(i), u2: &guid)) |
687 | return i; |
688 | return -1; |
689 | } |
690 | |
691 | static size_t sizeof_spa(struct acpi_nfit_system_address *spa) |
692 | { |
693 | if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID) |
694 | return sizeof(*spa); |
695 | return sizeof(*spa) - 8; |
696 | } |
697 | |
698 | static bool add_spa(struct acpi_nfit_desc *acpi_desc, |
699 | struct nfit_table_prev *prev, |
700 | struct acpi_nfit_system_address *spa) |
701 | { |
702 | struct device *dev = acpi_desc->dev; |
703 | struct nfit_spa *nfit_spa; |
704 | |
705 | if (spa->header.length != sizeof_spa(spa)) |
706 | return false; |
707 | |
708 | list_for_each_entry(nfit_spa, &prev->spas, list) { |
709 | if (memcmp(p: nfit_spa->spa, q: spa, size: sizeof_spa(spa)) == 0) { |
710 | list_move_tail(list: &nfit_spa->list, head: &acpi_desc->spas); |
711 | return true; |
712 | } |
713 | } |
714 | |
715 | nfit_spa = devm_kzalloc(dev, size: sizeof(*nfit_spa) + sizeof_spa(spa), |
716 | GFP_KERNEL); |
717 | if (!nfit_spa) |
718 | return false; |
719 | INIT_LIST_HEAD(list: &nfit_spa->list); |
720 | memcpy(nfit_spa->spa, spa, sizeof_spa(spa)); |
721 | list_add_tail(new: &nfit_spa->list, head: &acpi_desc->spas); |
722 | dev_dbg(dev, "spa index: %d type: %s\n" , |
723 | spa->range_index, |
724 | spa_type_name(nfit_spa_type(spa))); |
725 | return true; |
726 | } |
727 | |
728 | static bool add_memdev(struct acpi_nfit_desc *acpi_desc, |
729 | struct nfit_table_prev *prev, |
730 | struct acpi_nfit_memory_map *memdev) |
731 | { |
732 | struct device *dev = acpi_desc->dev; |
733 | struct nfit_memdev *nfit_memdev; |
734 | |
735 | if (memdev->header.length != sizeof(*memdev)) |
736 | return false; |
737 | |
738 | list_for_each_entry(nfit_memdev, &prev->memdevs, list) |
739 | if (memcmp(p: nfit_memdev->memdev, q: memdev, size: sizeof(*memdev)) == 0) { |
740 | list_move_tail(list: &nfit_memdev->list, head: &acpi_desc->memdevs); |
741 | return true; |
742 | } |
743 | |
744 | nfit_memdev = devm_kzalloc(dev, size: sizeof(*nfit_memdev) + sizeof(*memdev), |
745 | GFP_KERNEL); |
746 | if (!nfit_memdev) |
747 | return false; |
748 | INIT_LIST_HEAD(list: &nfit_memdev->list); |
749 | memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); |
750 | list_add_tail(new: &nfit_memdev->list, head: &acpi_desc->memdevs); |
751 | dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n" , |
752 | memdev->device_handle, memdev->range_index, |
753 | memdev->region_index, memdev->flags); |
754 | return true; |
755 | } |
756 | |
757 | int nfit_get_smbios_id(u32 device_handle, u16 *flags) |
758 | { |
759 | struct acpi_nfit_memory_map *memdev; |
760 | struct acpi_nfit_desc *acpi_desc; |
761 | struct nfit_mem *nfit_mem; |
762 | u16 physical_id; |
763 | |
764 | mutex_lock(&acpi_desc_lock); |
765 | list_for_each_entry(acpi_desc, &acpi_descs, list) { |
766 | mutex_lock(&acpi_desc->init_mutex); |
767 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
768 | memdev = __to_nfit_memdev(nfit_mem); |
769 | if (memdev->device_handle == device_handle) { |
770 | *flags = memdev->flags; |
771 | physical_id = memdev->physical_id; |
772 | mutex_unlock(lock: &acpi_desc->init_mutex); |
773 | mutex_unlock(lock: &acpi_desc_lock); |
774 | return physical_id; |
775 | } |
776 | } |
777 | mutex_unlock(lock: &acpi_desc->init_mutex); |
778 | } |
779 | mutex_unlock(lock: &acpi_desc_lock); |
780 | |
781 | return -ENODEV; |
782 | } |
783 | EXPORT_SYMBOL_GPL(nfit_get_smbios_id); |
784 | |
785 | /* |
786 | * An implementation may provide a truncated control region if no block windows |
787 | * are defined. |
788 | */ |
789 | static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) |
790 | { |
791 | if (dcr->header.length < offsetof(struct acpi_nfit_control_region, |
792 | window_size)) |
793 | return 0; |
794 | if (dcr->windows) |
795 | return sizeof(*dcr); |
796 | return offsetof(struct acpi_nfit_control_region, window_size); |
797 | } |
798 | |
799 | static bool add_dcr(struct acpi_nfit_desc *acpi_desc, |
800 | struct nfit_table_prev *prev, |
801 | struct acpi_nfit_control_region *dcr) |
802 | { |
803 | struct device *dev = acpi_desc->dev; |
804 | struct nfit_dcr *nfit_dcr; |
805 | |
806 | if (!sizeof_dcr(dcr)) |
807 | return false; |
808 | |
809 | list_for_each_entry(nfit_dcr, &prev->dcrs, list) |
810 | if (memcmp(p: nfit_dcr->dcr, q: dcr, size: sizeof_dcr(dcr)) == 0) { |
811 | list_move_tail(list: &nfit_dcr->list, head: &acpi_desc->dcrs); |
812 | return true; |
813 | } |
814 | |
815 | nfit_dcr = devm_kzalloc(dev, size: sizeof(*nfit_dcr) + sizeof(*dcr), |
816 | GFP_KERNEL); |
817 | if (!nfit_dcr) |
818 | return false; |
819 | INIT_LIST_HEAD(list: &nfit_dcr->list); |
820 | memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); |
821 | list_add_tail(new: &nfit_dcr->list, head: &acpi_desc->dcrs); |
822 | dev_dbg(dev, "dcr index: %d windows: %d\n" , |
823 | dcr->region_index, dcr->windows); |
824 | return true; |
825 | } |
826 | |
827 | static bool add_bdw(struct acpi_nfit_desc *acpi_desc, |
828 | struct nfit_table_prev *prev, |
829 | struct acpi_nfit_data_region *bdw) |
830 | { |
831 | struct device *dev = acpi_desc->dev; |
832 | struct nfit_bdw *nfit_bdw; |
833 | |
834 | if (bdw->header.length != sizeof(*bdw)) |
835 | return false; |
836 | list_for_each_entry(nfit_bdw, &prev->bdws, list) |
837 | if (memcmp(p: nfit_bdw->bdw, q: bdw, size: sizeof(*bdw)) == 0) { |
838 | list_move_tail(list: &nfit_bdw->list, head: &acpi_desc->bdws); |
839 | return true; |
840 | } |
841 | |
842 | nfit_bdw = devm_kzalloc(dev, size: sizeof(*nfit_bdw) + sizeof(*bdw), |
843 | GFP_KERNEL); |
844 | if (!nfit_bdw) |
845 | return false; |
846 | INIT_LIST_HEAD(list: &nfit_bdw->list); |
847 | memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); |
848 | list_add_tail(new: &nfit_bdw->list, head: &acpi_desc->bdws); |
849 | dev_dbg(dev, "bdw dcr: %d windows: %d\n" , |
850 | bdw->region_index, bdw->windows); |
851 | return true; |
852 | } |
853 | |
854 | static size_t sizeof_idt(struct acpi_nfit_interleave *idt) |
855 | { |
856 | if (idt->header.length < sizeof(*idt)) |
857 | return 0; |
858 | return sizeof(*idt) + sizeof(u32) * idt->line_count; |
859 | } |
860 | |
861 | static bool add_idt(struct acpi_nfit_desc *acpi_desc, |
862 | struct nfit_table_prev *prev, |
863 | struct acpi_nfit_interleave *idt) |
864 | { |
865 | struct device *dev = acpi_desc->dev; |
866 | struct nfit_idt *nfit_idt; |
867 | |
868 | if (!sizeof_idt(idt)) |
869 | return false; |
870 | |
871 | list_for_each_entry(nfit_idt, &prev->idts, list) { |
872 | if (sizeof_idt(idt: nfit_idt->idt) != sizeof_idt(idt)) |
873 | continue; |
874 | |
875 | if (memcmp(p: nfit_idt->idt, q: idt, size: sizeof_idt(idt)) == 0) { |
876 | list_move_tail(list: &nfit_idt->list, head: &acpi_desc->idts); |
877 | return true; |
878 | } |
879 | } |
880 | |
881 | nfit_idt = devm_kzalloc(dev, size: sizeof(*nfit_idt) + sizeof_idt(idt), |
882 | GFP_KERNEL); |
883 | if (!nfit_idt) |
884 | return false; |
885 | INIT_LIST_HEAD(list: &nfit_idt->list); |
886 | memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); |
887 | list_add_tail(new: &nfit_idt->list, head: &acpi_desc->idts); |
888 | dev_dbg(dev, "idt index: %d num_lines: %d\n" , |
889 | idt->interleave_index, idt->line_count); |
890 | return true; |
891 | } |
892 | |
893 | static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) |
894 | { |
895 | if (flush->header.length < sizeof(*flush)) |
896 | return 0; |
897 | return struct_size(flush, hint_address, flush->hint_count); |
898 | } |
899 | |
900 | static bool add_flush(struct acpi_nfit_desc *acpi_desc, |
901 | struct nfit_table_prev *prev, |
902 | struct acpi_nfit_flush_address *flush) |
903 | { |
904 | struct device *dev = acpi_desc->dev; |
905 | struct nfit_flush *nfit_flush; |
906 | |
907 | if (!sizeof_flush(flush)) |
908 | return false; |
909 | |
910 | list_for_each_entry(nfit_flush, &prev->flushes, list) { |
911 | if (sizeof_flush(flush: nfit_flush->flush) != sizeof_flush(flush)) |
912 | continue; |
913 | |
914 | if (memcmp(p: nfit_flush->flush, q: flush, |
915 | size: sizeof_flush(flush)) == 0) { |
916 | list_move_tail(list: &nfit_flush->list, head: &acpi_desc->flushes); |
917 | return true; |
918 | } |
919 | } |
920 | |
921 | nfit_flush = devm_kzalloc(dev, size: sizeof(*nfit_flush) |
922 | + sizeof_flush(flush), GFP_KERNEL); |
923 | if (!nfit_flush) |
924 | return false; |
925 | INIT_LIST_HEAD(list: &nfit_flush->list); |
926 | memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); |
927 | list_add_tail(new: &nfit_flush->list, head: &acpi_desc->flushes); |
928 | dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n" , |
929 | flush->device_handle, flush->hint_count); |
930 | return true; |
931 | } |
932 | |
933 | static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc, |
934 | struct acpi_nfit_capabilities *pcap) |
935 | { |
936 | struct device *dev = acpi_desc->dev; |
937 | u32 mask; |
938 | |
939 | mask = (1 << (pcap->highest_capability + 1)) - 1; |
940 | acpi_desc->platform_cap = pcap->capabilities & mask; |
941 | dev_dbg(dev, "cap: %#x\n" , acpi_desc->platform_cap); |
942 | return true; |
943 | } |
944 | |
945 | static void *add_table(struct acpi_nfit_desc *acpi_desc, |
946 | struct nfit_table_prev *prev, void *table, const void *end) |
947 | { |
948 | struct device *dev = acpi_desc->dev; |
949 | struct acpi_nfit_header *hdr; |
950 | void *err = ERR_PTR(error: -ENOMEM); |
951 | |
952 | if (table >= end) |
953 | return NULL; |
954 | |
955 | hdr = table; |
956 | if (!hdr->length) { |
957 | dev_warn(dev, "found a zero length table '%d' parsing nfit\n" , |
958 | hdr->type); |
959 | return NULL; |
960 | } |
961 | |
962 | switch (hdr->type) { |
963 | case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: |
964 | if (!add_spa(acpi_desc, prev, spa: table)) |
965 | return err; |
966 | break; |
967 | case ACPI_NFIT_TYPE_MEMORY_MAP: |
968 | if (!add_memdev(acpi_desc, prev, memdev: table)) |
969 | return err; |
970 | break; |
971 | case ACPI_NFIT_TYPE_CONTROL_REGION: |
972 | if (!add_dcr(acpi_desc, prev, dcr: table)) |
973 | return err; |
974 | break; |
975 | case ACPI_NFIT_TYPE_DATA_REGION: |
976 | if (!add_bdw(acpi_desc, prev, bdw: table)) |
977 | return err; |
978 | break; |
979 | case ACPI_NFIT_TYPE_INTERLEAVE: |
980 | if (!add_idt(acpi_desc, prev, idt: table)) |
981 | return err; |
982 | break; |
983 | case ACPI_NFIT_TYPE_FLUSH_ADDRESS: |
984 | if (!add_flush(acpi_desc, prev, flush: table)) |
985 | return err; |
986 | break; |
987 | case ACPI_NFIT_TYPE_SMBIOS: |
988 | dev_dbg(dev, "smbios\n" ); |
989 | break; |
990 | case ACPI_NFIT_TYPE_CAPABILITIES: |
991 | if (!add_platform_cap(acpi_desc, pcap: table)) |
992 | return err; |
993 | break; |
994 | default: |
995 | dev_err(dev, "unknown table '%d' parsing nfit\n" , hdr->type); |
996 | break; |
997 | } |
998 | |
999 | return table + hdr->length; |
1000 | } |
1001 | |
1002 | static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, |
1003 | struct acpi_nfit_system_address *spa) |
1004 | { |
1005 | struct nfit_mem *nfit_mem, *found; |
1006 | struct nfit_memdev *nfit_memdev; |
1007 | int type = spa ? nfit_spa_type(spa) : 0; |
1008 | |
1009 | switch (type) { |
1010 | case NFIT_SPA_DCR: |
1011 | case NFIT_SPA_PM: |
1012 | break; |
1013 | default: |
1014 | if (spa) |
1015 | return 0; |
1016 | } |
1017 | |
1018 | /* |
1019 | * This loop runs in two modes, when a dimm is mapped the loop |
1020 | * adds memdev associations to an existing dimm, or creates a |
1021 | * dimm. In the unmapped dimm case this loop sweeps for memdev |
1022 | * instances with an invalid / zero range_index and adds those |
1023 | * dimms without spa associations. |
1024 | */ |
1025 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
1026 | struct nfit_flush *nfit_flush; |
1027 | struct nfit_dcr *nfit_dcr; |
1028 | u32 device_handle; |
1029 | u16 dcr; |
1030 | |
1031 | if (spa && nfit_memdev->memdev->range_index != spa->range_index) |
1032 | continue; |
1033 | if (!spa && nfit_memdev->memdev->range_index) |
1034 | continue; |
1035 | found = NULL; |
1036 | dcr = nfit_memdev->memdev->region_index; |
1037 | device_handle = nfit_memdev->memdev->device_handle; |
1038 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) |
1039 | if (__to_nfit_memdev(nfit_mem)->device_handle |
1040 | == device_handle) { |
1041 | found = nfit_mem; |
1042 | break; |
1043 | } |
1044 | |
1045 | if (found) |
1046 | nfit_mem = found; |
1047 | else { |
1048 | nfit_mem = devm_kzalloc(dev: acpi_desc->dev, |
1049 | size: sizeof(*nfit_mem), GFP_KERNEL); |
1050 | if (!nfit_mem) |
1051 | return -ENOMEM; |
1052 | INIT_LIST_HEAD(list: &nfit_mem->list); |
1053 | nfit_mem->acpi_desc = acpi_desc; |
1054 | list_add(new: &nfit_mem->list, head: &acpi_desc->dimms); |
1055 | } |
1056 | |
1057 | list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { |
1058 | if (nfit_dcr->dcr->region_index != dcr) |
1059 | continue; |
1060 | /* |
1061 | * Record the control region for the dimm. For |
1062 | * the ACPI 6.1 case, where there are separate |
1063 | * control regions for the pmem vs blk |
1064 | * interfaces, be sure to record the extended |
1065 | * blk details. |
1066 | */ |
1067 | if (!nfit_mem->dcr) |
1068 | nfit_mem->dcr = nfit_dcr->dcr; |
1069 | else if (nfit_mem->dcr->windows == 0 |
1070 | && nfit_dcr->dcr->windows) |
1071 | nfit_mem->dcr = nfit_dcr->dcr; |
1072 | break; |
1073 | } |
1074 | |
1075 | list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { |
1076 | struct acpi_nfit_flush_address *flush; |
1077 | u16 i; |
1078 | |
1079 | if (nfit_flush->flush->device_handle != device_handle) |
1080 | continue; |
1081 | nfit_mem->nfit_flush = nfit_flush; |
1082 | flush = nfit_flush->flush; |
1083 | nfit_mem->flush_wpq = devm_kcalloc(dev: acpi_desc->dev, |
1084 | n: flush->hint_count, |
1085 | size: sizeof(struct resource), |
1086 | GFP_KERNEL); |
1087 | if (!nfit_mem->flush_wpq) |
1088 | return -ENOMEM; |
1089 | for (i = 0; i < flush->hint_count; i++) { |
1090 | struct resource *res = &nfit_mem->flush_wpq[i]; |
1091 | |
1092 | res->start = flush->hint_address[i]; |
1093 | res->end = res->start + 8 - 1; |
1094 | } |
1095 | break; |
1096 | } |
1097 | |
1098 | if (dcr && !nfit_mem->dcr) { |
1099 | dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n" , |
1100 | spa->range_index, dcr); |
1101 | return -ENODEV; |
1102 | } |
1103 | |
1104 | if (type == NFIT_SPA_DCR) { |
1105 | struct nfit_idt *nfit_idt; |
1106 | u16 idt_idx; |
1107 | |
1108 | /* multiple dimms may share a SPA when interleaved */ |
1109 | nfit_mem->spa_dcr = spa; |
1110 | nfit_mem->memdev_dcr = nfit_memdev->memdev; |
1111 | idt_idx = nfit_memdev->memdev->interleave_index; |
1112 | list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { |
1113 | if (nfit_idt->idt->interleave_index != idt_idx) |
1114 | continue; |
1115 | nfit_mem->idt_dcr = nfit_idt->idt; |
1116 | break; |
1117 | } |
1118 | } else if (type == NFIT_SPA_PM) { |
1119 | /* |
1120 | * A single dimm may belong to multiple SPA-PM |
1121 | * ranges, record at least one in addition to |
1122 | * any SPA-DCR range. |
1123 | */ |
1124 | nfit_mem->memdev_pmem = nfit_memdev->memdev; |
1125 | } else |
1126 | nfit_mem->memdev_dcr = nfit_memdev->memdev; |
1127 | } |
1128 | |
1129 | return 0; |
1130 | } |
1131 | |
1132 | static int nfit_mem_cmp(void *priv, const struct list_head *_a, |
1133 | const struct list_head *_b) |
1134 | { |
1135 | struct nfit_mem *a = container_of(_a, typeof(*a), list); |
1136 | struct nfit_mem *b = container_of(_b, typeof(*b), list); |
1137 | u32 handleA, handleB; |
1138 | |
1139 | handleA = __to_nfit_memdev(nfit_mem: a)->device_handle; |
1140 | handleB = __to_nfit_memdev(nfit_mem: b)->device_handle; |
1141 | if (handleA < handleB) |
1142 | return -1; |
1143 | else if (handleA > handleB) |
1144 | return 1; |
1145 | return 0; |
1146 | } |
1147 | |
1148 | static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) |
1149 | { |
1150 | struct nfit_spa *nfit_spa; |
1151 | int rc; |
1152 | |
1153 | |
1154 | /* |
1155 | * For each SPA-DCR or SPA-PMEM address range find its |
1156 | * corresponding MEMDEV(s). From each MEMDEV find the |
1157 | * corresponding DCR. Then, if we're operating on a SPA-DCR, |
1158 | * try to find a SPA-BDW and a corresponding BDW that references |
1159 | * the DCR. Throw it all into an nfit_mem object. Note, that |
1160 | * BDWs are optional. |
1161 | */ |
1162 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
1163 | rc = __nfit_mem_init(acpi_desc, spa: nfit_spa->spa); |
1164 | if (rc) |
1165 | return rc; |
1166 | } |
1167 | |
1168 | /* |
1169 | * If a DIMM has failed to be mapped into SPA there will be no |
1170 | * SPA entries above. Find and register all the unmapped DIMMs |
1171 | * for reporting and recovery purposes. |
1172 | */ |
1173 | rc = __nfit_mem_init(acpi_desc, NULL); |
1174 | if (rc) |
1175 | return rc; |
1176 | |
1177 | list_sort(NULL, head: &acpi_desc->dimms, cmp: nfit_mem_cmp); |
1178 | |
1179 | return 0; |
1180 | } |
1181 | |
1182 | static ssize_t bus_dsm_mask_show(struct device *dev, |
1183 | struct device_attribute *attr, char *buf) |
1184 | { |
1185 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
1186 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
1187 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
1188 | |
1189 | return sprintf(buf, fmt: "%#lx\n" , acpi_desc->bus_dsm_mask); |
1190 | } |
1191 | static struct device_attribute dev_attr_bus_dsm_mask = |
1192 | __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); |
1193 | |
1194 | static ssize_t revision_show(struct device *dev, |
1195 | struct device_attribute *attr, char *buf) |
1196 | { |
1197 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
1198 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
1199 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
1200 | |
1201 | return sprintf(buf, fmt: "%d\n" , acpi_desc->acpi_header.revision); |
1202 | } |
1203 | static DEVICE_ATTR_RO(revision); |
1204 | |
1205 | static ssize_t hw_error_scrub_show(struct device *dev, |
1206 | struct device_attribute *attr, char *buf) |
1207 | { |
1208 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
1209 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
1210 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
1211 | |
1212 | return sprintf(buf, fmt: "%d\n" , acpi_desc->scrub_mode); |
1213 | } |
1214 | |
1215 | /* |
1216 | * The 'hw_error_scrub' attribute can have the following values written to it: |
1217 | * '0': Switch to the default mode where an exception will only insert |
1218 | * the address of the memory error into the poison and badblocks lists. |
1219 | * '1': Enable a full scrub to happen if an exception for a memory error is |
1220 | * received. |
1221 | */ |
1222 | static ssize_t hw_error_scrub_store(struct device *dev, |
1223 | struct device_attribute *attr, const char *buf, size_t size) |
1224 | { |
1225 | struct nvdimm_bus_descriptor *nd_desc; |
1226 | ssize_t rc; |
1227 | long val; |
1228 | |
1229 | rc = kstrtol(s: buf, base: 0, res: &val); |
1230 | if (rc) |
1231 | return rc; |
1232 | |
1233 | device_lock(dev); |
1234 | nd_desc = dev_get_drvdata(dev); |
1235 | if (nd_desc) { |
1236 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
1237 | |
1238 | switch (val) { |
1239 | case HW_ERROR_SCRUB_ON: |
1240 | acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; |
1241 | break; |
1242 | case HW_ERROR_SCRUB_OFF: |
1243 | acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; |
1244 | break; |
1245 | default: |
1246 | rc = -EINVAL; |
1247 | break; |
1248 | } |
1249 | } |
1250 | device_unlock(dev); |
1251 | if (rc) |
1252 | return rc; |
1253 | return size; |
1254 | } |
1255 | static DEVICE_ATTR_RW(hw_error_scrub); |
1256 | |
1257 | /* |
1258 | * This shows the number of full Address Range Scrubs that have been |
1259 | * completed since driver load time. Userspace can wait on this using |
1260 | * select/poll etc. A '+' at the end indicates an ARS is in progress |
1261 | */ |
1262 | static ssize_t scrub_show(struct device *dev, |
1263 | struct device_attribute *attr, char *buf) |
1264 | { |
1265 | struct nvdimm_bus_descriptor *nd_desc; |
1266 | struct acpi_nfit_desc *acpi_desc; |
1267 | ssize_t rc = -ENXIO; |
1268 | bool busy; |
1269 | |
1270 | device_lock(dev); |
1271 | nd_desc = dev_get_drvdata(dev); |
1272 | if (!nd_desc) { |
1273 | device_unlock(dev); |
1274 | return rc; |
1275 | } |
1276 | acpi_desc = to_acpi_desc(nd_desc); |
1277 | |
1278 | mutex_lock(&acpi_desc->init_mutex); |
1279 | busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) |
1280 | && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); |
1281 | rc = sprintf(buf, fmt: "%d%s" , acpi_desc->scrub_count, busy ? "+\n" : "\n" ); |
1282 | /* Allow an admin to poll the busy state at a higher rate */ |
1283 | if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(nr: ARS_POLL, |
1284 | addr: &acpi_desc->scrub_flags)) { |
1285 | acpi_desc->scrub_tmo = 1; |
1286 | mod_delayed_work(wq: nfit_wq, dwork: &acpi_desc->dwork, HZ); |
1287 | } |
1288 | |
1289 | mutex_unlock(lock: &acpi_desc->init_mutex); |
1290 | device_unlock(dev); |
1291 | return rc; |
1292 | } |
1293 | |
1294 | static ssize_t scrub_store(struct device *dev, |
1295 | struct device_attribute *attr, const char *buf, size_t size) |
1296 | { |
1297 | struct nvdimm_bus_descriptor *nd_desc; |
1298 | ssize_t rc; |
1299 | long val; |
1300 | |
1301 | rc = kstrtol(s: buf, base: 0, res: &val); |
1302 | if (rc) |
1303 | return rc; |
1304 | if (val != 1) |
1305 | return -EINVAL; |
1306 | |
1307 | device_lock(dev); |
1308 | nd_desc = dev_get_drvdata(dev); |
1309 | if (nd_desc) { |
1310 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
1311 | |
1312 | rc = acpi_nfit_ars_rescan(acpi_desc, req_type: ARS_REQ_LONG); |
1313 | } |
1314 | device_unlock(dev); |
1315 | if (rc) |
1316 | return rc; |
1317 | return size; |
1318 | } |
1319 | static DEVICE_ATTR_RW(scrub); |
1320 | |
1321 | static bool ars_supported(struct nvdimm_bus *nvdimm_bus) |
1322 | { |
1323 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
1324 | const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START |
1325 | | 1 << ND_CMD_ARS_STATUS; |
1326 | |
1327 | return (nd_desc->cmd_mask & mask) == mask; |
1328 | } |
1329 | |
1330 | static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) |
1331 | { |
1332 | struct device *dev = kobj_to_dev(kobj); |
1333 | struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); |
1334 | |
1335 | if (a == &dev_attr_scrub.attr) |
1336 | return ars_supported(nvdimm_bus) ? a->mode : 0; |
1337 | |
1338 | if (a == &dev_attr_firmware_activate_noidle.attr) |
1339 | return intel_fwa_supported(nvdimm_bus) ? a->mode : 0; |
1340 | |
1341 | return a->mode; |
1342 | } |
1343 | |
1344 | static struct attribute *acpi_nfit_attributes[] = { |
1345 | &dev_attr_revision.attr, |
1346 | &dev_attr_scrub.attr, |
1347 | &dev_attr_hw_error_scrub.attr, |
1348 | &dev_attr_bus_dsm_mask.attr, |
1349 | &dev_attr_firmware_activate_noidle.attr, |
1350 | NULL, |
1351 | }; |
1352 | |
1353 | static const struct attribute_group acpi_nfit_attribute_group = { |
1354 | .name = "nfit" , |
1355 | .attrs = acpi_nfit_attributes, |
1356 | .is_visible = nfit_visible, |
1357 | }; |
1358 | |
1359 | static const struct attribute_group *acpi_nfit_attribute_groups[] = { |
1360 | &acpi_nfit_attribute_group, |
1361 | NULL, |
1362 | }; |
1363 | |
1364 | static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) |
1365 | { |
1366 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1367 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1368 | |
1369 | return __to_nfit_memdev(nfit_mem); |
1370 | } |
1371 | |
1372 | static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) |
1373 | { |
1374 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1375 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1376 | |
1377 | return nfit_mem->dcr; |
1378 | } |
1379 | |
1380 | static ssize_t handle_show(struct device *dev, |
1381 | struct device_attribute *attr, char *buf) |
1382 | { |
1383 | struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); |
1384 | |
1385 | return sprintf(buf, fmt: "%#x\n" , memdev->device_handle); |
1386 | } |
1387 | static DEVICE_ATTR_RO(handle); |
1388 | |
1389 | static ssize_t phys_id_show(struct device *dev, |
1390 | struct device_attribute *attr, char *buf) |
1391 | { |
1392 | struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); |
1393 | |
1394 | return sprintf(buf, fmt: "%#x\n" , memdev->physical_id); |
1395 | } |
1396 | static DEVICE_ATTR_RO(phys_id); |
1397 | |
1398 | static ssize_t vendor_show(struct device *dev, |
1399 | struct device_attribute *attr, char *buf) |
1400 | { |
1401 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1402 | |
1403 | return sprintf(buf, fmt: "0x%04x\n" , be16_to_cpu(dcr->vendor_id)); |
1404 | } |
1405 | static DEVICE_ATTR_RO(vendor); |
1406 | |
1407 | static ssize_t rev_id_show(struct device *dev, |
1408 | struct device_attribute *attr, char *buf) |
1409 | { |
1410 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1411 | |
1412 | return sprintf(buf, fmt: "0x%04x\n" , be16_to_cpu(dcr->revision_id)); |
1413 | } |
1414 | static DEVICE_ATTR_RO(rev_id); |
1415 | |
1416 | static ssize_t device_show(struct device *dev, |
1417 | struct device_attribute *attr, char *buf) |
1418 | { |
1419 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1420 | |
1421 | return sprintf(buf, fmt: "0x%04x\n" , be16_to_cpu(dcr->device_id)); |
1422 | } |
1423 | static DEVICE_ATTR_RO(device); |
1424 | |
1425 | static ssize_t subsystem_vendor_show(struct device *dev, |
1426 | struct device_attribute *attr, char *buf) |
1427 | { |
1428 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1429 | |
1430 | return sprintf(buf, fmt: "0x%04x\n" , be16_to_cpu(dcr->subsystem_vendor_id)); |
1431 | } |
1432 | static DEVICE_ATTR_RO(subsystem_vendor); |
1433 | |
1434 | static ssize_t subsystem_rev_id_show(struct device *dev, |
1435 | struct device_attribute *attr, char *buf) |
1436 | { |
1437 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1438 | |
1439 | return sprintf(buf, fmt: "0x%04x\n" , |
1440 | be16_to_cpu(dcr->subsystem_revision_id)); |
1441 | } |
1442 | static DEVICE_ATTR_RO(subsystem_rev_id); |
1443 | |
1444 | static ssize_t subsystem_device_show(struct device *dev, |
1445 | struct device_attribute *attr, char *buf) |
1446 | { |
1447 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1448 | |
1449 | return sprintf(buf, fmt: "0x%04x\n" , be16_to_cpu(dcr->subsystem_device_id)); |
1450 | } |
1451 | static DEVICE_ATTR_RO(subsystem_device); |
1452 | |
1453 | static int num_nvdimm_formats(struct nvdimm *nvdimm) |
1454 | { |
1455 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1456 | int formats = 0; |
1457 | |
1458 | if (nfit_mem->memdev_pmem) |
1459 | formats++; |
1460 | return formats; |
1461 | } |
1462 | |
1463 | static ssize_t format_show(struct device *dev, |
1464 | struct device_attribute *attr, char *buf) |
1465 | { |
1466 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1467 | |
1468 | return sprintf(buf, fmt: "0x%04x\n" , le16_to_cpu(dcr->code)); |
1469 | } |
1470 | static DEVICE_ATTR_RO(format); |
1471 | |
1472 | static ssize_t format1_show(struct device *dev, |
1473 | struct device_attribute *attr, char *buf) |
1474 | { |
1475 | u32 handle; |
1476 | ssize_t rc = -ENXIO; |
1477 | struct nfit_mem *nfit_mem; |
1478 | struct nfit_memdev *nfit_memdev; |
1479 | struct acpi_nfit_desc *acpi_desc; |
1480 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1481 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1482 | |
1483 | nfit_mem = nvdimm_provider_data(nvdimm); |
1484 | acpi_desc = nfit_mem->acpi_desc; |
1485 | handle = to_nfit_memdev(dev)->device_handle; |
1486 | |
1487 | /* assumes DIMMs have at most 2 published interface codes */ |
1488 | mutex_lock(&acpi_desc->init_mutex); |
1489 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
1490 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; |
1491 | struct nfit_dcr *nfit_dcr; |
1492 | |
1493 | if (memdev->device_handle != handle) |
1494 | continue; |
1495 | |
1496 | list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { |
1497 | if (nfit_dcr->dcr->region_index != memdev->region_index) |
1498 | continue; |
1499 | if (nfit_dcr->dcr->code == dcr->code) |
1500 | continue; |
1501 | rc = sprintf(buf, fmt: "0x%04x\n" , |
1502 | le16_to_cpu(nfit_dcr->dcr->code)); |
1503 | break; |
1504 | } |
1505 | if (rc != -ENXIO) |
1506 | break; |
1507 | } |
1508 | mutex_unlock(lock: &acpi_desc->init_mutex); |
1509 | return rc; |
1510 | } |
1511 | static DEVICE_ATTR_RO(format1); |
1512 | |
1513 | static ssize_t formats_show(struct device *dev, |
1514 | struct device_attribute *attr, char *buf) |
1515 | { |
1516 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1517 | |
1518 | return sprintf(buf, fmt: "%d\n" , num_nvdimm_formats(nvdimm)); |
1519 | } |
1520 | static DEVICE_ATTR_RO(formats); |
1521 | |
1522 | static ssize_t serial_show(struct device *dev, |
1523 | struct device_attribute *attr, char *buf) |
1524 | { |
1525 | struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); |
1526 | |
1527 | return sprintf(buf, fmt: "0x%08x\n" , be32_to_cpu(dcr->serial_number)); |
1528 | } |
1529 | static DEVICE_ATTR_RO(serial); |
1530 | |
1531 | static ssize_t family_show(struct device *dev, |
1532 | struct device_attribute *attr, char *buf) |
1533 | { |
1534 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1535 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1536 | |
1537 | if (nfit_mem->family < 0) |
1538 | return -ENXIO; |
1539 | return sprintf(buf, fmt: "%d\n" , nfit_mem->family); |
1540 | } |
1541 | static DEVICE_ATTR_RO(family); |
1542 | |
1543 | static ssize_t dsm_mask_show(struct device *dev, |
1544 | struct device_attribute *attr, char *buf) |
1545 | { |
1546 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1547 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1548 | |
1549 | if (nfit_mem->family < 0) |
1550 | return -ENXIO; |
1551 | return sprintf(buf, fmt: "%#lx\n" , nfit_mem->dsm_mask); |
1552 | } |
1553 | static DEVICE_ATTR_RO(dsm_mask); |
1554 | |
1555 | static ssize_t flags_show(struct device *dev, |
1556 | struct device_attribute *attr, char *buf) |
1557 | { |
1558 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1559 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1560 | u16 flags = __to_nfit_memdev(nfit_mem)->flags; |
1561 | |
1562 | if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags)) |
1563 | flags |= ACPI_NFIT_MEM_FLUSH_FAILED; |
1564 | |
1565 | return sprintf(buf, fmt: "%s%s%s%s%s%s%s\n" , |
1566 | flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "" , |
1567 | flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "" , |
1568 | flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "" , |
1569 | flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "" , |
1570 | flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "" , |
1571 | flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "" , |
1572 | flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "" ); |
1573 | } |
1574 | static DEVICE_ATTR_RO(flags); |
1575 | |
1576 | static ssize_t id_show(struct device *dev, |
1577 | struct device_attribute *attr, char *buf) |
1578 | { |
1579 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1580 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1581 | |
1582 | return sprintf(buf, fmt: "%s\n" , nfit_mem->id); |
1583 | } |
1584 | static DEVICE_ATTR_RO(id); |
1585 | |
1586 | static ssize_t dirty_shutdown_show(struct device *dev, |
1587 | struct device_attribute *attr, char *buf) |
1588 | { |
1589 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1590 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1591 | |
1592 | return sprintf(buf, fmt: "%d\n" , nfit_mem->dirty_shutdown); |
1593 | } |
1594 | static DEVICE_ATTR_RO(dirty_shutdown); |
1595 | |
1596 | static struct attribute *acpi_nfit_dimm_attributes[] = { |
1597 | &dev_attr_handle.attr, |
1598 | &dev_attr_phys_id.attr, |
1599 | &dev_attr_vendor.attr, |
1600 | &dev_attr_device.attr, |
1601 | &dev_attr_rev_id.attr, |
1602 | &dev_attr_subsystem_vendor.attr, |
1603 | &dev_attr_subsystem_device.attr, |
1604 | &dev_attr_subsystem_rev_id.attr, |
1605 | &dev_attr_format.attr, |
1606 | &dev_attr_formats.attr, |
1607 | &dev_attr_format1.attr, |
1608 | &dev_attr_serial.attr, |
1609 | &dev_attr_flags.attr, |
1610 | &dev_attr_id.attr, |
1611 | &dev_attr_family.attr, |
1612 | &dev_attr_dsm_mask.attr, |
1613 | &dev_attr_dirty_shutdown.attr, |
1614 | NULL, |
1615 | }; |
1616 | |
1617 | static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, |
1618 | struct attribute *a, int n) |
1619 | { |
1620 | struct device *dev = kobj_to_dev(kobj); |
1621 | struct nvdimm *nvdimm = to_nvdimm(dev); |
1622 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
1623 | |
1624 | if (!to_nfit_dcr(dev)) { |
1625 | /* Without a dcr only the memdev attributes can be surfaced */ |
1626 | if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr |
1627 | || a == &dev_attr_flags.attr |
1628 | || a == &dev_attr_family.attr |
1629 | || a == &dev_attr_dsm_mask.attr) |
1630 | return a->mode; |
1631 | return 0; |
1632 | } |
1633 | |
1634 | if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) |
1635 | return 0; |
1636 | |
1637 | if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags) |
1638 | && a == &dev_attr_dirty_shutdown.attr) |
1639 | return 0; |
1640 | |
1641 | return a->mode; |
1642 | } |
1643 | |
1644 | static const struct attribute_group acpi_nfit_dimm_attribute_group = { |
1645 | .name = "nfit" , |
1646 | .attrs = acpi_nfit_dimm_attributes, |
1647 | .is_visible = acpi_nfit_dimm_attr_visible, |
1648 | }; |
1649 | |
1650 | static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { |
1651 | &acpi_nfit_dimm_attribute_group, |
1652 | NULL, |
1653 | }; |
1654 | |
1655 | static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, |
1656 | u32 device_handle) |
1657 | { |
1658 | struct nfit_mem *nfit_mem; |
1659 | |
1660 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) |
1661 | if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) |
1662 | return nfit_mem->nvdimm; |
1663 | |
1664 | return NULL; |
1665 | } |
1666 | |
1667 | void __acpi_nvdimm_notify(struct device *dev, u32 event) |
1668 | { |
1669 | struct nfit_mem *nfit_mem; |
1670 | struct acpi_nfit_desc *acpi_desc; |
1671 | |
1672 | dev_dbg(dev->parent, "%s: event: %d\n" , dev_name(dev), |
1673 | event); |
1674 | |
1675 | if (event != NFIT_NOTIFY_DIMM_HEALTH) { |
1676 | dev_dbg(dev->parent, "%s: unknown event: %d\n" , dev_name(dev), |
1677 | event); |
1678 | return; |
1679 | } |
1680 | |
1681 | acpi_desc = dev_get_drvdata(dev: dev->parent); |
1682 | if (!acpi_desc) |
1683 | return; |
1684 | |
1685 | /* |
1686 | * If we successfully retrieved acpi_desc, then we know nfit_mem data |
1687 | * is still valid. |
1688 | */ |
1689 | nfit_mem = dev_get_drvdata(dev); |
1690 | if (nfit_mem && nfit_mem->flags_attr) |
1691 | sysfs_notify_dirent(kn: nfit_mem->flags_attr); |
1692 | } |
1693 | EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); |
1694 | |
1695 | static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) |
1696 | { |
1697 | struct acpi_device *adev = data; |
1698 | struct device *dev = &adev->dev; |
1699 | |
1700 | device_lock(dev: dev->parent); |
1701 | __acpi_nvdimm_notify(dev, event); |
1702 | device_unlock(dev: dev->parent); |
1703 | } |
1704 | |
1705 | static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) |
1706 | { |
1707 | acpi_handle handle; |
1708 | acpi_status status; |
1709 | |
1710 | status = acpi_get_handle(parent: adev->handle, pathname: method, ret_handle: &handle); |
1711 | |
1712 | if (ACPI_SUCCESS(status)) |
1713 | return true; |
1714 | return false; |
1715 | } |
1716 | |
1717 | __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) |
1718 | { |
1719 | struct device *dev = &nfit_mem->adev->dev; |
1720 | struct nd_intel_smart smart = { 0 }; |
1721 | union acpi_object in_buf = { |
1722 | .buffer.type = ACPI_TYPE_BUFFER, |
1723 | .buffer.length = 0, |
1724 | }; |
1725 | union acpi_object in_obj = { |
1726 | .package.type = ACPI_TYPE_PACKAGE, |
1727 | .package.count = 1, |
1728 | .package.elements = &in_buf, |
1729 | }; |
1730 | const u8 func = ND_INTEL_SMART; |
1731 | const guid_t *guid = to_nfit_uuid(nfit_mem->family); |
1732 | u8 revid = nfit_dsm_revid(family: nfit_mem->family, func); |
1733 | struct acpi_device *adev = nfit_mem->adev; |
1734 | acpi_handle handle = adev->handle; |
1735 | union acpi_object *out_obj; |
1736 | |
1737 | if ((nfit_mem->dsm_mask & (1 << func)) == 0) |
1738 | return; |
1739 | |
1740 | out_obj = acpi_evaluate_dsm(handle, guid, rev: revid, func, argv4: &in_obj); |
1741 | if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER |
1742 | || out_obj->buffer.length < sizeof(smart)) { |
1743 | dev_dbg(dev->parent, "%s: failed to retrieve initial health\n" , |
1744 | dev_name(dev)); |
1745 | ACPI_FREE(out_obj); |
1746 | return; |
1747 | } |
1748 | memcpy(&smart, out_obj->buffer.pointer, sizeof(smart)); |
1749 | ACPI_FREE(out_obj); |
1750 | |
1751 | if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) { |
1752 | if (smart.shutdown_state) |
1753 | set_bit(nr: NFIT_MEM_DIRTY, addr: &nfit_mem->flags); |
1754 | } |
1755 | |
1756 | if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) { |
1757 | set_bit(nr: NFIT_MEM_DIRTY_COUNT, addr: &nfit_mem->flags); |
1758 | nfit_mem->dirty_shutdown = smart.shutdown_count; |
1759 | } |
1760 | } |
1761 | |
1762 | static void populate_shutdown_status(struct nfit_mem *nfit_mem) |
1763 | { |
1764 | /* |
1765 | * For DIMMs that provide a dynamic facility to retrieve a |
1766 | * dirty-shutdown status and/or a dirty-shutdown count, cache |
1767 | * these values in nfit_mem. |
1768 | */ |
1769 | if (nfit_mem->family == NVDIMM_FAMILY_INTEL) |
1770 | nfit_intel_shutdown_status(nfit_mem); |
1771 | } |
1772 | |
1773 | static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, |
1774 | struct nfit_mem *nfit_mem, u32 device_handle) |
1775 | { |
1776 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
1777 | struct acpi_device *adev, *adev_dimm; |
1778 | struct device *dev = acpi_desc->dev; |
1779 | unsigned long dsm_mask, label_mask; |
1780 | const guid_t *guid; |
1781 | int i; |
1782 | int family = -1; |
1783 | struct acpi_nfit_control_region *dcr = nfit_mem->dcr; |
1784 | |
1785 | /* nfit test assumes 1:1 relationship between commands and dsms */ |
1786 | nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; |
1787 | nfit_mem->family = NVDIMM_FAMILY_INTEL; |
1788 | set_bit(NVDIMM_FAMILY_INTEL, addr: &nd_desc->dimm_family_mask); |
1789 | |
1790 | if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) |
1791 | sprintf(buf: nfit_mem->id, fmt: "%04x-%02x-%04x-%08x" , |
1792 | be16_to_cpu(dcr->vendor_id), |
1793 | dcr->manufacturing_location, |
1794 | be16_to_cpu(dcr->manufacturing_date), |
1795 | be32_to_cpu(dcr->serial_number)); |
1796 | else |
1797 | sprintf(buf: nfit_mem->id, fmt: "%04x-%08x" , |
1798 | be16_to_cpu(dcr->vendor_id), |
1799 | be32_to_cpu(dcr->serial_number)); |
1800 | |
1801 | adev = to_acpi_dev(acpi_desc); |
1802 | if (!adev) { |
1803 | /* unit test case */ |
1804 | populate_shutdown_status(nfit_mem); |
1805 | return 0; |
1806 | } |
1807 | |
1808 | adev_dimm = acpi_find_child_device(parent: adev, address: device_handle, check_children: false); |
1809 | nfit_mem->adev = adev_dimm; |
1810 | if (!adev_dimm) { |
1811 | dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n" , |
1812 | device_handle); |
1813 | return force_enable_dimms ? 0 : -ENODEV; |
1814 | } |
1815 | |
1816 | if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, |
1817 | ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { |
1818 | dev_err(dev, "%s: notification registration failed\n" , |
1819 | dev_name(&adev_dimm->dev)); |
1820 | return -ENXIO; |
1821 | } |
1822 | /* |
1823 | * Record nfit_mem for the notification path to track back to |
1824 | * the nfit sysfs attributes for this dimm device object. |
1825 | */ |
1826 | dev_set_drvdata(dev: &adev_dimm->dev, data: nfit_mem); |
1827 | |
1828 | /* |
1829 | * There are 4 "legacy" NVDIMM command sets |
1830 | * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before |
1831 | * an EFI working group was established to constrain this |
1832 | * proliferation. The nfit driver probes for the supported command |
1833 | * set by GUID. Note, if you're a platform developer looking to add |
1834 | * a new command set to this probe, consider using an existing set, |
1835 | * or otherwise seek approval to publish the command set at |
1836 | * http://www.uefi.org/RFIC_LIST. |
1837 | * |
1838 | * Note, that checking for function0 (bit0) tells us if any commands |
1839 | * are reachable through this GUID. |
1840 | */ |
1841 | clear_bit(NVDIMM_FAMILY_INTEL, addr: &nd_desc->dimm_family_mask); |
1842 | for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) |
1843 | if (acpi_check_dsm(handle: adev_dimm->handle, guid: to_nfit_uuid(i), rev: 1, funcs: 1)) { |
1844 | set_bit(nr: i, addr: &nd_desc->dimm_family_mask); |
1845 | if (family < 0 || i == default_dsm_family) |
1846 | family = i; |
1847 | } |
1848 | |
1849 | /* limit the supported commands to those that are publicly documented */ |
1850 | nfit_mem->family = family; |
1851 | if (override_dsm_mask && !disable_vendor_specific) |
1852 | dsm_mask = override_dsm_mask; |
1853 | else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { |
1854 | dsm_mask = NVDIMM_INTEL_CMDMASK; |
1855 | if (disable_vendor_specific) |
1856 | dsm_mask &= ~(1 << ND_CMD_VENDOR); |
1857 | } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { |
1858 | dsm_mask = 0x1c3c76; |
1859 | } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { |
1860 | dsm_mask = 0x1fe; |
1861 | if (disable_vendor_specific) |
1862 | dsm_mask &= ~(1 << 8); |
1863 | } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { |
1864 | dsm_mask = 0xffffffff; |
1865 | } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) { |
1866 | dsm_mask = 0x1f; |
1867 | } else { |
1868 | dev_dbg(dev, "unknown dimm command family\n" ); |
1869 | nfit_mem->family = -1; |
1870 | /* DSMs are optional, continue loading the driver... */ |
1871 | return 0; |
1872 | } |
1873 | |
1874 | /* |
1875 | * Function 0 is the command interrogation function, don't |
1876 | * export it to potential userspace use, and enable it to be |
1877 | * used as an error value in acpi_nfit_ctl(). |
1878 | */ |
1879 | dsm_mask &= ~1UL; |
1880 | |
1881 | guid = to_nfit_uuid(nfit_mem->family); |
1882 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) |
1883 | if (acpi_check_dsm(handle: adev_dimm->handle, guid, |
1884 | rev: nfit_dsm_revid(family: nfit_mem->family, func: i), |
1885 | funcs: 1ULL << i)) |
1886 | set_bit(nr: i, addr: &nfit_mem->dsm_mask); |
1887 | |
1888 | /* |
1889 | * Prefer the NVDIMM_FAMILY_INTEL label read commands if present |
1890 | * due to their better semantics handling locked capacity. |
1891 | */ |
1892 | label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA |
1893 | | 1 << ND_CMD_SET_CONFIG_DATA; |
1894 | if (family == NVDIMM_FAMILY_INTEL |
1895 | && (dsm_mask & label_mask) == label_mask) |
1896 | /* skip _LS{I,R,W} enabling */; |
1897 | else { |
1898 | if (acpi_nvdimm_has_method(adev: adev_dimm, method: "_LSI" ) |
1899 | && acpi_nvdimm_has_method(adev: adev_dimm, method: "_LSR" )) { |
1900 | dev_dbg(dev, "%s: has _LSR\n" , dev_name(&adev_dimm->dev)); |
1901 | set_bit(nr: NFIT_MEM_LSR, addr: &nfit_mem->flags); |
1902 | } |
1903 | |
1904 | if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) |
1905 | && acpi_nvdimm_has_method(adev: adev_dimm, method: "_LSW" )) { |
1906 | dev_dbg(dev, "%s: has _LSW\n" , dev_name(&adev_dimm->dev)); |
1907 | set_bit(nr: NFIT_MEM_LSW, addr: &nfit_mem->flags); |
1908 | } |
1909 | |
1910 | /* |
1911 | * Quirk read-only label configurations to preserve |
1912 | * access to label-less namespaces by default. |
1913 | */ |
1914 | if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags) |
1915 | && !force_labels) { |
1916 | dev_dbg(dev, "%s: No _LSW, disable labels\n" , |
1917 | dev_name(&adev_dimm->dev)); |
1918 | clear_bit(nr: NFIT_MEM_LSR, addr: &nfit_mem->flags); |
1919 | } else |
1920 | dev_dbg(dev, "%s: Force enable labels\n" , |
1921 | dev_name(&adev_dimm->dev)); |
1922 | } |
1923 | |
1924 | populate_shutdown_status(nfit_mem); |
1925 | |
1926 | return 0; |
1927 | } |
1928 | |
1929 | static void shutdown_dimm_notify(void *data) |
1930 | { |
1931 | struct acpi_nfit_desc *acpi_desc = data; |
1932 | struct nfit_mem *nfit_mem; |
1933 | |
1934 | mutex_lock(&acpi_desc->init_mutex); |
1935 | /* |
1936 | * Clear out the nfit_mem->flags_attr and shut down dimm event |
1937 | * notifications. |
1938 | */ |
1939 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
1940 | struct acpi_device *adev_dimm = nfit_mem->adev; |
1941 | |
1942 | if (nfit_mem->flags_attr) { |
1943 | sysfs_put(kn: nfit_mem->flags_attr); |
1944 | nfit_mem->flags_attr = NULL; |
1945 | } |
1946 | if (adev_dimm) { |
1947 | acpi_remove_notify_handler(device: adev_dimm->handle, |
1948 | ACPI_DEVICE_NOTIFY, handler: acpi_nvdimm_notify); |
1949 | dev_set_drvdata(dev: &adev_dimm->dev, NULL); |
1950 | } |
1951 | } |
1952 | mutex_unlock(lock: &acpi_desc->init_mutex); |
1953 | } |
1954 | |
1955 | static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) |
1956 | { |
1957 | switch (family) { |
1958 | case NVDIMM_FAMILY_INTEL: |
1959 | return intel_security_ops; |
1960 | default: |
1961 | return NULL; |
1962 | } |
1963 | } |
1964 | |
1965 | static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops( |
1966 | struct nfit_mem *nfit_mem) |
1967 | { |
1968 | unsigned long mask; |
1969 | struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; |
1970 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
1971 | |
1972 | if (!nd_desc->fw_ops) |
1973 | return NULL; |
1974 | |
1975 | if (nfit_mem->family != NVDIMM_FAMILY_INTEL) |
1976 | return NULL; |
1977 | |
1978 | mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK; |
1979 | if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) |
1980 | return NULL; |
1981 | |
1982 | return intel_fw_ops; |
1983 | } |
1984 | |
1985 | static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) |
1986 | { |
1987 | struct nfit_mem *nfit_mem; |
1988 | int dimm_count = 0, rc; |
1989 | struct nvdimm *nvdimm; |
1990 | |
1991 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
1992 | struct acpi_nfit_flush_address *flush; |
1993 | unsigned long flags = 0, cmd_mask; |
1994 | struct nfit_memdev *nfit_memdev; |
1995 | u32 device_handle; |
1996 | u16 mem_flags; |
1997 | |
1998 | device_handle = __to_nfit_memdev(nfit_mem)->device_handle; |
1999 | nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); |
2000 | if (nvdimm) { |
2001 | dimm_count++; |
2002 | continue; |
2003 | } |
2004 | |
2005 | /* collate flags across all memdevs for this dimm */ |
2006 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
2007 | struct acpi_nfit_memory_map *dimm_memdev; |
2008 | |
2009 | dimm_memdev = __to_nfit_memdev(nfit_mem); |
2010 | if (dimm_memdev->device_handle |
2011 | != nfit_memdev->memdev->device_handle) |
2012 | continue; |
2013 | dimm_memdev->flags |= nfit_memdev->memdev->flags; |
2014 | } |
2015 | |
2016 | mem_flags = __to_nfit_memdev(nfit_mem)->flags; |
2017 | if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) |
2018 | set_bit(nr: NDD_UNARMED, addr: &flags); |
2019 | |
2020 | rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); |
2021 | if (rc) |
2022 | continue; |
2023 | |
2024 | /* |
2025 | * TODO: provide translation for non-NVDIMM_FAMILY_INTEL |
2026 | * devices (i.e. from nd_cmd to acpi_dsm) to standardize the |
2027 | * userspace interface. |
2028 | */ |
2029 | cmd_mask = 1UL << ND_CMD_CALL; |
2030 | if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { |
2031 | /* |
2032 | * These commands have a 1:1 correspondence |
2033 | * between DSM payload and libnvdimm ioctl |
2034 | * payload format. |
2035 | */ |
2036 | cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; |
2037 | } |
2038 | |
2039 | if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { |
2040 | set_bit(nr: ND_CMD_GET_CONFIG_SIZE, addr: &cmd_mask); |
2041 | set_bit(nr: ND_CMD_GET_CONFIG_DATA, addr: &cmd_mask); |
2042 | } |
2043 | if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) |
2044 | set_bit(nr: ND_CMD_SET_CONFIG_DATA, addr: &cmd_mask); |
2045 | |
2046 | flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush |
2047 | : NULL; |
2048 | nvdimm = __nvdimm_create(nvdimm_bus: acpi_desc->nvdimm_bus, provider_data: nfit_mem, |
2049 | groups: acpi_nfit_dimm_attribute_groups, |
2050 | flags, cmd_mask, num_flush: flush ? flush->hint_count : 0, |
2051 | flush_wpq: nfit_mem->flush_wpq, dimm_id: &nfit_mem->id[0], |
2052 | sec_ops: acpi_nfit_get_security_ops(family: nfit_mem->family), |
2053 | fw_ops: acpi_nfit_get_fw_ops(nfit_mem)); |
2054 | if (!nvdimm) |
2055 | return -ENOMEM; |
2056 | |
2057 | nfit_mem->nvdimm = nvdimm; |
2058 | dimm_count++; |
2059 | |
2060 | if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) |
2061 | continue; |
2062 | |
2063 | dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n" , |
2064 | nvdimm_name(nvdimm), |
2065 | mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "" , |
2066 | mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail" :"" , |
2067 | mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "" , |
2068 | mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "" , |
2069 | mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "" ); |
2070 | |
2071 | } |
2072 | |
2073 | rc = nvdimm_bus_check_dimm_count(nvdimm_bus: acpi_desc->nvdimm_bus, dimm_count); |
2074 | if (rc) |
2075 | return rc; |
2076 | |
2077 | /* |
2078 | * Now that dimms are successfully registered, and async registration |
2079 | * is flushed, attempt to enable event notification. |
2080 | */ |
2081 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
2082 | struct kernfs_node *nfit_kernfs; |
2083 | |
2084 | nvdimm = nfit_mem->nvdimm; |
2085 | if (!nvdimm) |
2086 | continue; |
2087 | |
2088 | nfit_kernfs = sysfs_get_dirent(parent: nvdimm_kobj(nvdimm)->sd, name: "nfit" ); |
2089 | if (nfit_kernfs) |
2090 | nfit_mem->flags_attr = sysfs_get_dirent(parent: nfit_kernfs, |
2091 | name: "flags" ); |
2092 | sysfs_put(kn: nfit_kernfs); |
2093 | if (!nfit_mem->flags_attr) |
2094 | dev_warn(acpi_desc->dev, "%s: notifications disabled\n" , |
2095 | nvdimm_name(nvdimm)); |
2096 | } |
2097 | |
2098 | return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, |
2099 | acpi_desc); |
2100 | } |
2101 | |
2102 | /* |
2103 | * These constants are private because there are no kernel consumers of |
2104 | * these commands. |
2105 | */ |
2106 | enum nfit_aux_cmds { |
2107 | NFIT_CMD_TRANSLATE_SPA = 5, |
2108 | NFIT_CMD_ARS_INJECT_SET = 7, |
2109 | NFIT_CMD_ARS_INJECT_CLEAR = 8, |
2110 | NFIT_CMD_ARS_INJECT_GET = 9, |
2111 | }; |
2112 | |
2113 | static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) |
2114 | { |
2115 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
2116 | const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); |
2117 | unsigned long dsm_mask, *mask; |
2118 | struct acpi_device *adev; |
2119 | int i; |
2120 | |
2121 | set_bit(nr: ND_CMD_CALL, addr: &nd_desc->cmd_mask); |
2122 | set_bit(NVDIMM_BUS_FAMILY_NFIT, addr: &nd_desc->bus_family_mask); |
2123 | |
2124 | /* enable nfit_test to inject bus command emulation */ |
2125 | if (acpi_desc->bus_cmd_force_en) { |
2126 | nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; |
2127 | mask = &nd_desc->bus_family_mask; |
2128 | if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { |
2129 | set_bit(NVDIMM_BUS_FAMILY_INTEL, addr: mask); |
2130 | nd_desc->fw_ops = intel_bus_fw_ops; |
2131 | } |
2132 | } |
2133 | |
2134 | adev = to_acpi_dev(acpi_desc); |
2135 | if (!adev) |
2136 | return; |
2137 | |
2138 | for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) |
2139 | if (acpi_check_dsm(handle: adev->handle, guid, rev: 1, funcs: 1ULL << i)) |
2140 | set_bit(nr: i, addr: &nd_desc->cmd_mask); |
2141 | |
2142 | dsm_mask = |
2143 | (1 << ND_CMD_ARS_CAP) | |
2144 | (1 << ND_CMD_ARS_START) | |
2145 | (1 << ND_CMD_ARS_STATUS) | |
2146 | (1 << ND_CMD_CLEAR_ERROR) | |
2147 | (1 << NFIT_CMD_TRANSLATE_SPA) | |
2148 | (1 << NFIT_CMD_ARS_INJECT_SET) | |
2149 | (1 << NFIT_CMD_ARS_INJECT_CLEAR) | |
2150 | (1 << NFIT_CMD_ARS_INJECT_GET); |
2151 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) |
2152 | if (acpi_check_dsm(handle: adev->handle, guid, rev: 1, funcs: 1ULL << i)) |
2153 | set_bit(nr: i, addr: &acpi_desc->bus_dsm_mask); |
2154 | |
2155 | /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */ |
2156 | dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; |
2157 | guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL); |
2158 | mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; |
2159 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) |
2160 | if (acpi_check_dsm(handle: adev->handle, guid, rev: 1, funcs: 1ULL << i)) |
2161 | set_bit(nr: i, addr: mask); |
2162 | |
2163 | if (*mask == dsm_mask) { |
2164 | set_bit(NVDIMM_BUS_FAMILY_INTEL, addr: &nd_desc->bus_family_mask); |
2165 | nd_desc->fw_ops = intel_bus_fw_ops; |
2166 | } |
2167 | } |
2168 | |
2169 | static ssize_t range_index_show(struct device *dev, |
2170 | struct device_attribute *attr, char *buf) |
2171 | { |
2172 | struct nd_region *nd_region = to_nd_region(dev); |
2173 | struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); |
2174 | |
2175 | return sprintf(buf, fmt: "%d\n" , nfit_spa->spa->range_index); |
2176 | } |
2177 | static DEVICE_ATTR_RO(range_index); |
2178 | |
2179 | static struct attribute *acpi_nfit_region_attributes[] = { |
2180 | &dev_attr_range_index.attr, |
2181 | NULL, |
2182 | }; |
2183 | |
2184 | static const struct attribute_group acpi_nfit_region_attribute_group = { |
2185 | .name = "nfit" , |
2186 | .attrs = acpi_nfit_region_attributes, |
2187 | }; |
2188 | |
2189 | static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { |
2190 | &acpi_nfit_region_attribute_group, |
2191 | NULL, |
2192 | }; |
2193 | |
2194 | /* enough info to uniquely specify an interleave set */ |
2195 | struct nfit_set_info { |
2196 | u64 region_offset; |
2197 | u32 serial_number; |
2198 | u32 pad; |
2199 | }; |
2200 | |
2201 | struct nfit_set_info2 { |
2202 | u64 region_offset; |
2203 | u32 serial_number; |
2204 | u16 vendor_id; |
2205 | u16 manufacturing_date; |
2206 | u8 manufacturing_location; |
2207 | u8 reserved[31]; |
2208 | }; |
2209 | |
2210 | static int cmp_map_compat(const void *m0, const void *m1) |
2211 | { |
2212 | const struct nfit_set_info *map0 = m0; |
2213 | const struct nfit_set_info *map1 = m1; |
2214 | |
2215 | return memcmp(p: &map0->region_offset, q: &map1->region_offset, |
2216 | size: sizeof(u64)); |
2217 | } |
2218 | |
2219 | static int cmp_map(const void *m0, const void *m1) |
2220 | { |
2221 | const struct nfit_set_info *map0 = m0; |
2222 | const struct nfit_set_info *map1 = m1; |
2223 | |
2224 | if (map0->region_offset < map1->region_offset) |
2225 | return -1; |
2226 | else if (map0->region_offset > map1->region_offset) |
2227 | return 1; |
2228 | return 0; |
2229 | } |
2230 | |
2231 | static int cmp_map2(const void *m0, const void *m1) |
2232 | { |
2233 | const struct nfit_set_info2 *map0 = m0; |
2234 | const struct nfit_set_info2 *map1 = m1; |
2235 | |
2236 | if (map0->region_offset < map1->region_offset) |
2237 | return -1; |
2238 | else if (map0->region_offset > map1->region_offset) |
2239 | return 1; |
2240 | return 0; |
2241 | } |
2242 | |
2243 | /* Retrieve the nth entry referencing this spa */ |
2244 | static struct acpi_nfit_memory_map *memdev_from_spa( |
2245 | struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) |
2246 | { |
2247 | struct nfit_memdev *nfit_memdev; |
2248 | |
2249 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) |
2250 | if (nfit_memdev->memdev->range_index == range_index) |
2251 | if (n-- == 0) |
2252 | return nfit_memdev->memdev; |
2253 | return NULL; |
2254 | } |
2255 | |
2256 | static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, |
2257 | struct nd_region_desc *ndr_desc, |
2258 | struct acpi_nfit_system_address *spa) |
2259 | { |
2260 | struct device *dev = acpi_desc->dev; |
2261 | struct nd_interleave_set *nd_set; |
2262 | u16 nr = ndr_desc->num_mappings; |
2263 | struct nfit_set_info2 *info2; |
2264 | struct nfit_set_info *info; |
2265 | int i; |
2266 | |
2267 | nd_set = devm_kzalloc(dev, size: sizeof(*nd_set), GFP_KERNEL); |
2268 | if (!nd_set) |
2269 | return -ENOMEM; |
2270 | import_guid(dst: &nd_set->type_guid, src: spa->range_guid); |
2271 | |
2272 | info = devm_kcalloc(dev, n: nr, size: sizeof(*info), GFP_KERNEL); |
2273 | if (!info) |
2274 | return -ENOMEM; |
2275 | |
2276 | info2 = devm_kcalloc(dev, n: nr, size: sizeof(*info2), GFP_KERNEL); |
2277 | if (!info2) |
2278 | return -ENOMEM; |
2279 | |
2280 | for (i = 0; i < nr; i++) { |
2281 | struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; |
2282 | struct nvdimm *nvdimm = mapping->nvdimm; |
2283 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
2284 | struct nfit_set_info *map = &info[i]; |
2285 | struct nfit_set_info2 *map2 = &info2[i]; |
2286 | struct acpi_nfit_memory_map *memdev = |
2287 | memdev_from_spa(acpi_desc, range_index: spa->range_index, n: i); |
2288 | struct acpi_nfit_control_region *dcr = nfit_mem->dcr; |
2289 | |
2290 | if (!memdev || !nfit_mem->dcr) { |
2291 | dev_err(dev, "%s: failed to find DCR\n" , __func__); |
2292 | return -ENODEV; |
2293 | } |
2294 | |
2295 | map->region_offset = memdev->region_offset; |
2296 | map->serial_number = dcr->serial_number; |
2297 | |
2298 | map2->region_offset = memdev->region_offset; |
2299 | map2->serial_number = dcr->serial_number; |
2300 | map2->vendor_id = dcr->vendor_id; |
2301 | map2->manufacturing_date = dcr->manufacturing_date; |
2302 | map2->manufacturing_location = dcr->manufacturing_location; |
2303 | } |
2304 | |
2305 | /* v1.1 namespaces */ |
2306 | sort(base: info, num: nr, size: sizeof(*info), cmp_func: cmp_map, NULL); |
2307 | nd_set->cookie1 = nd_fletcher64(addr: info, len: sizeof(*info) * nr, le: 0); |
2308 | |
2309 | /* v1.2 namespaces */ |
2310 | sort(base: info2, num: nr, size: sizeof(*info2), cmp_func: cmp_map2, NULL); |
2311 | nd_set->cookie2 = nd_fletcher64(addr: info2, len: sizeof(*info2) * nr, le: 0); |
2312 | |
2313 | /* support v1.1 namespaces created with the wrong sort order */ |
2314 | sort(base: info, num: nr, size: sizeof(*info), cmp_func: cmp_map_compat, NULL); |
2315 | nd_set->altcookie = nd_fletcher64(addr: info, len: sizeof(*info) * nr, le: 0); |
2316 | |
2317 | /* record the result of the sort for the mapping position */ |
2318 | for (i = 0; i < nr; i++) { |
2319 | struct nfit_set_info2 *map2 = &info2[i]; |
2320 | int j; |
2321 | |
2322 | for (j = 0; j < nr; j++) { |
2323 | struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; |
2324 | struct nvdimm *nvdimm = mapping->nvdimm; |
2325 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
2326 | struct acpi_nfit_control_region *dcr = nfit_mem->dcr; |
2327 | |
2328 | if (map2->serial_number == dcr->serial_number && |
2329 | map2->vendor_id == dcr->vendor_id && |
2330 | map2->manufacturing_date == dcr->manufacturing_date && |
2331 | map2->manufacturing_location |
2332 | == dcr->manufacturing_location) { |
2333 | mapping->position = i; |
2334 | break; |
2335 | } |
2336 | } |
2337 | } |
2338 | |
2339 | ndr_desc->nd_set = nd_set; |
2340 | devm_kfree(dev, p: info); |
2341 | devm_kfree(dev, p: info2); |
2342 | |
2343 | return 0; |
2344 | } |
2345 | |
2346 | static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, |
2347 | struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) |
2348 | { |
2349 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
2350 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
2351 | int cmd_rc, rc; |
2352 | |
2353 | cmd->address = spa->address; |
2354 | cmd->length = spa->length; |
2355 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, |
2356 | sizeof(*cmd), &cmd_rc); |
2357 | if (rc < 0) |
2358 | return rc; |
2359 | return cmd_rc; |
2360 | } |
2361 | |
2362 | static int ars_start(struct acpi_nfit_desc *acpi_desc, |
2363 | struct nfit_spa *nfit_spa, enum nfit_ars_state req_type) |
2364 | { |
2365 | int rc; |
2366 | int cmd_rc; |
2367 | struct nd_cmd_ars_start ars_start; |
2368 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
2369 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
2370 | |
2371 | memset(&ars_start, 0, sizeof(ars_start)); |
2372 | ars_start.address = spa->address; |
2373 | ars_start.length = spa->length; |
2374 | if (req_type == ARS_REQ_SHORT) |
2375 | ars_start.flags = ND_ARS_RETURN_PREV_DATA; |
2376 | if (nfit_spa_type(spa) == NFIT_SPA_PM) |
2377 | ars_start.type = ND_ARS_PERSISTENT; |
2378 | else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) |
2379 | ars_start.type = ND_ARS_VOLATILE; |
2380 | else |
2381 | return -ENOTTY; |
2382 | |
2383 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, |
2384 | sizeof(ars_start), &cmd_rc); |
2385 | |
2386 | if (rc < 0) |
2387 | return rc; |
2388 | if (cmd_rc < 0) |
2389 | return cmd_rc; |
2390 | set_bit(nr: ARS_VALID, addr: &acpi_desc->scrub_flags); |
2391 | return 0; |
2392 | } |
2393 | |
2394 | static int ars_continue(struct acpi_nfit_desc *acpi_desc) |
2395 | { |
2396 | int rc, cmd_rc; |
2397 | struct nd_cmd_ars_start ars_start; |
2398 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
2399 | struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; |
2400 | |
2401 | ars_start = (struct nd_cmd_ars_start) { |
2402 | .address = ars_status->restart_address, |
2403 | .length = ars_status->restart_length, |
2404 | .type = ars_status->type, |
2405 | }; |
2406 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, |
2407 | sizeof(ars_start), &cmd_rc); |
2408 | if (rc < 0) |
2409 | return rc; |
2410 | return cmd_rc; |
2411 | } |
2412 | |
2413 | static int ars_get_status(struct acpi_nfit_desc *acpi_desc) |
2414 | { |
2415 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
2416 | struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; |
2417 | int rc, cmd_rc; |
2418 | |
2419 | rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, |
2420 | acpi_desc->max_ars, &cmd_rc); |
2421 | if (rc < 0) |
2422 | return rc; |
2423 | return cmd_rc; |
2424 | } |
2425 | |
2426 | static void ars_complete(struct acpi_nfit_desc *acpi_desc, |
2427 | struct nfit_spa *nfit_spa) |
2428 | { |
2429 | struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; |
2430 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
2431 | struct nd_region *nd_region = nfit_spa->nd_region; |
2432 | struct device *dev; |
2433 | |
2434 | lockdep_assert_held(&acpi_desc->init_mutex); |
2435 | /* |
2436 | * Only advance the ARS state for ARS runs initiated by the |
2437 | * kernel, ignore ARS results from BIOS initiated runs for scrub |
2438 | * completion tracking. |
2439 | */ |
2440 | if (acpi_desc->scrub_spa != nfit_spa) |
2441 | return; |
2442 | |
2443 | if ((ars_status->address >= spa->address && ars_status->address |
2444 | < spa->address + spa->length) |
2445 | || (ars_status->address < spa->address)) { |
2446 | /* |
2447 | * Assume that if a scrub starts at an offset from the |
2448 | * start of nfit_spa that we are in the continuation |
2449 | * case. |
2450 | * |
2451 | * Otherwise, if the scrub covers the spa range, mark |
2452 | * any pending request complete. |
2453 | */ |
2454 | if (ars_status->address + ars_status->length |
2455 | >= spa->address + spa->length) |
2456 | /* complete */; |
2457 | else |
2458 | return; |
2459 | } else |
2460 | return; |
2461 | |
2462 | acpi_desc->scrub_spa = NULL; |
2463 | if (nd_region) { |
2464 | dev = nd_region_dev(nd_region); |
2465 | nvdimm_region_notify(nd_region, event: NVDIMM_REVALIDATE_POISON); |
2466 | } else |
2467 | dev = acpi_desc->dev; |
2468 | dev_dbg(dev, "ARS: range %d complete\n" , spa->range_index); |
2469 | } |
2470 | |
2471 | static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) |
2472 | { |
2473 | struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; |
2474 | struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; |
2475 | int rc; |
2476 | u32 i; |
2477 | |
2478 | /* |
2479 | * First record starts at 44 byte offset from the start of the |
2480 | * payload. |
2481 | */ |
2482 | if (ars_status->out_length < 44) |
2483 | return 0; |
2484 | |
2485 | /* |
2486 | * Ignore potentially stale results that are only refreshed |
2487 | * after a start-ARS event. |
2488 | */ |
2489 | if (!test_and_clear_bit(nr: ARS_VALID, addr: &acpi_desc->scrub_flags)) { |
2490 | dev_dbg(acpi_desc->dev, "skip %d stale records\n" , |
2491 | ars_status->num_records); |
2492 | return 0; |
2493 | } |
2494 | |
2495 | for (i = 0; i < ars_status->num_records; i++) { |
2496 | /* only process full records */ |
2497 | if (ars_status->out_length |
2498 | < 44 + sizeof(struct nd_ars_record) * (i + 1)) |
2499 | break; |
2500 | rc = nvdimm_bus_add_badrange(nvdimm_bus, |
2501 | addr: ars_status->records[i].err_address, |
2502 | length: ars_status->records[i].length); |
2503 | if (rc) |
2504 | return rc; |
2505 | } |
2506 | if (i < ars_status->num_records) |
2507 | dev_warn(acpi_desc->dev, "detected truncated ars results\n" ); |
2508 | |
2509 | return 0; |
2510 | } |
2511 | |
2512 | static void acpi_nfit_remove_resource(void *data) |
2513 | { |
2514 | struct resource *res = data; |
2515 | |
2516 | remove_resource(old: res); |
2517 | } |
2518 | |
2519 | static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, |
2520 | struct nd_region_desc *ndr_desc) |
2521 | { |
2522 | struct resource *res, *nd_res = ndr_desc->res; |
2523 | int is_pmem, ret; |
2524 | |
2525 | /* No operation if the region is already registered as PMEM */ |
2526 | is_pmem = region_intersects(offset: nd_res->start, size: resource_size(res: nd_res), |
2527 | IORESOURCE_MEM, desc: IORES_DESC_PERSISTENT_MEMORY); |
2528 | if (is_pmem == REGION_INTERSECTS) |
2529 | return 0; |
2530 | |
2531 | res = devm_kzalloc(dev: acpi_desc->dev, size: sizeof(*res), GFP_KERNEL); |
2532 | if (!res) |
2533 | return -ENOMEM; |
2534 | |
2535 | res->name = "Persistent Memory" ; |
2536 | res->start = nd_res->start; |
2537 | res->end = nd_res->end; |
2538 | res->flags = IORESOURCE_MEM; |
2539 | res->desc = IORES_DESC_PERSISTENT_MEMORY; |
2540 | |
2541 | ret = insert_resource(parent: &iomem_resource, new: res); |
2542 | if (ret) |
2543 | return ret; |
2544 | |
2545 | ret = devm_add_action_or_reset(acpi_desc->dev, |
2546 | acpi_nfit_remove_resource, |
2547 | res); |
2548 | if (ret) |
2549 | return ret; |
2550 | |
2551 | return 0; |
2552 | } |
2553 | |
2554 | static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, |
2555 | struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc, |
2556 | struct acpi_nfit_memory_map *memdev, |
2557 | struct nfit_spa *nfit_spa) |
2558 | { |
2559 | struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, |
2560 | device_handle: memdev->device_handle); |
2561 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
2562 | |
2563 | if (!nvdimm) { |
2564 | dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n" , |
2565 | spa->range_index, memdev->device_handle); |
2566 | return -ENODEV; |
2567 | } |
2568 | |
2569 | mapping->nvdimm = nvdimm; |
2570 | switch (nfit_spa_type(spa)) { |
2571 | case NFIT_SPA_PM: |
2572 | case NFIT_SPA_VOLATILE: |
2573 | mapping->start = memdev->address; |
2574 | mapping->size = memdev->region_size; |
2575 | break; |
2576 | } |
2577 | |
2578 | return 0; |
2579 | } |
2580 | |
2581 | static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) |
2582 | { |
2583 | return (nfit_spa_type(spa) == NFIT_SPA_VDISK || |
2584 | nfit_spa_type(spa) == NFIT_SPA_VCD || |
2585 | nfit_spa_type(spa) == NFIT_SPA_PDISK || |
2586 | nfit_spa_type(spa) == NFIT_SPA_PCD); |
2587 | } |
2588 | |
2589 | static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa) |
2590 | { |
2591 | return (nfit_spa_type(spa) == NFIT_SPA_VDISK || |
2592 | nfit_spa_type(spa) == NFIT_SPA_VCD || |
2593 | nfit_spa_type(spa) == NFIT_SPA_VOLATILE); |
2594 | } |
2595 | |
2596 | static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, |
2597 | struct nfit_spa *nfit_spa) |
2598 | { |
2599 | static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; |
2600 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
2601 | struct nd_region_desc *ndr_desc, _ndr_desc; |
2602 | struct nfit_memdev *nfit_memdev; |
2603 | struct nvdimm_bus *nvdimm_bus; |
2604 | struct resource res; |
2605 | int count = 0, rc; |
2606 | |
2607 | if (nfit_spa->nd_region) |
2608 | return 0; |
2609 | |
2610 | if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { |
2611 | dev_dbg(acpi_desc->dev, "detected invalid spa index\n" ); |
2612 | return 0; |
2613 | } |
2614 | |
2615 | memset(&res, 0, sizeof(res)); |
2616 | memset(&mappings, 0, sizeof(mappings)); |
2617 | memset(&_ndr_desc, 0, sizeof(_ndr_desc)); |
2618 | res.start = spa->address; |
2619 | res.end = res.start + spa->length - 1; |
2620 | ndr_desc = &_ndr_desc; |
2621 | ndr_desc->res = &res; |
2622 | ndr_desc->provider_data = nfit_spa; |
2623 | ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; |
2624 | if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) { |
2625 | ndr_desc->numa_node = pxm_to_online_node(pxm: spa->proximity_domain); |
2626 | ndr_desc->target_node = pxm_to_node(spa->proximity_domain); |
2627 | } else { |
2628 | ndr_desc->numa_node = NUMA_NO_NODE; |
2629 | ndr_desc->target_node = NUMA_NO_NODE; |
2630 | } |
2631 | |
2632 | /* Fallback to address based numa information if node lookup failed */ |
2633 | if (ndr_desc->numa_node == NUMA_NO_NODE) { |
2634 | ndr_desc->numa_node = memory_add_physaddr_to_nid(start: spa->address); |
2635 | dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]" , |
2636 | NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); |
2637 | } |
2638 | if (ndr_desc->target_node == NUMA_NO_NODE) { |
2639 | ndr_desc->target_node = phys_to_target_node(start: spa->address); |
2640 | dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]" , |
2641 | NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); |
2642 | } |
2643 | |
2644 | /* |
2645 | * Persistence domain bits are hierarchical, if |
2646 | * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then |
2647 | * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. |
2648 | */ |
2649 | if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) |
2650 | set_bit(nr: ND_REGION_PERSIST_CACHE, addr: &ndr_desc->flags); |
2651 | else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) |
2652 | set_bit(nr: ND_REGION_PERSIST_MEMCTRL, addr: &ndr_desc->flags); |
2653 | |
2654 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
2655 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; |
2656 | struct nd_mapping_desc *mapping; |
2657 | |
2658 | /* range index 0 == unmapped in SPA or invalid-SPA */ |
2659 | if (memdev->range_index == 0 || spa->range_index == 0) |
2660 | continue; |
2661 | if (memdev->range_index != spa->range_index) |
2662 | continue; |
2663 | if (count >= ND_MAX_MAPPINGS) { |
2664 | dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n" , |
2665 | spa->range_index, ND_MAX_MAPPINGS); |
2666 | return -ENXIO; |
2667 | } |
2668 | mapping = &mappings[count++]; |
2669 | rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, |
2670 | memdev, nfit_spa); |
2671 | if (rc) |
2672 | goto out; |
2673 | } |
2674 | |
2675 | ndr_desc->mapping = mappings; |
2676 | ndr_desc->num_mappings = count; |
2677 | rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); |
2678 | if (rc) |
2679 | goto out; |
2680 | |
2681 | nvdimm_bus = acpi_desc->nvdimm_bus; |
2682 | if (nfit_spa_type(spa) == NFIT_SPA_PM) { |
2683 | rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); |
2684 | if (rc) { |
2685 | dev_warn(acpi_desc->dev, |
2686 | "failed to insert pmem resource to iomem: %d\n" , |
2687 | rc); |
2688 | goto out; |
2689 | } |
2690 | |
2691 | nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, |
2692 | ndr_desc); |
2693 | if (!nfit_spa->nd_region) |
2694 | rc = -ENOMEM; |
2695 | } else if (nfit_spa_is_volatile(spa)) { |
2696 | nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, |
2697 | ndr_desc); |
2698 | if (!nfit_spa->nd_region) |
2699 | rc = -ENOMEM; |
2700 | } else if (nfit_spa_is_virtual(spa)) { |
2701 | nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, |
2702 | ndr_desc); |
2703 | if (!nfit_spa->nd_region) |
2704 | rc = -ENOMEM; |
2705 | } |
2706 | |
2707 | out: |
2708 | if (rc) |
2709 | dev_err(acpi_desc->dev, "failed to register spa range %d\n" , |
2710 | nfit_spa->spa->range_index); |
2711 | return rc; |
2712 | } |
2713 | |
2714 | static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) |
2715 | { |
2716 | struct device *dev = acpi_desc->dev; |
2717 | struct nd_cmd_ars_status *ars_status; |
2718 | |
2719 | if (acpi_desc->ars_status) { |
2720 | memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); |
2721 | return 0; |
2722 | } |
2723 | |
2724 | ars_status = devm_kzalloc(dev, size: acpi_desc->max_ars, GFP_KERNEL); |
2725 | if (!ars_status) |
2726 | return -ENOMEM; |
2727 | acpi_desc->ars_status = ars_status; |
2728 | return 0; |
2729 | } |
2730 | |
2731 | static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) |
2732 | { |
2733 | int rc; |
2734 | |
2735 | if (ars_status_alloc(acpi_desc)) |
2736 | return -ENOMEM; |
2737 | |
2738 | rc = ars_get_status(acpi_desc); |
2739 | |
2740 | if (rc < 0 && rc != -ENOSPC) |
2741 | return rc; |
2742 | |
2743 | if (ars_status_process_records(acpi_desc)) |
2744 | dev_err(acpi_desc->dev, "Failed to process ARS records\n" ); |
2745 | |
2746 | return rc; |
2747 | } |
2748 | |
2749 | static int ars_register(struct acpi_nfit_desc *acpi_desc, |
2750 | struct nfit_spa *nfit_spa) |
2751 | { |
2752 | int rc; |
2753 | |
2754 | if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) |
2755 | return acpi_nfit_register_region(acpi_desc, nfit_spa); |
2756 | |
2757 | set_bit(nr: ARS_REQ_SHORT, addr: &nfit_spa->ars_state); |
2758 | if (!no_init_ars) |
2759 | set_bit(nr: ARS_REQ_LONG, addr: &nfit_spa->ars_state); |
2760 | |
2761 | switch (acpi_nfit_query_poison(acpi_desc)) { |
2762 | case 0: |
2763 | case -ENOSPC: |
2764 | case -EAGAIN: |
2765 | rc = ars_start(acpi_desc, nfit_spa, req_type: ARS_REQ_SHORT); |
2766 | /* shouldn't happen, try again later */ |
2767 | if (rc == -EBUSY) |
2768 | break; |
2769 | if (rc) { |
2770 | set_bit(nr: ARS_FAILED, addr: &nfit_spa->ars_state); |
2771 | break; |
2772 | } |
2773 | clear_bit(nr: ARS_REQ_SHORT, addr: &nfit_spa->ars_state); |
2774 | rc = acpi_nfit_query_poison(acpi_desc); |
2775 | if (rc) |
2776 | break; |
2777 | acpi_desc->scrub_spa = nfit_spa; |
2778 | ars_complete(acpi_desc, nfit_spa); |
2779 | /* |
2780 | * If ars_complete() says we didn't complete the |
2781 | * short scrub, we'll try again with a long |
2782 | * request. |
2783 | */ |
2784 | acpi_desc->scrub_spa = NULL; |
2785 | break; |
2786 | case -EBUSY: |
2787 | case -ENOMEM: |
2788 | /* |
2789 | * BIOS was using ARS, wait for it to complete (or |
2790 | * resources to become available) and then perform our |
2791 | * own scrubs. |
2792 | */ |
2793 | break; |
2794 | default: |
2795 | set_bit(nr: ARS_FAILED, addr: &nfit_spa->ars_state); |
2796 | break; |
2797 | } |
2798 | |
2799 | return acpi_nfit_register_region(acpi_desc, nfit_spa); |
2800 | } |
2801 | |
2802 | static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) |
2803 | { |
2804 | struct nfit_spa *nfit_spa; |
2805 | |
2806 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
2807 | if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) |
2808 | continue; |
2809 | ars_complete(acpi_desc, nfit_spa); |
2810 | } |
2811 | } |
2812 | |
2813 | static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, |
2814 | int query_rc) |
2815 | { |
2816 | unsigned int tmo = acpi_desc->scrub_tmo; |
2817 | struct device *dev = acpi_desc->dev; |
2818 | struct nfit_spa *nfit_spa; |
2819 | |
2820 | lockdep_assert_held(&acpi_desc->init_mutex); |
2821 | |
2822 | if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) |
2823 | return 0; |
2824 | |
2825 | if (query_rc == -EBUSY) { |
2826 | dev_dbg(dev, "ARS: ARS busy\n" ); |
2827 | return min(30U * 60U, tmo * 2); |
2828 | } |
2829 | if (query_rc == -ENOSPC) { |
2830 | dev_dbg(dev, "ARS: ARS continue\n" ); |
2831 | ars_continue(acpi_desc); |
2832 | return 1; |
2833 | } |
2834 | if (query_rc && query_rc != -EAGAIN) { |
2835 | unsigned long long addr, end; |
2836 | |
2837 | addr = acpi_desc->ars_status->address; |
2838 | end = addr + acpi_desc->ars_status->length; |
2839 | dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n" , addr, end, |
2840 | query_rc); |
2841 | } |
2842 | |
2843 | ars_complete_all(acpi_desc); |
2844 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
2845 | enum nfit_ars_state req_type; |
2846 | int rc; |
2847 | |
2848 | if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) |
2849 | continue; |
2850 | |
2851 | /* prefer short ARS requests first */ |
2852 | if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)) |
2853 | req_type = ARS_REQ_SHORT; |
2854 | else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) |
2855 | req_type = ARS_REQ_LONG; |
2856 | else |
2857 | continue; |
2858 | rc = ars_start(acpi_desc, nfit_spa, req_type); |
2859 | |
2860 | dev = nd_region_dev(nd_region: nfit_spa->nd_region); |
2861 | dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n" , |
2862 | nfit_spa->spa->range_index, |
2863 | req_type == ARS_REQ_SHORT ? "short" : "long" , |
2864 | rc); |
2865 | /* |
2866 | * Hmm, we raced someone else starting ARS? Try again in |
2867 | * a bit. |
2868 | */ |
2869 | if (rc == -EBUSY) |
2870 | return 1; |
2871 | if (rc == 0) { |
2872 | dev_WARN_ONCE(dev, acpi_desc->scrub_spa, |
2873 | "scrub start while range %d active\n" , |
2874 | acpi_desc->scrub_spa->spa->range_index); |
2875 | clear_bit(nr: req_type, addr: &nfit_spa->ars_state); |
2876 | acpi_desc->scrub_spa = nfit_spa; |
2877 | /* |
2878 | * Consider this spa last for future scrub |
2879 | * requests |
2880 | */ |
2881 | list_move_tail(list: &nfit_spa->list, head: &acpi_desc->spas); |
2882 | return 1; |
2883 | } |
2884 | |
2885 | dev_err(dev, "ARS: range %d ARS failed (%d)\n" , |
2886 | nfit_spa->spa->range_index, rc); |
2887 | set_bit(nr: ARS_FAILED, addr: &nfit_spa->ars_state); |
2888 | } |
2889 | return 0; |
2890 | } |
2891 | |
2892 | static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) |
2893 | { |
2894 | lockdep_assert_held(&acpi_desc->init_mutex); |
2895 | |
2896 | set_bit(nr: ARS_BUSY, addr: &acpi_desc->scrub_flags); |
2897 | /* note this should only be set from within the workqueue */ |
2898 | if (tmo) |
2899 | acpi_desc->scrub_tmo = tmo; |
2900 | queue_delayed_work(wq: nfit_wq, dwork: &acpi_desc->dwork, delay: tmo * HZ); |
2901 | } |
2902 | |
2903 | static void sched_ars(struct acpi_nfit_desc *acpi_desc) |
2904 | { |
2905 | __sched_ars(acpi_desc, tmo: 0); |
2906 | } |
2907 | |
2908 | static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) |
2909 | { |
2910 | lockdep_assert_held(&acpi_desc->init_mutex); |
2911 | |
2912 | clear_bit(nr: ARS_BUSY, addr: &acpi_desc->scrub_flags); |
2913 | acpi_desc->scrub_count++; |
2914 | if (acpi_desc->scrub_count_state) |
2915 | sysfs_notify_dirent(kn: acpi_desc->scrub_count_state); |
2916 | } |
2917 | |
2918 | static void acpi_nfit_scrub(struct work_struct *work) |
2919 | { |
2920 | struct acpi_nfit_desc *acpi_desc; |
2921 | unsigned int tmo; |
2922 | int query_rc; |
2923 | |
2924 | acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); |
2925 | mutex_lock(&acpi_desc->init_mutex); |
2926 | query_rc = acpi_nfit_query_poison(acpi_desc); |
2927 | tmo = __acpi_nfit_scrub(acpi_desc, query_rc); |
2928 | if (tmo) |
2929 | __sched_ars(acpi_desc, tmo); |
2930 | else |
2931 | notify_ars_done(acpi_desc); |
2932 | memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); |
2933 | clear_bit(nr: ARS_POLL, addr: &acpi_desc->scrub_flags); |
2934 | mutex_unlock(lock: &acpi_desc->init_mutex); |
2935 | } |
2936 | |
2937 | static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, |
2938 | struct nfit_spa *nfit_spa) |
2939 | { |
2940 | int type = nfit_spa_type(spa: nfit_spa->spa); |
2941 | struct nd_cmd_ars_cap ars_cap; |
2942 | int rc; |
2943 | |
2944 | set_bit(nr: ARS_FAILED, addr: &nfit_spa->ars_state); |
2945 | memset(&ars_cap, 0, sizeof(ars_cap)); |
2946 | rc = ars_get_cap(acpi_desc, cmd: &ars_cap, nfit_spa); |
2947 | if (rc < 0) |
2948 | return; |
2949 | /* check that the supported scrub types match the spa type */ |
2950 | if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) |
2951 | & ND_ARS_VOLATILE) == 0) |
2952 | return; |
2953 | if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) |
2954 | & ND_ARS_PERSISTENT) == 0) |
2955 | return; |
2956 | |
2957 | nfit_spa->max_ars = ars_cap.max_ars_out; |
2958 | nfit_spa->clear_err_unit = ars_cap.clear_err_unit; |
2959 | acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars); |
2960 | clear_bit(nr: ARS_FAILED, addr: &nfit_spa->ars_state); |
2961 | } |
2962 | |
2963 | static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) |
2964 | { |
2965 | struct nfit_spa *nfit_spa; |
2966 | int rc, do_sched_ars = 0; |
2967 | |
2968 | set_bit(nr: ARS_VALID, addr: &acpi_desc->scrub_flags); |
2969 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
2970 | switch (nfit_spa_type(spa: nfit_spa->spa)) { |
2971 | case NFIT_SPA_VOLATILE: |
2972 | case NFIT_SPA_PM: |
2973 | acpi_nfit_init_ars(acpi_desc, nfit_spa); |
2974 | break; |
2975 | } |
2976 | } |
2977 | |
2978 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
2979 | switch (nfit_spa_type(spa: nfit_spa->spa)) { |
2980 | case NFIT_SPA_VOLATILE: |
2981 | case NFIT_SPA_PM: |
2982 | /* register regions and kick off initial ARS run */ |
2983 | rc = ars_register(acpi_desc, nfit_spa); |
2984 | if (rc) |
2985 | return rc; |
2986 | |
2987 | /* |
2988 | * Kick off background ARS if at least one |
2989 | * region successfully registered ARS |
2990 | */ |
2991 | if (!test_bit(ARS_FAILED, &nfit_spa->ars_state)) |
2992 | do_sched_ars++; |
2993 | break; |
2994 | case NFIT_SPA_BDW: |
2995 | /* nothing to register */ |
2996 | break; |
2997 | case NFIT_SPA_DCR: |
2998 | case NFIT_SPA_VDISK: |
2999 | case NFIT_SPA_VCD: |
3000 | case NFIT_SPA_PDISK: |
3001 | case NFIT_SPA_PCD: |
3002 | /* register known regions that don't support ARS */ |
3003 | rc = acpi_nfit_register_region(acpi_desc, nfit_spa); |
3004 | if (rc) |
3005 | return rc; |
3006 | break; |
3007 | default: |
3008 | /* don't register unknown regions */ |
3009 | break; |
3010 | } |
3011 | } |
3012 | |
3013 | if (do_sched_ars) |
3014 | sched_ars(acpi_desc); |
3015 | return 0; |
3016 | } |
3017 | |
3018 | static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, |
3019 | struct nfit_table_prev *prev) |
3020 | { |
3021 | struct device *dev = acpi_desc->dev; |
3022 | |
3023 | if (!list_empty(head: &prev->spas) || |
3024 | !list_empty(head: &prev->memdevs) || |
3025 | !list_empty(head: &prev->dcrs) || |
3026 | !list_empty(head: &prev->bdws) || |
3027 | !list_empty(head: &prev->idts) || |
3028 | !list_empty(head: &prev->flushes)) { |
3029 | dev_err(dev, "new nfit deletes entries (unsupported)\n" ); |
3030 | return -ENXIO; |
3031 | } |
3032 | return 0; |
3033 | } |
3034 | |
3035 | static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) |
3036 | { |
3037 | struct device *dev = acpi_desc->dev; |
3038 | struct kernfs_node *nfit; |
3039 | struct device *bus_dev; |
3040 | |
3041 | if (!ars_supported(nvdimm_bus: acpi_desc->nvdimm_bus)) |
3042 | return 0; |
3043 | |
3044 | bus_dev = to_nvdimm_bus_dev(nvdimm_bus: acpi_desc->nvdimm_bus); |
3045 | nfit = sysfs_get_dirent(parent: bus_dev->kobj.sd, name: "nfit" ); |
3046 | if (!nfit) { |
3047 | dev_err(dev, "sysfs_get_dirent 'nfit' failed\n" ); |
3048 | return -ENODEV; |
3049 | } |
3050 | acpi_desc->scrub_count_state = sysfs_get_dirent(parent: nfit, name: "scrub" ); |
3051 | sysfs_put(kn: nfit); |
3052 | if (!acpi_desc->scrub_count_state) { |
3053 | dev_err(dev, "sysfs_get_dirent 'scrub' failed\n" ); |
3054 | return -ENODEV; |
3055 | } |
3056 | |
3057 | return 0; |
3058 | } |
3059 | |
3060 | static void acpi_nfit_unregister(void *data) |
3061 | { |
3062 | struct acpi_nfit_desc *acpi_desc = data; |
3063 | |
3064 | nvdimm_bus_unregister(nvdimm_bus: acpi_desc->nvdimm_bus); |
3065 | } |
3066 | |
3067 | int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) |
3068 | { |
3069 | struct device *dev = acpi_desc->dev; |
3070 | struct nfit_table_prev prev; |
3071 | const void *end; |
3072 | int rc; |
3073 | |
3074 | if (!acpi_desc->nvdimm_bus) { |
3075 | acpi_nfit_init_dsms(acpi_desc); |
3076 | |
3077 | acpi_desc->nvdimm_bus = nvdimm_bus_register(parent: dev, |
3078 | nfit_desc: &acpi_desc->nd_desc); |
3079 | if (!acpi_desc->nvdimm_bus) |
3080 | return -ENOMEM; |
3081 | |
3082 | rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, |
3083 | acpi_desc); |
3084 | if (rc) |
3085 | return rc; |
3086 | |
3087 | rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); |
3088 | if (rc) |
3089 | return rc; |
3090 | |
3091 | /* register this acpi_desc for mce notifications */ |
3092 | mutex_lock(&acpi_desc_lock); |
3093 | list_add_tail(new: &acpi_desc->list, head: &acpi_descs); |
3094 | mutex_unlock(lock: &acpi_desc_lock); |
3095 | } |
3096 | |
3097 | mutex_lock(&acpi_desc->init_mutex); |
3098 | |
3099 | INIT_LIST_HEAD(list: &prev.spas); |
3100 | INIT_LIST_HEAD(list: &prev.memdevs); |
3101 | INIT_LIST_HEAD(list: &prev.dcrs); |
3102 | INIT_LIST_HEAD(list: &prev.bdws); |
3103 | INIT_LIST_HEAD(list: &prev.idts); |
3104 | INIT_LIST_HEAD(list: &prev.flushes); |
3105 | |
3106 | list_cut_position(list: &prev.spas, head: &acpi_desc->spas, |
3107 | entry: acpi_desc->spas.prev); |
3108 | list_cut_position(list: &prev.memdevs, head: &acpi_desc->memdevs, |
3109 | entry: acpi_desc->memdevs.prev); |
3110 | list_cut_position(list: &prev.dcrs, head: &acpi_desc->dcrs, |
3111 | entry: acpi_desc->dcrs.prev); |
3112 | list_cut_position(list: &prev.bdws, head: &acpi_desc->bdws, |
3113 | entry: acpi_desc->bdws.prev); |
3114 | list_cut_position(list: &prev.idts, head: &acpi_desc->idts, |
3115 | entry: acpi_desc->idts.prev); |
3116 | list_cut_position(list: &prev.flushes, head: &acpi_desc->flushes, |
3117 | entry: acpi_desc->flushes.prev); |
3118 | |
3119 | end = data + sz; |
3120 | while (!IS_ERR_OR_NULL(ptr: data)) |
3121 | data = add_table(acpi_desc, prev: &prev, table: data, end); |
3122 | |
3123 | if (IS_ERR(ptr: data)) { |
3124 | dev_dbg(dev, "nfit table parsing error: %ld\n" , PTR_ERR(data)); |
3125 | rc = PTR_ERR(ptr: data); |
3126 | goto out_unlock; |
3127 | } |
3128 | |
3129 | rc = acpi_nfit_check_deletions(acpi_desc, prev: &prev); |
3130 | if (rc) |
3131 | goto out_unlock; |
3132 | |
3133 | rc = nfit_mem_init(acpi_desc); |
3134 | if (rc) |
3135 | goto out_unlock; |
3136 | |
3137 | rc = acpi_nfit_register_dimms(acpi_desc); |
3138 | if (rc) |
3139 | goto out_unlock; |
3140 | |
3141 | rc = acpi_nfit_register_regions(acpi_desc); |
3142 | |
3143 | out_unlock: |
3144 | mutex_unlock(lock: &acpi_desc->init_mutex); |
3145 | return rc; |
3146 | } |
3147 | EXPORT_SYMBOL_GPL(acpi_nfit_init); |
3148 | |
3149 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) |
3150 | { |
3151 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
3152 | struct device *dev = acpi_desc->dev; |
3153 | |
3154 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ |
3155 | device_lock(dev); |
3156 | device_unlock(dev); |
3157 | |
3158 | /* Bounce the init_mutex to complete initial registration */ |
3159 | mutex_lock(&acpi_desc->init_mutex); |
3160 | mutex_unlock(lock: &acpi_desc->init_mutex); |
3161 | |
3162 | return 0; |
3163 | } |
3164 | |
3165 | static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, |
3166 | struct nvdimm *nvdimm, unsigned int cmd) |
3167 | { |
3168 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
3169 | |
3170 | if (nvdimm) |
3171 | return 0; |
3172 | if (cmd != ND_CMD_ARS_START) |
3173 | return 0; |
3174 | |
3175 | /* |
3176 | * The kernel and userspace may race to initiate a scrub, but |
3177 | * the scrub thread is prepared to lose that initial race. It |
3178 | * just needs guarantees that any ARS it initiates are not |
3179 | * interrupted by any intervening start requests from userspace. |
3180 | */ |
3181 | if (work_busy(work: &acpi_desc->dwork.work)) |
3182 | return -EBUSY; |
3183 | |
3184 | return 0; |
3185 | } |
3186 | |
3187 | /* |
3188 | * Prevent security and firmware activate commands from being issued via |
3189 | * ioctl. |
3190 | */ |
3191 | static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, |
3192 | struct nvdimm *nvdimm, unsigned int cmd, void *buf) |
3193 | { |
3194 | struct nd_cmd_pkg *call_pkg = buf; |
3195 | unsigned int func; |
3196 | |
3197 | if (nvdimm && cmd == ND_CMD_CALL && |
3198 | call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { |
3199 | func = call_pkg->nd_command; |
3200 | if (func > NVDIMM_CMD_MAX || |
3201 | (1 << func) & NVDIMM_INTEL_DENY_CMDMASK) |
3202 | return -EOPNOTSUPP; |
3203 | } |
3204 | |
3205 | /* block all non-nfit bus commands */ |
3206 | if (!nvdimm && cmd == ND_CMD_CALL && |
3207 | call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT) |
3208 | return -EOPNOTSUPP; |
3209 | |
3210 | return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd); |
3211 | } |
3212 | |
3213 | int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, |
3214 | enum nfit_ars_state req_type) |
3215 | { |
3216 | struct device *dev = acpi_desc->dev; |
3217 | int scheduled = 0, busy = 0; |
3218 | struct nfit_spa *nfit_spa; |
3219 | |
3220 | mutex_lock(&acpi_desc->init_mutex); |
3221 | if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) { |
3222 | mutex_unlock(lock: &acpi_desc->init_mutex); |
3223 | return 0; |
3224 | } |
3225 | |
3226 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
3227 | int type = nfit_spa_type(spa: nfit_spa->spa); |
3228 | |
3229 | if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE) |
3230 | continue; |
3231 | if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) |
3232 | continue; |
3233 | |
3234 | if (test_and_set_bit(nr: req_type, addr: &nfit_spa->ars_state)) |
3235 | busy++; |
3236 | else |
3237 | scheduled++; |
3238 | } |
3239 | if (scheduled) { |
3240 | sched_ars(acpi_desc); |
3241 | dev_dbg(dev, "ars_scan triggered\n" ); |
3242 | } |
3243 | mutex_unlock(lock: &acpi_desc->init_mutex); |
3244 | |
3245 | if (scheduled) |
3246 | return 0; |
3247 | if (busy) |
3248 | return -EBUSY; |
3249 | return -ENOTTY; |
3250 | } |
3251 | |
3252 | void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) |
3253 | { |
3254 | struct nvdimm_bus_descriptor *nd_desc; |
3255 | |
3256 | dev_set_drvdata(dev, data: acpi_desc); |
3257 | acpi_desc->dev = dev; |
3258 | nd_desc = &acpi_desc->nd_desc; |
3259 | nd_desc->provider_name = "ACPI.NFIT" ; |
3260 | nd_desc->module = THIS_MODULE; |
3261 | nd_desc->ndctl = acpi_nfit_ctl; |
3262 | nd_desc->flush_probe = acpi_nfit_flush_probe; |
3263 | nd_desc->clear_to_send = acpi_nfit_clear_to_send; |
3264 | nd_desc->attr_groups = acpi_nfit_attribute_groups; |
3265 | |
3266 | INIT_LIST_HEAD(list: &acpi_desc->spas); |
3267 | INIT_LIST_HEAD(list: &acpi_desc->dcrs); |
3268 | INIT_LIST_HEAD(list: &acpi_desc->bdws); |
3269 | INIT_LIST_HEAD(list: &acpi_desc->idts); |
3270 | INIT_LIST_HEAD(list: &acpi_desc->flushes); |
3271 | INIT_LIST_HEAD(list: &acpi_desc->memdevs); |
3272 | INIT_LIST_HEAD(list: &acpi_desc->dimms); |
3273 | INIT_LIST_HEAD(list: &acpi_desc->list); |
3274 | mutex_init(&acpi_desc->init_mutex); |
3275 | acpi_desc->scrub_tmo = 1; |
3276 | INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub); |
3277 | } |
3278 | EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); |
3279 | |
3280 | static void acpi_nfit_put_table(void *table) |
3281 | { |
3282 | acpi_put_table(table); |
3283 | } |
3284 | |
3285 | static void acpi_nfit_notify(acpi_handle handle, u32 event, void *data) |
3286 | { |
3287 | struct acpi_device *adev = data; |
3288 | |
3289 | device_lock(dev: &adev->dev); |
3290 | __acpi_nfit_notify(dev: &adev->dev, handle, event); |
3291 | device_unlock(dev: &adev->dev); |
3292 | } |
3293 | |
3294 | static void acpi_nfit_remove_notify_handler(void *data) |
3295 | { |
3296 | struct acpi_device *adev = data; |
3297 | |
3298 | acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY, |
3299 | handler: acpi_nfit_notify); |
3300 | } |
3301 | |
3302 | void acpi_nfit_shutdown(void *data) |
3303 | { |
3304 | struct acpi_nfit_desc *acpi_desc = data; |
3305 | struct device *bus_dev = to_nvdimm_bus_dev(nvdimm_bus: acpi_desc->nvdimm_bus); |
3306 | |
3307 | /* |
3308 | * Destruct under acpi_desc_lock so that nfit_handle_mce does not |
3309 | * race teardown |
3310 | */ |
3311 | mutex_lock(&acpi_desc_lock); |
3312 | list_del(entry: &acpi_desc->list); |
3313 | mutex_unlock(lock: &acpi_desc_lock); |
3314 | |
3315 | mutex_lock(&acpi_desc->init_mutex); |
3316 | set_bit(nr: ARS_CANCEL, addr: &acpi_desc->scrub_flags); |
3317 | mutex_unlock(lock: &acpi_desc->init_mutex); |
3318 | cancel_delayed_work_sync(dwork: &acpi_desc->dwork); |
3319 | |
3320 | /* |
3321 | * Bounce the nvdimm bus lock to make sure any in-flight |
3322 | * acpi_nfit_ars_rescan() submissions have had a chance to |
3323 | * either submit or see ->cancel set. |
3324 | */ |
3325 | device_lock(dev: bus_dev); |
3326 | device_unlock(dev: bus_dev); |
3327 | |
3328 | flush_workqueue(nfit_wq); |
3329 | } |
3330 | EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); |
3331 | |
3332 | static int acpi_nfit_add(struct acpi_device *adev) |
3333 | { |
3334 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
3335 | struct acpi_nfit_desc *acpi_desc; |
3336 | struct device *dev = &adev->dev; |
3337 | struct acpi_table_header *tbl; |
3338 | acpi_status status = AE_OK; |
3339 | acpi_size sz; |
3340 | int rc = 0; |
3341 | |
3342 | rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY, |
3343 | handler: acpi_nfit_notify, context: adev); |
3344 | if (rc) |
3345 | return rc; |
3346 | |
3347 | rc = devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler, |
3348 | adev); |
3349 | if (rc) |
3350 | return rc; |
3351 | |
3352 | status = acpi_get_table(ACPI_SIG_NFIT, instance: 0, out_table: &tbl); |
3353 | if (ACPI_FAILURE(status)) { |
3354 | /* The NVDIMM root device allows OS to trigger enumeration of |
3355 | * NVDIMMs through NFIT at boot time and re-enumeration at |
3356 | * root level via the _FIT method during runtime. |
3357 | * This is ok to return 0 here, we could have an nvdimm |
3358 | * hotplugged later and evaluate _FIT method which returns |
3359 | * data in the format of a series of NFIT Structures. |
3360 | */ |
3361 | dev_dbg(dev, "failed to find NFIT at startup\n" ); |
3362 | return 0; |
3363 | } |
3364 | |
3365 | rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); |
3366 | if (rc) |
3367 | return rc; |
3368 | sz = tbl->length; |
3369 | |
3370 | acpi_desc = devm_kzalloc(dev, size: sizeof(*acpi_desc), GFP_KERNEL); |
3371 | if (!acpi_desc) |
3372 | return -ENOMEM; |
3373 | acpi_nfit_desc_init(acpi_desc, &adev->dev); |
3374 | |
3375 | /* Save the acpi header for exporting the revision via sysfs */ |
3376 | acpi_desc->acpi_header = *tbl; |
3377 | |
3378 | /* Evaluate _FIT and override with that if present */ |
3379 | status = acpi_evaluate_object(object: adev->handle, pathname: "_FIT" , NULL, return_object_buffer: &buf); |
3380 | if (ACPI_SUCCESS(status) && buf.length > 0) { |
3381 | union acpi_object *obj = buf.pointer; |
3382 | |
3383 | if (obj->type == ACPI_TYPE_BUFFER) |
3384 | rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, |
3385 | obj->buffer.length); |
3386 | else |
3387 | dev_dbg(dev, "invalid type %d, ignoring _FIT\n" , |
3388 | (int) obj->type); |
3389 | kfree(objp: buf.pointer); |
3390 | } else |
3391 | /* skip over the lead-in header table */ |
3392 | rc = acpi_nfit_init(acpi_desc, (void *) tbl |
3393 | + sizeof(struct acpi_table_nfit), |
3394 | sz - sizeof(struct acpi_table_nfit)); |
3395 | |
3396 | if (rc) |
3397 | return rc; |
3398 | |
3399 | return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); |
3400 | } |
3401 | |
3402 | static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) |
3403 | { |
3404 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); |
3405 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
3406 | union acpi_object *obj; |
3407 | acpi_status status; |
3408 | int ret; |
3409 | |
3410 | if (!dev->driver) { |
3411 | /* dev->driver may be null if we're being removed */ |
3412 | dev_dbg(dev, "no driver found for dev\n" ); |
3413 | return; |
3414 | } |
3415 | |
3416 | if (!acpi_desc) { |
3417 | acpi_desc = devm_kzalloc(dev, size: sizeof(*acpi_desc), GFP_KERNEL); |
3418 | if (!acpi_desc) |
3419 | return; |
3420 | acpi_nfit_desc_init(acpi_desc, dev); |
3421 | } else { |
3422 | /* |
3423 | * Finish previous registration before considering new |
3424 | * regions. |
3425 | */ |
3426 | flush_workqueue(nfit_wq); |
3427 | } |
3428 | |
3429 | /* Evaluate _FIT */ |
3430 | status = acpi_evaluate_object(object: handle, pathname: "_FIT" , NULL, return_object_buffer: &buf); |
3431 | if (ACPI_FAILURE(status)) { |
3432 | dev_err(dev, "failed to evaluate _FIT\n" ); |
3433 | return; |
3434 | } |
3435 | |
3436 | obj = buf.pointer; |
3437 | if (obj->type == ACPI_TYPE_BUFFER) { |
3438 | ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, |
3439 | obj->buffer.length); |
3440 | if (ret) |
3441 | dev_err(dev, "failed to merge updated NFIT\n" ); |
3442 | } else |
3443 | dev_err(dev, "Invalid _FIT\n" ); |
3444 | kfree(objp: buf.pointer); |
3445 | } |
3446 | |
3447 | static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle) |
3448 | { |
3449 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev); |
3450 | |
3451 | if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) |
3452 | acpi_nfit_ars_rescan(acpi_desc, req_type: ARS_REQ_LONG); |
3453 | else |
3454 | acpi_nfit_ars_rescan(acpi_desc, req_type: ARS_REQ_SHORT); |
3455 | } |
3456 | |
3457 | void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event) |
3458 | { |
3459 | dev_dbg(dev, "event: 0x%x\n" , event); |
3460 | |
3461 | switch (event) { |
3462 | case NFIT_NOTIFY_UPDATE: |
3463 | return acpi_nfit_update_notify(dev, handle); |
3464 | case NFIT_NOTIFY_UC_MEMORY_ERROR: |
3465 | return acpi_nfit_uc_error_notify(dev, handle); |
3466 | default: |
3467 | return; |
3468 | } |
3469 | } |
3470 | EXPORT_SYMBOL_GPL(__acpi_nfit_notify); |
3471 | |
3472 | static const struct acpi_device_id acpi_nfit_ids[] = { |
3473 | { "ACPI0012" , 0 }, |
3474 | { "" , 0 }, |
3475 | }; |
3476 | MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); |
3477 | |
3478 | static struct acpi_driver acpi_nfit_driver = { |
3479 | .name = KBUILD_MODNAME, |
3480 | .ids = acpi_nfit_ids, |
3481 | .ops = { |
3482 | .add = acpi_nfit_add, |
3483 | }, |
3484 | }; |
3485 | |
3486 | static __init int nfit_init(void) |
3487 | { |
3488 | int ret; |
3489 | |
3490 | BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); |
3491 | BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 64); |
3492 | BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); |
3493 | BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 16); |
3494 | BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 8); |
3495 | BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); |
3496 | BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); |
3497 | BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16); |
3498 | |
3499 | guid_parse(UUID_VOLATILE_MEMORY, u: &nfit_uuid[NFIT_SPA_VOLATILE]); |
3500 | guid_parse(UUID_PERSISTENT_MEMORY, u: &nfit_uuid[NFIT_SPA_PM]); |
3501 | guid_parse(UUID_CONTROL_REGION, u: &nfit_uuid[NFIT_SPA_DCR]); |
3502 | guid_parse(UUID_DATA_REGION, u: &nfit_uuid[NFIT_SPA_BDW]); |
3503 | guid_parse(UUID_VOLATILE_VIRTUAL_DISK, u: &nfit_uuid[NFIT_SPA_VDISK]); |
3504 | guid_parse(UUID_VOLATILE_VIRTUAL_CD, u: &nfit_uuid[NFIT_SPA_VCD]); |
3505 | guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, u: &nfit_uuid[NFIT_SPA_PDISK]); |
3506 | guid_parse(UUID_PERSISTENT_VIRTUAL_CD, u: &nfit_uuid[NFIT_SPA_PCD]); |
3507 | guid_parse(UUID_NFIT_BUS, u: &nfit_uuid[NFIT_DEV_BUS]); |
3508 | guid_parse(UUID_NFIT_DIMM, u: &nfit_uuid[NFIT_DEV_DIMM]); |
3509 | guid_parse(UUID_NFIT_DIMM_N_HPE1, u: &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); |
3510 | guid_parse(UUID_NFIT_DIMM_N_HPE2, u: &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); |
3511 | guid_parse(UUID_NFIT_DIMM_N_MSFT, u: &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); |
3512 | guid_parse(UUID_NFIT_DIMM_N_HYPERV, u: &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]); |
3513 | guid_parse(UUID_INTEL_BUS, u: &nfit_uuid[NFIT_BUS_INTEL]); |
3514 | |
3515 | nfit_wq = create_singlethread_workqueue("nfit" ); |
3516 | if (!nfit_wq) |
3517 | return -ENOMEM; |
3518 | |
3519 | nfit_mce_register(); |
3520 | ret = acpi_bus_register_driver(driver: &acpi_nfit_driver); |
3521 | if (ret) { |
3522 | nfit_mce_unregister(); |
3523 | destroy_workqueue(wq: nfit_wq); |
3524 | } |
3525 | |
3526 | return ret; |
3527 | |
3528 | } |
3529 | |
3530 | static __exit void nfit_exit(void) |
3531 | { |
3532 | nfit_mce_unregister(); |
3533 | acpi_bus_unregister_driver(driver: &acpi_nfit_driver); |
3534 | destroy_workqueue(wq: nfit_wq); |
3535 | WARN_ON(!list_empty(&acpi_descs)); |
3536 | } |
3537 | |
3538 | module_init(nfit_init); |
3539 | module_exit(nfit_exit); |
3540 | MODULE_LICENSE("GPL v2" ); |
3541 | MODULE_AUTHOR("Intel Corporation" ); |
3542 | |