// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Intel Corporation
 *
 * Authors:
 * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * This device driver implements the TPM interface as defined in
 * the TCG CRB 2.0 TPM specification.
 */

#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_ARM64
#include <linux/arm-smccc.h>
#endif
#include "tpm.h"

#define ACPI_SIG_TPM2 "TPM2"
#define TPM_CRB_MAX_RESOURCES 3

static const guid_t crb_acpi_start_guid =
	GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
		  0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);

enum crb_defaults {
	CRB_ACPI_START_REVISION_ID = 1,
	CRB_ACPI_START_INDEX = 1,
};

enum crb_loc_ctrl {
	CRB_LOC_CTRL_REQUEST_ACCESS = BIT(0),
	CRB_LOC_CTRL_RELINQUISH = BIT(1),
};

enum crb_loc_state {
	CRB_LOC_STATE_LOC_ASSIGNED = BIT(1),
	CRB_LOC_STATE_TPM_REG_VALID_STS = BIT(7),
};

enum crb_ctrl_req {
	CRB_CTRL_REQ_CMD_READY = BIT(0),
	CRB_CTRL_REQ_GO_IDLE = BIT(1),
};

enum crb_ctrl_sts {
	CRB_CTRL_STS_ERROR = BIT(0),
	CRB_CTRL_STS_TPM_IDLE = BIT(1),
};

enum crb_start {
	CRB_START_INVOKE = BIT(0),
};

enum crb_cancel {
	CRB_CANCEL_INVOKE = BIT(0),
};

struct crb_regs_head {
	u32 loc_state;
	u32 reserved1;
	u32 loc_ctrl;
	u32 loc_sts;
	u8 reserved2[32];
	u64 intf_id;
	u64 ctrl_ext;
} __packed;

struct crb_regs_tail {
	u32 ctrl_req;
	u32 ctrl_sts;
	u32 ctrl_cancel;
	u32 ctrl_start;
	u32 ctrl_int_enable;
	u32 ctrl_int_sts;
	u32 ctrl_cmd_size;
	u32 ctrl_cmd_pa_low;
	u32 ctrl_cmd_pa_high;
	u32 ctrl_rsp_size;
	u64 ctrl_rsp_pa;
} __packed;

enum crb_status {
	CRB_DRV_STS_COMPLETE = BIT(0),
};

struct crb_priv {
	u32 sm;
	const char *hid;
	struct crb_regs_head __iomem *regs_h;
	struct crb_regs_tail __iomem *regs_t;
	u8 __iomem *cmd;
	u8 __iomem *rsp;
	u32 cmd_size;
	u32 smc_func_id;
	u32 __iomem *pluton_start_addr;
	u32 __iomem *pluton_reply_addr;
};

struct tpm2_crb_smc {
	u32 interrupt;
	u8 interrupt_flags;
	u8 op_flags;
	u16 reserved2;
	u32 smc_func_id;
};

struct tpm2_crb_pluton {
	u64 start_addr;
	u64 reply_addr;
};

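/*
 * crb_wait_for_reg_32 - poll a 32-bit register until the bits selected by
 * @mask read back as @value, or until @timeout (in milliseconds) expires.
 * Returns true if the expected value was observed, false on timeout.
 */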
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
				unsigned long timeout)
{
	ktime_t start;
	ktime_t stop;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(timeout));

	do {
		if ((ioread32(reg) & mask) == value)
			return true;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), stop));

	return ((ioread32(reg) & mask) == value);
}

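/*
 * crb_try_pluton_doorbell - ring the Pluton doorbell, if present. Does
 * nothing unless the start method is ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON.
 * Waits for the reply register to read 1, writes 1 to the start register,
 * and, when @wait_for_complete is set, waits for the firmware to clear the
 * start register again.
 */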
static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
{
	if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
		return 0;

	if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C))
		return -ETIME;

	iowrite32(1, priv->pluton_start_addr);
	if (wait_for_complete == false)
		return 0;

	if (!crb_wait_for_reg_32(priv->pluton_start_addr,
				 0xffffffff, 0, 200))
		return -ETIME;

	return 0;
}

/**
 * __crb_go_idle - request the TPM CRB device to go to the idle state
 *
 * @dev: crb device
 * @priv: crb private data
 *
 * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ.
 * The device should respond within TIMEOUT_C by clearing the bit.
 * However, we do not wait for completion here, as a subsequent CMD_READY
 * request will be handled correctly even if the idle transition did not
 * complete.
 *
 * The function does nothing for devices with the ACPI-start method
 * or the SMC-start method.
 *
 * Return: 0 on success, -ETIME on timeout
 */
static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
	int rc;

	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
		return 0;

	iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);

	rc = crb_try_pluton_doorbell(priv, true);
	if (rc)
		return rc;

	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
				 CRB_CTRL_REQ_GO_IDLE /* mask */,
				 0, /* value */
				 TPM2_TIMEOUT_C)) {
		dev_warn(dev, "goIdle timed out\n");
		return -ETIME;
	}

	return 0;
}

static int crb_go_idle(struct tpm_chip *chip)
{
	struct device *dev = &chip->dev;
	struct crb_priv *priv = dev_get_drvdata(dev);

	return __crb_go_idle(dev, priv);
}

/**
 * __crb_cmd_ready - request the TPM CRB device to enter the ready state
 *
 * @dev: crb device
 * @priv: crb private data
 *
 * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
 * and poll until the device acknowledges it by clearing the bit.
 * The device should respond within TIMEOUT_C.
 *
 * The function does nothing for devices with the ACPI-start method
 * or the SMC-start method.
 *
 * Return: 0 on success, -ETIME on timeout
 */
static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
	int rc;

	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
		return 0;

	iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);

	rc = crb_try_pluton_doorbell(priv, true);
	if (rc)
		return rc;

	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
				 CRB_CTRL_REQ_CMD_READY /* mask */,
				 0, /* value */
				 TPM2_TIMEOUT_C)) {
		dev_warn(dev, "cmdReady timed out\n");
		return -ETIME;
	}

	return 0;
}

static int crb_cmd_ready(struct tpm_chip *chip)
{
	struct device *dev = &chip->dev;
	struct crb_priv *priv = dev_get_drvdata(dev);

	return __crb_cmd_ready(dev, priv);
}

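/*
 * __crb_request_locality - request access to the locality: write
 * requestAccess to TPM_LOC_CTRL and poll TPM_LOC_STATE until both
 * locAssigned and tpmRegValidSts are set, or time out. Does nothing when
 * the head registers (regs_h) are not mapped.
 */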
static int __crb_request_locality(struct device *dev,
				  struct crb_priv *priv, int loc)
{
	u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
		    CRB_LOC_STATE_TPM_REG_VALID_STS;

	if (!priv->regs_h)
		return 0;

	iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
	if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
				 TPM2_TIMEOUT_C)) {
		dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
		return -ETIME;
	}

	return 0;
}

static int crb_request_locality(struct tpm_chip *chip, int loc)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return __crb_request_locality(&chip->dev, priv, loc);
}

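/*
 * __crb_relinquish_locality - release the locality: write relinquish to
 * TPM_LOC_CTRL and poll TPM_LOC_STATE until locAssigned is cleared while
 * tpmRegValidSts remains set. Does nothing when the head registers are
 * not mapped.
 */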
static int __crb_relinquish_locality(struct device *dev,
				     struct crb_priv *priv, int loc)
{
	u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
		   CRB_LOC_STATE_TPM_REG_VALID_STS;
	u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;

	if (!priv->regs_h)
		return 0;

	iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
	if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
				 TPM2_TIMEOUT_C)) {
		dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
		return -ETIME;
	}

	return 0;
}

static int crb_relinquish_locality(struct tpm_chip *chip, int loc)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return __crb_relinquish_locality(&chip->dev, priv, loc);
}

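/*
 * crb_status - report CRB_DRV_STS_COMPLETE once the start bit in
 * TPM_CRB_CTRL_START has been cleared by the device, i.e. the previously
 * issued command has completed.
 */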
static u8 crb_status(struct tpm_chip *chip)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
	u8 sts = 0;

	if ((ioread32(&priv->regs_t->ctrl_start) & CRB_START_INVOKE) !=
	    CRB_START_INVOKE)
		sts |= CRB_DRV_STS_COMPLETE;

	return sts;
}

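/*
 * crb_recv - copy a response out of the response buffer. The header is read
 * first to learn the total length, which is validated against both the
 * caller's buffer size and TPM_HEADER_SIZE before the remainder is copied.
 * Returns the response length or a negative error code.
 */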
static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
	unsigned int expected;

	/* A sanity check that the upper layer wants to get at least the header
	 * as that is the minimum size for any TPM response.
	 */
	if (count < TPM_HEADER_SIZE)
		return -EIO;

	/* If this bit is set, according to the spec, the TPM is in an
	 * unrecoverable condition.
	 */
	if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
		return -EIO;

	/* Read the first 8 bytes in order to get the length of the response.
	 * We read exactly a quad word in order to make sure that the remaining
	 * reads will be aligned.
	 */
	memcpy_fromio(buf, priv->rsp, 8);

	expected = be32_to_cpup((__be32 *)&buf[2]);
	if (expected > count || expected < TPM_HEADER_SIZE)
		return -EIO;

	memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);

	return expected;
}

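/*
 * crb_do_acpi_start - invoke the ACPI start method by evaluating the _DSM
 * identified by crb_acpi_start_guid. Returns 0 when the method reports
 * success, -ENXIO otherwise.
 */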
static int crb_do_acpi_start(struct tpm_chip *chip)
{
	union acpi_object *obj;
	int rc;

	obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
				&crb_acpi_start_guid,
				CRB_ACPI_START_REVISION_ID,
				CRB_ACPI_START_INDEX,
				NULL);
	if (!obj)
		return -ENXIO;
	rc = obj->integer.value == 0 ? 0 : -ENXIO;
	ACPI_FREE(obj);
	return rc;
}

#ifdef CONFIG_ARM64
/*
 * This is a TPM Command Response Buffer start method that invokes a
 * Secure Monitor Call to request the firmware to execute or cancel
 * a TPM 2.0 command.
 */
static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
{
	struct arm_smccc_res res;

	arm_smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != 0) {
		dev_err(dev,
			FW_BUG "tpm_crb_smc_start() returns res.a0 = 0x%lx\n",
			res.a0);
		return -EIO;
	}

	return 0;
}
#else
static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
{
	dev_err(dev, FW_BUG "tpm_crb: incorrect start method\n");
	return -EINVAL;
}
#endif

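/*
 * crb_send - write a command into the command buffer and kick off execution
 * using whichever mechanism the start method dictates: the CRB start
 * register, the ACPI _DSM start method, the ARM SMC call, or the Pluton
 * doorbell.
 */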
static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Zero the cancel register so that the next command will not get
	 * canceled.
	 */
	iowrite32(0, &priv->regs_t->ctrl_cancel);

	if (len > priv->cmd_size) {
		dev_err(&chip->dev, "invalid command count value %zd %d\n",
			len, priv->cmd_size);
		return -E2BIG;
	}

	/* Seems to be necessary for every command */
	if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
		__crb_cmd_ready(&chip->dev, priv);

	memcpy_toio(priv->cmd, buf, len);

	/* Make sure that cmd is populated before issuing start. */
	wmb();

	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
	 * reports only ACPI start but in practice seems to require CRB start
	 * as well, hence invoking the CRB start method if hid == MSFT0101.
	 */
	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
	    (!strcmp(priv->hid, "MSFT0101")))
		iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);

	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
		rc = crb_do_acpi_start(chip);

	if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
		iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
		rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
	}

	if (rc)
		return rc;

	return crb_try_pluton_doorbell(priv, false);
}

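/*
 * crb_cancel - request cancellation of the command in flight by setting the
 * cancel bit in TPM_CRB_CTRL_CANCEL; for ACPI start-method devices the ACPI
 * start method is invoked as well.
 */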
static void crb_cancel(struct tpm_chip *chip)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);

	if (((priv->sm == ACPI_TPM2_START_METHOD) ||
	     (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
	    crb_do_acpi_start(chip))
		dev_err(&chip->dev, "ACPI Start failed\n");
}

static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
{
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
	u32 cancel = ioread32(&priv->regs_t->ctrl_cancel);

	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
}

static const struct tpm_class_ops tpm_crb = {
	.flags = TPM_OPS_AUTO_STARTUP,
	.status = crb_status,
	.recv = crb_recv,
	.send = crb_send,
	.cancel = crb_cancel,
	.req_canceled = crb_req_canceled,
	.go_idle = crb_go_idle,
	.cmd_ready = crb_cmd_ready,
	.request_locality = crb_request_locality,
	.relinquish_locality = crb_relinquish_locality,
	.req_complete_mask = CRB_DRV_STS_COMPLETE,
	.req_complete_val = CRB_DRV_STS_COMPLETE,
};

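/*
 * crb_check_resource - acpi_dev_get_resources() callback that collects the
 * memory resources declared for the device into the resource array passed
 * via @data; the array has one spare slot so crb_map_io() can detect when
 * too many regions are declared.
 */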
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
	struct resource *iores_array = data;
	struct resource_win win;
	struct resource *res = &(win.res);
	int i;

	if (acpi_dev_resource_memory(ares, res) ||
	    acpi_dev_resource_address_space(ares, &win)) {
		for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
			if (resource_type(iores_array + i) != IORESOURCE_MEM) {
				iores_array[i] = *res;
				iores_array[i].name = NULL;
				break;
			}
		}
	}

	return 1;
}

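/*
 * crb_map_res - map @size bytes at physical address @start. If @iores is
 * given, the whole ACPI resource is mapped once (cached via @iobase_ptr)
 * and an offset into that mapping is returned; otherwise an ad hoc mapping
 * is created for just this range.
 */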
static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
				 void __iomem **iobase_ptr, u64 start, u32 size)
{
	struct resource new_res = {
		.start = start,
		.end = start + size - 1,
		.flags = IORESOURCE_MEM,
	};

	/* Detect a 64 bit address on a 32 bit system */
	if (start != new_res.start)
		return IOMEM_ERR_PTR(-EINVAL);

	if (!iores)
		return devm_ioremap_resource(dev, &new_res);

	if (!*iobase_ptr) {
		*iobase_ptr = devm_ioremap_resource(dev, iores);
		if (IS_ERR(*iobase_ptr))
			return *iobase_ptr;
	}

	return *iobase_ptr + (new_res.start - iores->start);
}

/*
 * Work around broken BIOSes that return inconsistent values from the ACPI
 * region vs the registers. Trust the ACPI region. Such broken systems
 * probably cannot send large TPM commands since the buffer will be truncated.
 */
static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
			      u64 start, u64 size)
{
	if (io_res->start > start || io_res->end < start)
		return size;

	if (start + size - 1 <= io_res->end)
		return size;

	dev_err(dev,
		FW_BUG "ACPI region does not cover the entire command/response buffer. %pr vs %llx %llx\n",
		io_res, start, size);

	return io_res->end - start + 1;
}

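/*
 * crb_map_io - discover and map the CRB register regions: walk the ACPI
 * memory resources, map the control (tail) registers at the TPM2 table's
 * control address, map the head registers when the layout allows it, then
 * read the command/response buffer addresses and sizes from the control
 * area and map those buffers as well.
 */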
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
		      struct acpi_table_tpm2 *buf)
{
	struct list_head acpi_resource_list;
	struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
	void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
	struct device *dev = &device->dev;
	struct resource *iores;
	void __iomem **iobase_ptr;
	int i;
	u32 pa_high, pa_low;
	u64 cmd_pa;
	u32 cmd_size;
	__le64 __rsp_pa;
	u64 rsp_pa;
	u32 rsp_size;
	int ret;

	/*
	 * Pluton sometimes does not define ACPI memory regions.
	 * Mapping is then done in crb_map_pluton().
	 */
	if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
		INIT_LIST_HEAD(&acpi_resource_list);
		ret = acpi_dev_get_resources(device, &acpi_resource_list,
					     crb_check_resource, iores_array);
		if (ret < 0)
			return ret;
		acpi_dev_free_resource_list(&acpi_resource_list);

		if (resource_type(iores_array) != IORESOURCE_MEM) {
			dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
			return -EINVAL;
		} else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
			   IORESOURCE_MEM) {
			dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
			memset(iores_array + TPM_CRB_MAX_RESOURCES,
			       0, sizeof(*iores_array));
			iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
		}
	}

	iores = NULL;
	iobase_ptr = NULL;
	for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
		if (buf->control_address >= iores_array[i].start &&
		    buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
		    iores_array[i].end) {
			iores = iores_array + i;
			iobase_ptr = iobase_array + i;
			break;
		}
	}

	priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
				   sizeof(struct crb_regs_tail));

	if (IS_ERR(priv->regs_t))
		return PTR_ERR(priv->regs_t);

	/* The ACPI IO region starts at the head area and continues to include
	 * the control area, as one nice sane region, except on some older
	 * devices that put the control area outside the ACPI IO region.
	 */
	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
		if (iores &&
		    buf->control_address == iores->start +
		    sizeof(*priv->regs_h))
			priv->regs_h = *iobase_ptr;
		else
			dev_warn(dev, FW_BUG "Bad ACPI memory layout");
	}

	ret = __crb_request_locality(dev, priv, 0);
	if (ret)
		return ret;

	/*
	 * PTT HW bug w/a: wake up the device to access
	 * possibly not retained registers.
	 */
	ret = __crb_cmd_ready(dev, priv);
	if (ret)
		goto out_relinquish_locality;

	pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
	pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
	cmd_pa = ((u64)pa_high << 32) | pa_low;
	cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);

	iores = NULL;
	iobase_ptr = NULL;
	for (i = 0; iores_array[i].end; ++i) {
		if (cmd_pa >= iores_array[i].start &&
		    cmd_pa <= iores_array[i].end) {
			iores = iores_array + i;
			iobase_ptr = iobase_array + i;
			break;
		}
	}

	if (iores)
		cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);

	dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
		pa_high, pa_low, cmd_size);

	priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
	if (IS_ERR(priv->cmd)) {
		ret = PTR_ERR(priv->cmd);
		goto out;
	}

	memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
	rsp_pa = le64_to_cpu(__rsp_pa);
	rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);

	iores = NULL;
	iobase_ptr = NULL;
	for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
		if (rsp_pa >= iores_array[i].start &&
		    rsp_pa <= iores_array[i].end) {
			iores = iores_array + i;
			iobase_ptr = iobase_array + i;
			break;
		}
	}

	if (iores)
		rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);

	if (cmd_pa != rsp_pa) {
		priv->rsp = crb_map_res(dev, iores, iobase_ptr,
					rsp_pa, rsp_size);
		ret = PTR_ERR_OR_ZERO(priv->rsp);
		goto out;
	}

	/* According to the PTP specification, overlapping command and response
	 * buffer sizes must be identical.
	 */
	if (cmd_size != rsp_size) {
		dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
		ret = -EINVAL;
		goto out;
	}

	priv->rsp = priv->cmd;

out:
	if (!ret)
		priv->cmd_size = cmd_size;

	__crb_go_idle(dev, priv);

out_relinquish_locality:

	__crb_relinquish_locality(dev, priv, 0);

	return ret;
}

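/*
 * crb_map_pluton - map the Pluton start and reply doorbell registers whose
 * physical addresses are supplied in the tpm2_crb_pluton block that follows
 * the TPM2 ACPI table.
 */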
static int crb_map_pluton(struct device *dev, struct crb_priv *priv,
			  struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton)
{
	priv->pluton_start_addr = crb_map_res(dev, NULL, NULL,
					      crb_pluton->start_addr, 4);
	if (IS_ERR(priv->pluton_start_addr))
		return PTR_ERR(priv->pluton_start_addr);

	priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL,
					      crb_pluton->reply_addr, 4);
	if (IS_ERR(priv->pluton_reply_addr))
		return PTR_ERR(priv->pluton_reply_addr);

	return 0;
}

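/*
 * crb_acpi_add - probe routine: read the TPM2 ACPI table, validate the start
 * method (including the optional SMC and Pluton extensions appended to the
 * table), map the CRB registers and buffers, and register the TPM chip.
 */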
static int crb_acpi_add(struct acpi_device *device)
{
	struct acpi_table_tpm2 *buf;
	struct crb_priv *priv;
	struct tpm_chip *chip;
	struct device *dev = &device->dev;
	struct tpm2_crb_smc *crb_smc;
	struct tpm2_crb_pluton *crb_pluton;
	acpi_status status;
	u32 sm;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
		return -EINVAL;
	}

	/* Should the FIFO driver handle this? */
	sm = buf->start_method;
	if (sm == ACPI_TPM2_MEMORY_MAPPED) {
		rc = -ENODEV;
		goto out;
	}

	priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
	if (!priv) {
		rc = -ENOMEM;
		goto out;
	}

	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
			dev_err(dev,
				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
				buf->header.length,
				ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
			rc = -EINVAL;
			goto out;
		}
		crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
		priv->smc_func_id = crb_smc->smc_func_id;
	}

	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
			dev_err(dev,
				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
				buf->header.length,
				ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
			rc = -EINVAL;
			goto out;
		}
		crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
		rc = crb_map_pluton(dev, priv, buf, crb_pluton);
		if (rc)
			goto out;
	}

	priv->sm = sm;
	priv->hid = acpi_device_hid(device);

	rc = crb_map_io(device, priv, buf);
	if (rc)
		goto out;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip)) {
		rc = PTR_ERR(chip);
		goto out;
	}

	dev_set_drvdata(&chip->dev, priv);
	chip->acpi_dev_handle = device->handle;
	chip->flags = TPM_CHIP_FLAG_TPM2;

	rc = tpm_chip_bootstrap(chip);
	if (rc)
		goto out;

#ifdef CONFIG_X86
	/* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
		dev_info(dev, "Disabling hwrng\n");
		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
	}
#endif /* CONFIG_X86 */

	rc = tpm_chip_register(chip);

out:
	acpi_put_table((struct acpi_table_header *)buf);
	return rc;
}

static void crb_acpi_remove(struct acpi_device *device)
{
	struct device *dev = &device->dev;
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_chip_unregister(chip);
}

static const struct dev_pm_ops crb_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
};

static const struct acpi_device_id crb_device_ids[] = {
	{"MSFT0101", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, crb_device_ids);

static struct acpi_driver crb_acpi_driver = {
	.name = "tpm_crb",
	.ids = crb_device_ids,
	.ops = {
		.add = crb_acpi_add,
		.remove = crb_acpi_remove,
	},
	.drv = {
		.pm = &crb_pm,
	},
};

module_acpi_driver(crb_acpi_driver);
MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
MODULE_DESCRIPTION("TPM2 Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");