/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_driver.h
 * ARM CryptoCell Linux Crypto Driver
 */

#ifndef __CC_DRIVER_H__
#define __CC_DRIVER_H__

#ifdef COMP_IN_WQ
#include <linux/workqueue.h>
#else
#include <linux/interrupt.h>
#endif
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include "cc_host_regs.h"
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"

extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "5.0"

enum cc_hw_rev {
	CC_HW_REV_630 = 630,
	CC_HW_REV_710 = 710,
	CC_HW_REV_712 = 712,
	CC_HW_REV_713 = 713
};

enum cc_std_body {
	CC_STD_NIST = 0x1,
	CC_STD_OSCCA = 0x2,
	CC_STD_ALL = 0x3
};

#define CC_PINS_FULL	0x0
#define CC_PINS_SLIM	0x9F

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48

#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)

#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)

#define CC_CPP_AES_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))

#define CC_CPP_SM4_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
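
/*
 * Illustrative expansion (the register name here is only an example and is
 * expected to be defined in cc_host_regs.h):
 *
 *	CC_REG(HOST_IRR)  ->  CC_HOST_IRR_REG_OFFSET
 *
 * The resulting offset is what the cc_ioread()/cc_iowrite() helpers below
 * take as their 'reg' argument.
 */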

/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 400

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of the
 * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
 */

struct cc_cpp_req {
	bool is_cpp;
	enum cc_cpp_alg alg;
	u8 slot;
};

#define CC_MAX_IVGEN_DMA_ADDRESSES	3
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);
	void *user_arg;
	struct completion seq_compl; /* request completion */
	struct cc_cpp_req cpp;
};
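
/*
 * Illustrative sketch of filling the request context before queueing
 * (my_complete and req are hypothetical names, not defined in this driver):
 *
 *	struct cc_crypto_req cc_req = {};
 *
 *	cc_req.user_cb = my_complete;	// invoked from the completion path
 *	cc_req.user_arg = req;		// e.g. the crypto API request
 */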

/**
 * struct cc_drvdata - driver private data context
 * @cc_base:	virt address of the CC registers
 * @irq:	bitmap indicating source of last interrupt
 */
struct cc_drvdata {
	void __iomem *cc_base;
	int irq;
	struct completion hw_queue_avail; /* wait for HW queue availability */
	struct platform_device *plat_dev;
	u32 mlli_sram_addr;
	struct dma_pool *mlli_buffs_pool;
	struct list_head alg_list;
	void *hash_handle;
	void *aead_handle;
	void *request_mgr_handle;
	void *fips_handle;
	u32 sram_free_offset;	/* offset to non-allocated area in SRAM */
	struct dentry *dir;	/* for debugfs */
	struct clk *clk;
	bool coherent;
	char *hw_rev_name;
	enum cc_hw_rev hw_rev;
	u32 axim_mon_offset;
	u32 sig_offset;
	u32 ver_offset;
	int std_bodies;
	bool sec_disabled;
	u32 comp_mask;
	u32 cache_params;
	u32 ace_const;
};

struct cc_crypto_alg {
	struct list_head entry;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	struct cc_drvdata *drvdata;
	struct skcipher_alg skcipher_alg;
	struct aead_alg aead_alg;
};

struct cc_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	union {
		struct skcipher_alg skcipher;
		struct aead_alg aead;
	} template_u;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	u32 min_hw_rev;
	enum cc_std_body std_body;
	bool sec_func;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
};

struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;
	u8 *iv;
	enum drv_crypto_direction op_type;
};

static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
	return &drvdata->plat_dev->dev;
}
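
/*
 * Illustrative use (the log message is hypothetical): callers typically
 * resolve the struct device for logging and DMA mapping via this helper:
 *
 *	struct device *dev = drvdata_to_dev(drvdata);
 *
 *	dev_err(dev, "something went wrong\n");
 */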

void __dump_byte_array(const char *name, const u8 *buf, size_t len);
static inline void dump_byte_array(const char *name, const u8 *the_array,
				   size_t size)
{
	if (cc_dump_bytes)
		__dump_byte_array(name, the_array, size);
}
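
/*
 * Illustrative call (ctx->key and keylen are hypothetical): the dump is
 * gated at runtime by cc_dump_bytes, so the call can stay in place, e.g.
 *
 *	dump_byte_array("key", ctx->key, keylen);
 */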

bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata);
void fini_cc_regs(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);

static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, (drvdata->cc_base + reg));
}

static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}
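
/*
 * Illustrative use (the register names are assumptions about what
 * cc_host_regs.h provides, not asserted here):
 *
 *	u32 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
 *
 *	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
 */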

static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
{
	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
}
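
/*
 * Illustrative use (ivsize is a hypothetical length): allocations on the
 * request path should honour the crypto API's MAY_SLEEP hint, e.g.
 *
 *	gfp_t flags = cc_gfp_flags(&req->base);
 *	u8 *iv = kzalloc(ivsize, flags);
 */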

static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
				      struct cc_hw_desc *pdesc)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		set_queue_last_ind_bit(pdesc);
}

#endif /*__CC_DRIVER_H__*/