/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */

#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H

#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"

/* Algorithm resources allocated per hardware SEC queue */
struct sec_alg_res {
	u8 *pbuf;		/* bounce buffer for small packets */
	dma_addr_t pbuf_dma;
	u8 *c_ivin;		/* cipher IV input buffer */
	dma_addr_t c_ivin_dma;
	u8 *a_ivin;		/* authentication IV input buffer */
	dma_addr_t a_ivin_dma;
	u8 *out_mac;		/* output MAC buffer */
	dma_addr_t out_mac_dma;
	u16 depth;		/* queue depth */
};

/* Cipher request private to the SEC driver */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_out;	/* output hardware SGL */
	dma_addr_t c_out_dma;
	u8 *c_ivin;			/* cipher IV input buffer */
	dma_addr_t c_ivin_dma;
	struct skcipher_request *sk_req;	/* originating crypto API request */
	u32 c_len;			/* cipher data length */
	bool encrypt;			/* true for encryption, false for decryption */
};

33
34struct sec_aead_req {
35 u8 *out_mac;
36 dma_addr_t out_mac_dma;
37 u8 *a_ivin;
38 dma_addr_t a_ivin_dma;
39 struct aead_request *aead_req;
40};
41
/* SEC request of the Crypto layer */
struct sec_req {
	union {
		struct sec_sqe sec_sqe;		/* v2 format BD */
		struct sec_sqe3 sec_sqe3;	/* v3 format BD */
	};
	struct sec_ctx *ctx;
	struct sec_qp_ctx *qp_ctx;

	/* Common parameters of the SEC request */
	struct hisi_acc_hw_sgl *in;
	dma_addr_t in_dma;
	struct sec_cipher_req c_req;
	struct sec_aead_req aead_req;
	struct list_head backlog_head;

	int err_type;
	int req_id;
	u32 flag;

	/* Status of the SEC request */
	bool fake_busy;
	bool use_pbuf;
};
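
/*
 * Illustrative sketch (not part of the driver): when a hardware queue is
 * full and the crypto request allows backlogging, a request like the one
 * above is typically parked on the per-queue backlog list through its
 * backlog_head node and resubmitted later from the completion path. The
 * exact locking and resubmission policy below is an assumption for
 * illustration only.
 *
 *	spin_lock_bh(&qp_ctx->req_lock);
 *	list_add_tail(&req->backlog_head, &qp_ctx->backlog);
 *	spin_unlock_bh(&qp_ctx->req_lock);
 */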

/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Transfer request data (e.g. the IV) into the hardware buffers
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Callback for request completion
 * @process: Main processing logic of skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
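
/*
 * Illustrative sketch (not part of the driver): a request is expected to
 * flow through the callbacks above roughly in the order shown below; the
 * exact call sites and error handling are simplified assumptions.
 *
 *	ret = ctx->req_op->buf_map(ctx, req);
 *	if (ctx->req_op->do_transfer)
 *		ctx->req_op->do_transfer(ctx, req);
 *	ret = ctx->req_op->bd_fill(ctx, req);
 *	ret = ctx->req_op->bd_send(ctx, req);
 *
 * On hardware completion, ->callback(ctx, req, err) finishes the crypto
 * request and ->buf_unmap() releases the DMA mappings.
 */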

/* SEC authentication context */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;
	u8 *a_key;			/* authentication key buffer */
	u8 a_key_len;			/* authentication key length */
	u8 mac_len;			/* MAC (digest) length */
	u8 a_alg;			/* authentication algorithm */
	bool fallback;			/* use the software fallback */
	struct crypto_shash *hash_tfm;	/* shash used when setting the auth key */
	struct crypto_aead *fallback_aead_tfm;	/* software AEAD fallback */
};

/* SEC cipher context holding the cipher-related data */
struct sec_cipher_ctx {
	u8 *c_key;		/* cipher key buffer */
	dma_addr_t c_key_dma;
	sector_t iv_offset;
	u32 c_gran_size;
	u32 ivsize;
	u8 c_mode;		/* cipher mode */
	u8 c_alg;		/* cipher algorithm */
	u8 c_key_len;

	/* software fallback support */
	bool fallback;
	struct crypto_sync_skcipher *fbtfm;	/* software sync-skcipher fallback */
};
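
/*
 * Illustrative sketch (not part of the driver): fbtfm above is a software
 * sync-skcipher fallback; forwarding a request to it would typically look
 * like the snippet below (assumes <crypto/skcipher.h>, details simplified).
 *
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
 *
 *	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
 *	skcipher_request_set_callback(subreq, sk_req->base.flags, NULL, NULL);
 *	skcipher_request_set_crypt(subreq, sk_req->src, sk_req->dst,
 *				   sk_req->cryptlen, sk_req->iv);
 *	ret = encrypt ? crypto_skcipher_encrypt(subreq) :
 *			crypto_skcipher_decrypt(subreq);
 */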

/* SEC queue context holding the per-queue resources */
struct sec_qp_ctx {
	struct hisi_qp *qp;			/* hardware queue pair */
	struct sec_req **req_list;		/* in-flight requests, indexed by req_id */
	struct idr req_idr;			/* allocator for request ids */
	struct sec_alg_res *res;		/* per-request algorithm resources */
	struct sec_ctx *ctx;			/* owning SEC context */
	spinlock_t req_lock;
	struct list_head backlog;		/* backlogged requests when the queue is full */
	struct hisi_acc_sgl_pool *c_in_pool;	/* hardware SGL pool for input */
	struct hisi_acc_sgl_pool *c_out_pool;	/* hardware SGL pool for output */
};
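
/*
 * Illustrative sketch (not part of the driver): req_idr above hands out a
 * per-queue request id that indexes both req_list and the res array; the
 * snippet below is an assumption about how such an allocation could look
 * (depth stands for the queue depth).
 *
 *	spin_lock_bh(&qp_ctx->req_lock);
 *	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, depth, GFP_ATOMIC);
 *	spin_unlock_bh(&qp_ctx->req_lock);
 *
 *	req->qp_ctx = qp_ctx;
 *	qp_ctx->req_list[req_id] = req;
 *	req->c_req.c_ivin = qp_ctx->res[req_id].c_ivin;
 *	req->c_req.c_ivin_dma = qp_ctx->res[req_id].c_ivin_dma;
 */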

enum sec_alg_type {
	SEC_SKCIPHER,
	SEC_AEAD
};

/* SEC Crypto TFM context holding the queue, cipher and auth related data */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;
	struct sec_dev *sec;
	const struct sec_req_op *req_op;
	struct hisi_qp **qps;

	/* Half of the queues for encryption, and half for decryption */
	u32 hlf_q_num;

	/*
	 * Threshold of outstanding requests: once it is reached, -EBUSY
	 * (fake busy) is returned to the user.
	 */
	u32 fake_req_limit;

	/* Current cyclic index used to select a queue for encryption */
	atomic_t enc_qcyclic;

	/* Current cyclic index used to select a queue for decryption */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;
	bool pbuf_supported;		/* hardware supports the pbuf bounce buffer */
	struct sec_cipher_ctx c_ctx;
	struct sec_auth_ctx a_ctx;
	u8 type_supported;		/* BD format supported by the queues */
	struct device *dev;
};
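
/*
 * Illustrative sketch (not part of the driver): with half of the queues
 * used for encryption and half for decryption, the cyclic counters above
 * could be used to spread requests as shown below; the exact mapping is an
 * assumption for illustration only.
 *
 *	if (encrypt)
 *		qid = (u32)atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num;
 *	else
 *		qid = (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
 *		      ctx->hlf_q_num;
 *
 *	qp_ctx = &ctx->qp_ctx[qid];
 */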

enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,
	SEC_DEBUG_FILE_NUM,
};

struct sec_debug_file {
	enum sec_debug_file_index index;
	spinlock_t lock;
	struct hisi_qm *qm;
};

/* Statistics counters exposed through debugfs */
struct sec_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
	atomic64_t send_busy_cnt;
	atomic64_t recv_busy_cnt;
	atomic64_t err_bd_cnt;
	atomic64_t invalid_req_cnt;
	atomic64_t done_flag_cnt;
};
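
/*
 * Illustrative sketch (not part of the driver): the counters above would
 * typically be bumped from the send and receive paths, e.g. (assumed call
 * sites):
 *
 *	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 *	...
 *	atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
 */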

struct sec_debug {
	struct sec_dfx dfx;
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
};

struct sec_dev {
	struct hisi_qm qm;
	struct sec_debug debug;
	u32 ctx_q_num;
	bool iommu_used;
};

enum sec_cap_type {
	SEC_QM_NFE_MASK_CAP = 0x0,
	SEC_QM_RESET_MASK_CAP,
	SEC_QM_OOO_SHUTDOWN_MASK_CAP,
	SEC_QM_CE_MASK_CAP,
	SEC_NFE_MASK_CAP,
	SEC_RESET_MASK_CAP,
	SEC_OOO_SHUTDOWN_MASK_CAP,
	SEC_CE_MASK_CAP,
	SEC_CLUSTER_NUM_CAP,
	SEC_CORE_TYPE_NUM_CAP,
	SEC_CORE_NUM_CAP,
	SEC_CORES_PER_CLUSTER_NUM_CAP,
	SEC_CORE_ENABLE_BITMAP,
	SEC_DRV_ALG_BITMAP_LOW,
	SEC_DRV_ALG_BITMAP_HIGH,
	SEC_DEV_ALG_BITMAP_LOW,
	SEC_DEV_ALG_BITMAP_HIGH,
	SEC_CORE1_ALG_BITMAP_LOW,
	SEC_CORE1_ALG_BITMAP_HIGH,
	SEC_CORE2_ALG_BITMAP_LOW,
	SEC_CORE2_ALG_BITMAP_HIGH,
	SEC_CORE3_ALG_BITMAP_LOW,
	SEC_CORE3_ALG_BITMAP_HIGH,
	SEC_CORE4_ALG_BITMAP_LOW,
	SEC_CORE4_ALG_BITMAP_HIGH,
};

enum sec_cap_reg_record_idx {
	SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
	SEC_DRV_ALG_BITMAP_HIGH_IDX,
	SEC_DEV_ALG_BITMAP_LOW_IDX,
	SEC_DEV_ALG_BITMAP_HIGH_IDX,
};
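
/*
 * Illustrative sketch (not part of the driver): each algorithm bitmap is
 * split across a HIGH and a LOW 32-bit capability value, and
 * sec_get_alg_bitmap() (declared below) is expected to combine them into a
 * single 64-bit value, conceptually:
 *
 *	bitmap = ((u64)high_val << 32) | low_val;
 *
 * which a caller could then test against per-algorithm bits, e.g. with a
 * hypothetical mask name:
 *
 *	if (sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
 *			       SEC_DRV_ALG_BITMAP_LOW_IDX) & SEC_ALG_EXAMPLE_MASK)
 *		register_that_algorithm();
 */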

void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif

/* Source: linux/drivers/crypto/hisilicon/sec2/sec.h */