// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

#include <linux/debugfs.h>

#include "cipher.h"
#include "util.h"

/* offset of SPU_OFIFO_CTRL register */
#define SPU_OFIFO_CTRL		0x40
#define SPU_FIFO_WATERMARK	0x1FF

/**
 * spu_sg_at_offset() - Find the scatterlist entry at a given distance from the
 * start of a scatterlist.
 * @sg: [in] Start of a scatterlist
 * @skip: [in] Distance from the start of the scatterlist, in bytes
 * @sge: [out] Scatterlist entry at skip bytes from start
 * @sge_offset: [out] Number of bytes from start of sge buffer to get to
 *		requested distance.
 *
 * Return: 0 if entry found at requested distance
 *	   < 0 otherwise
 */
int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
		     struct scatterlist **sge, unsigned int *sge_offset)
{
	/* byte index from start of sg to the end of the previous entry */
	unsigned int index = 0;
	/* byte index from start of sg to the end of the current entry */
	unsigned int next_index;

	next_index = sg->length;
	while (next_index <= skip) {
		sg = sg_next(sg);
		index = next_index;
		if (!sg)
			return -EINVAL;
		next_index += sg->length;
	}

	*sge_offset = skip - index;
	*sge = sg;
	return 0;
}

/* Copy len bytes of sg data, starting at offset skip, to a dest buffer */
void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
			 unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(src);

	copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}

/*
 * Copy data into a scatterlist starting at a specified offset in the
 * scatterlist. Specifically, copy len bytes of data in the buffer src
 * into the scatterlist dest, starting skip bytes into the scatterlist.
 */
void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
			   unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(dest);

	copied = sg_pcopy_from_buffer(dest, nents, src, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}

/**
 * spu_sg_count() - Determine number of elements in scatterlist to provide a
 * specified number of bytes.
 * @sg_list: scatterlist to examine
 * @skip: index of starting point
 * @nbytes: consider elements of scatterlist until reaching this number of
 *	    bytes
 *
 * Return: the number of sg entries contributing to nbytes of data
 */
int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
{
	struct scatterlist *sg;
	int sg_nents = 0;
	unsigned int offset;

	if (!sg_list)
		return 0;

	if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0)
		return 0;

	while (sg && (nbytes > 0)) {
		sg_nents++;
		nbytes -= (sg->length - offset);
		offset = 0;
		sg = sg_next(sg);
	}
	return sg_nents;
}

/**
 * spu_msg_sg_add() - Copy scatterlist entries from one sg to another, up to a
 * given length.
 * @to_sg: scatterlist to copy to
 * @from_sg: scatterlist to copy from
 * @from_skip: number of bytes to skip in from_sg. Non-zero when previous
 *	       request included part of the buffer in entry in from_sg.
 *	       Assumes from_skip < from_sg->length.
 * @from_nents: number of entries in from_sg
 * @length: number of bytes to copy. may reach this limit before exhausting
 *	    from_sg.
 *
 * Copies the entries themselves, not the data in the entries. Assumes to_sg has
 * enough entries. Does not limit the size of an individual buffer in to_sg.
 *
 * to_sg, from_sg, skip are all updated to end of copy
 *
 * Return: Number of bytes copied
 */
u32 spu_msg_sg_add(struct scatterlist **to_sg,
		   struct scatterlist **from_sg, u32 *from_skip,
		   u8 from_nents, u32 length)
{
	struct scatterlist *sg;	/* an entry in from_sg */
	struct scatterlist *to = *to_sg;
	struct scatterlist *from = *from_sg;
	u32 skip = *from_skip;
	u32 offset;
	int i;
	u32 entry_len = 0;
	u32 frag_len = 0;	/* length of entry added to to_sg */
	u32 copied = 0;		/* number of bytes copied so far */

	if (length == 0)
		return 0;

	for_each_sg(from, sg, from_nents, i) {
		/* number of bytes in this from entry not yet used */
		entry_len = sg->length - skip;
		frag_len = min(entry_len, length - copied);
		offset = sg->offset + skip;
		if (frag_len)
			sg_set_page(to++, sg_page(sg), frag_len, offset);
		copied += frag_len;
		if (copied == entry_len) {
			/* used up all of from entry */
			skip = 0;	/* start at beginning of next entry */
		}
		if (copied == length)
			break;
	}
	*to_sg = to;
	*from_sg = sg;
	if (frag_len < entry_len)
		*from_skip = skip + frag_len;
	else
		*from_skip = 0;

	return copied;
}

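/**
 * add_to_ctr() - Add an increment to a counter stored as a 128-bit big-endian
 * value, propagating any carry from the low 64 bits into the high 64 bits.
 * @ctr_pos: Pointer to the 16-byte big-endian counter
 * @increment: Amount to add to the counter
 */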
void add_to_ctr(u8 *ctr_pos, unsigned int increment)
{
	__be64 *high_be = (__be64 *)ctr_pos;
	__be64 *low_be = high_be + 1;
	u64 orig_low = __be64_to_cpu(*low_be);
	u64 new_low = orig_low + (u64)increment;

	*low_be = __cpu_to_be64(new_low);
	if (new_low < orig_low)
		/* there was a carry from the low 8 bytes */
		*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
}

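/*
 * Descriptor for a synchronous hash operation: the shash descriptor followed
 * by space for the hash algorithm's operational state.
 */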
struct sdesc {
	struct shash_desc shash;
	char ctx[];
};

/**
 * do_shash() - Do a synchronous hash operation in software
 * @name: The name of the hash algorithm
 * @result: Buffer where digest is to be written
 * @data1: First part of data to hash. May be NULL.
 * @data1_len: Length of data1, in bytes
 * @data2: Second part of data to hash. May be NULL.
 * @data2_len: Length of data2, in bytes
 * @key: Key (if keyed hash)
 * @key_len: Length of key, in bytes (or 0 if non-keyed hash)
 *
 * Note that the crypto API will not select this driver's own transform because
 * this driver only registers asynchronous algos.
 *
 * Return: 0 if hash successfully stored in result
 *	   < 0 otherwise
 */
int do_shash(unsigned char *name, unsigned char *result,
	     const u8 *data1, unsigned int data1_len,
	     const u8 *data2, unsigned int data2_len,
	     const u8 *key, unsigned int key_len)
{
	int rc;
	unsigned int size;
	struct crypto_shash *hash;
	struct sdesc *sdesc;

	hash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(hash)) {
		rc = PTR_ERR(hash);
		pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc);
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc) {
		rc = -ENOMEM;
		goto do_shash_err;
	}
	sdesc->shash.tfm = hash;

	if (key_len > 0) {
		rc = crypto_shash_setkey(hash, key, key_len);
		if (rc) {
			pr_err("%s: Could not setkey %s shash\n", __func__, name);
			goto do_shash_err;
		}
	}

	rc = crypto_shash_init(&sdesc->shash);
	if (rc) {
		pr_err("%s: Could not init %s shash\n", __func__, name);
		goto do_shash_err;
	}
	rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
	if (rc) {
		pr_err("%s: Could not update1\n", __func__);
		goto do_shash_err;
	}
	if (data2 && data2_len) {
		rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
		if (rc) {
			pr_err("%s: Could not update2\n", __func__);
			goto do_shash_err;
		}
	}
	rc = crypto_shash_final(&sdesc->shash, result);
	if (rc)
		pr_err("%s: Could not generate %s hash\n", __func__, name);

do_shash_err:
	crypto_free_shash(hash);
	kfree(sdesc);

	return rc;
}

#ifdef DEBUG
/* Dump len bytes of a scatterlist starting at skip bytes into the sg */
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
{
	u8 dbuf[16];
	unsigned int idx = skip;
	unsigned int num_out = 0;	/* number of bytes dumped so far */
	unsigned int count;

	if (packet_debug_logging) {
		while (num_out < len) {
			count = (len - num_out > 16) ? 16 : len - num_out;
			sg_copy_part_to_buf(sg, dbuf, count, idx);
			num_out += count;
			print_hex_dump(KERN_ALERT, "  sg: ", DUMP_PREFIX_NONE,
				       4, 1, dbuf, count, false);
			idx += 16;
		}
	}
	if (debug_logging_sleep)
		msleep(debug_logging_sleep);
}
#endif

/* Returns the name for a given cipher alg/mode */
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
{
	switch (alg) {
	case CIPHER_ALG_RC4:
		return "rc4";
	case CIPHER_ALG_AES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(aes)";
		case CIPHER_MODE_ECB:
			return "ecb(aes)";
		case CIPHER_MODE_OFB:
			return "ofb(aes)";
		case CIPHER_MODE_CFB:
			return "cfb(aes)";
		case CIPHER_MODE_CTR:
			return "ctr(aes)";
		case CIPHER_MODE_XTS:
			return "xts(aes)";
		case CIPHER_MODE_GCM:
			return "gcm(aes)";
		default:
			return "aes";
		}
		break;
	case CIPHER_ALG_DES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(des)";
		case CIPHER_MODE_ECB:
			return "ecb(des)";
		case CIPHER_MODE_CTR:
			return "ctr(des)";
		default:
			return "des";
		}
		break;
	case CIPHER_ALG_3DES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(des3_ede)";
		case CIPHER_MODE_ECB:
			return "ecb(des3_ede)";
		case CIPHER_MODE_CTR:
			return "ctr(des3_ede)";
		default:
			return "3des";
		}
		break;
	default:
		return "other";
	}
}

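/*
 * Read handler for the debugfs stats file. Formats the driver's session and
 * operation counters (and, for SPU-M hardware, each SPU's output FIFO high
 * water mark) into a buffer and copies the requested portion to userspace.
 */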
static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct bcm_device_private *ipriv;
	char *buf;
	ssize_t ret, out_offset, out_count;
	int i;
	u32 fifo_len;
	u32 spu_ofifo_ctrl;
	u32 alg;
	u32 mode;
	u32 op_cnt;

	out_count = 2048;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ipriv = filp->private_data;
	out_offset = 0;
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Number of SPUs.........%u\n",
				ipriv->spu.num_spu);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Current sessions.......%u\n",
				atomic_read(&ipriv->session_count));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Session count..........%u\n",
				atomic_read(&ipriv->stream_count));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Cipher setkey..........%u\n",
				atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Cipher Ops.............%u\n",
				atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
	for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
		for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
			op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
			if (op_cnt) {
				out_offset += scnprintf(buf + out_offset,
							out_count - out_offset,
							"  %-13s%11u\n",
							spu_alg_name(alg, mode),
							op_cnt);
			}
		}
	}
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Hash Ops...............%u\n",
				atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
		if (op_cnt) {
			out_offset += scnprintf(buf + out_offset,
						out_count - out_offset,
						"  %-13s%11u\n",
						hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"HMAC setkey............%u\n",
				atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"HMAC Ops...............%u\n",
				atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
		if (op_cnt) {
			out_offset += scnprintf(buf + out_offset,
						out_count - out_offset,
						"  %-13s%11u\n",
						hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"AEAD setkey............%u\n",
				atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));

	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"AEAD Ops...............%u\n",
				atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
	for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
		if (op_cnt) {
			out_offset += scnprintf(buf + out_offset,
						out_count - out_offset,
						"  %-13s%11u\n",
						aead_alg_name[alg], op_cnt);
		}
	}
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Bytes of req data......%llu\n",
				(u64)atomic64_read(&ipriv->bytes_out));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Bytes of resp data.....%llu\n",
				(u64)atomic64_read(&ipriv->bytes_in));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Mailbox full...........%u\n",
				atomic_read(&ipriv->mb_no_spc));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Mailbox send failures..%u\n",
				atomic_read(&ipriv->mb_send_fail));
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Check ICV errors.......%u\n",
				atomic_read(&ipriv->bad_icv));
	if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
		for (i = 0; i < ipriv->spu.num_spu; i++) {
			spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
						  SPU_OFIFO_CTRL);
			fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
			out_offset += scnprintf(buf + out_offset,
						out_count - out_offset,
						"SPU %d output FIFO high water.....%u\n",
						i, fifo_len);
		}

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations spu_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = spu_debugfs_read,
};

/*
 * Create the debug FS directories. If the top-level directory has not yet
 * been created, create it now. Create a stats file in this directory for
 * a SPU.
 */
void spu_setup_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	if (!iproc_priv.debugfs_dir)
		iproc_priv.debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
							    NULL);

	if (!iproc_priv.debugfs_stats)
		/* Create file with permissions S_IRUSR */
		debugfs_create_file("stats", 0400, iproc_priv.debugfs_dir,
				    &iproc_priv, &spu_debugfs_stats);
}

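/* Remove the debugfs directory and stats file created by spu_setup_debugfs() */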
void spu_free_debugfs(void)
{
	debugfs_remove_recursive(iproc_priv.debugfs_dir);
	iproc_priv.debugfs_dir = NULL;
}

/**
 * format_value_ccm() - Format a value into a buffer, using a specified number
 *			of bytes (i.e. maybe writing value X into a 4 byte
 *			buffer, or maybe into a 12 byte buffer), as per the
 *			SPU CCM spec.
 *
 * @val: value to write (up to max of unsigned int)
 * @buf: (pointer to) buffer to write the value
 * @len: number of bytes to use (0 to 255)
 *
 */
void format_value_ccm(unsigned int val, u8 *buf, u8 len)
{
	int i;

	/* First clear full output buffer */
	memset(buf, 0, len);

	/* Then, starting from right side, fill in with data */
	for (i = 0; i < len; i++) {
		buf[len - i - 1] = (val >> (8 * i)) & 0xff;
		if (i >= 3)
			break;	/* Only handle up to 32 bits of 'val' */
	}
}