1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2005,2006,2007,2008 IBM Corporation |
4 | * |
5 | * Authors: |
6 | * Mimi Zohar <zohar@us.ibm.com> |
7 | * Kylene Hall <kjhall@us.ibm.com> |
8 | * |
9 | * File: ima_crypto.c |
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
11 | */ |
12 | |
13 | #include <linux/kernel.h> |
14 | #include <linux/moduleparam.h> |
15 | #include <linux/ratelimit.h> |
16 | #include <linux/file.h> |
17 | #include <linux/crypto.h> |
18 | #include <linux/scatterlist.h> |
19 | #include <linux/err.h> |
20 | #include <linux/slab.h> |
21 | #include <crypto/hash.h> |
22 | |
23 | #include "ima.h" |
24 | |
/* minimum file size for ahash use */
/* Files at or above this size use the async hash API; 0 disables ahash. */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use" );

/* default is 0 - 1 page. */
/* ima_maxorder: largest page order tried by ima_alloc_pages(). */
static int ima_maxorder;
/* ima_bufsize: matching byte size (PAGE_SIZE << ima_maxorder), exported below. */
static unsigned int ima_bufsize = PAGE_SIZE;
33 | |
34 | static int param_set_bufsize(const char *val, const struct kernel_param *kp) |
35 | { |
36 | unsigned long long size; |
37 | int order; |
38 | |
39 | size = memparse(ptr: val, NULL); |
40 | order = get_order(size); |
41 | if (order > MAX_PAGE_ORDER) |
42 | return -EINVAL; |
43 | ima_maxorder = order; |
44 | ima_bufsize = PAGE_SIZE << order; |
45 | return 0; |
46 | } |
47 | |
/* Custom param ops: validated set, plain unsigned-int get. */
static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
/* Type-check hook required by module_param_named() for the "bufsize" type. */
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size" );
56 | |
/* Default sync/async tfms for the IMA default hash algorithm. */
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

/* Per-slot pairing of a shash tfm with the hash algorithm it implements. */
struct ima_algo_desc {
	struct crypto_shash *tfm;
	enum hash_algo algo;
};
64 | |
65 | int ima_sha1_idx __ro_after_init; |
66 | int ima_hash_algo_idx __ro_after_init; |
67 | /* |
68 | * Additional number of slots reserved, as needed, for SHA1 |
69 | * and IMA default algo. |
70 | */ |
71 | int __ro_after_init; |
72 | |
73 | static struct ima_algo_desc *ima_algo_array; |
74 | |
75 | static int __init ima_init_ima_crypto(void) |
76 | { |
77 | long rc; |
78 | |
79 | ima_shash_tfm = crypto_alloc_shash(alg_name: hash_algo_name[ima_hash_algo], type: 0, mask: 0); |
80 | if (IS_ERR(ptr: ima_shash_tfm)) { |
81 | rc = PTR_ERR(ptr: ima_shash_tfm); |
82 | pr_err("Can not allocate %s (reason: %ld)\n" , |
83 | hash_algo_name[ima_hash_algo], rc); |
84 | return rc; |
85 | } |
86 | pr_info("Allocated hash algorithm: %s\n" , |
87 | hash_algo_name[ima_hash_algo]); |
88 | return 0; |
89 | } |
90 | |
91 | static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo) |
92 | { |
93 | struct crypto_shash *tfm = ima_shash_tfm; |
94 | int rc, i; |
95 | |
96 | if (algo < 0 || algo >= HASH_ALGO__LAST) |
97 | algo = ima_hash_algo; |
98 | |
99 | if (algo == ima_hash_algo) |
100 | return tfm; |
101 | |
102 | for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) |
103 | if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo) |
104 | return ima_algo_array[i].tfm; |
105 | |
106 | tfm = crypto_alloc_shash(alg_name: hash_algo_name[algo], type: 0, mask: 0); |
107 | if (IS_ERR(ptr: tfm)) { |
108 | rc = PTR_ERR(ptr: tfm); |
109 | pr_err("Can not allocate %s (reason: %d)\n" , |
110 | hash_algo_name[algo], rc); |
111 | } |
112 | return tfm; |
113 | } |
114 | |
115 | int __init ima_init_crypto(void) |
116 | { |
117 | enum hash_algo algo; |
118 | long rc; |
119 | int i; |
120 | |
121 | rc = ima_init_ima_crypto(); |
122 | if (rc) |
123 | return rc; |
124 | |
125 | ima_sha1_idx = -1; |
126 | ima_hash_algo_idx = -1; |
127 | |
128 | for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) { |
129 | algo = ima_tpm_chip->allocated_banks[i].crypto_id; |
130 | if (algo == HASH_ALGO_SHA1) |
131 | ima_sha1_idx = i; |
132 | |
133 | if (algo == ima_hash_algo) |
134 | ima_hash_algo_idx = i; |
135 | } |
136 | |
137 | if (ima_sha1_idx < 0) { |
138 | ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++; |
139 | if (ima_hash_algo == HASH_ALGO_SHA1) |
140 | ima_hash_algo_idx = ima_sha1_idx; |
141 | } |
142 | |
143 | if (ima_hash_algo_idx < 0) |
144 | ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++; |
145 | |
146 | ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots, |
147 | size: sizeof(*ima_algo_array), GFP_KERNEL); |
148 | if (!ima_algo_array) { |
149 | rc = -ENOMEM; |
150 | goto out; |
151 | } |
152 | |
153 | for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) { |
154 | algo = ima_tpm_chip->allocated_banks[i].crypto_id; |
155 | ima_algo_array[i].algo = algo; |
156 | |
157 | /* unknown TPM algorithm */ |
158 | if (algo == HASH_ALGO__LAST) |
159 | continue; |
160 | |
161 | if (algo == ima_hash_algo) { |
162 | ima_algo_array[i].tfm = ima_shash_tfm; |
163 | continue; |
164 | } |
165 | |
166 | ima_algo_array[i].tfm = ima_alloc_tfm(algo); |
167 | if (IS_ERR(ptr: ima_algo_array[i].tfm)) { |
168 | if (algo == HASH_ALGO_SHA1) { |
169 | rc = PTR_ERR(ptr: ima_algo_array[i].tfm); |
170 | ima_algo_array[i].tfm = NULL; |
171 | goto out_array; |
172 | } |
173 | |
174 | ima_algo_array[i].tfm = NULL; |
175 | } |
176 | } |
177 | |
178 | if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) { |
179 | if (ima_hash_algo == HASH_ALGO_SHA1) { |
180 | ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm; |
181 | } else { |
182 | ima_algo_array[ima_sha1_idx].tfm = |
183 | ima_alloc_tfm(algo: HASH_ALGO_SHA1); |
184 | if (IS_ERR(ptr: ima_algo_array[ima_sha1_idx].tfm)) { |
185 | rc = PTR_ERR(ptr: ima_algo_array[ima_sha1_idx].tfm); |
186 | goto out_array; |
187 | } |
188 | } |
189 | |
190 | ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1; |
191 | } |
192 | |
193 | if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) && |
194 | ima_hash_algo_idx != ima_sha1_idx) { |
195 | ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm; |
196 | ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo; |
197 | } |
198 | |
199 | return 0; |
200 | out_array: |
201 | for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) { |
202 | if (!ima_algo_array[i].tfm || |
203 | ima_algo_array[i].tfm == ima_shash_tfm) |
204 | continue; |
205 | |
206 | crypto_free_shash(tfm: ima_algo_array[i].tfm); |
207 | } |
208 | kfree(objp: ima_algo_array); |
209 | out: |
210 | crypto_free_shash(tfm: ima_shash_tfm); |
211 | return rc; |
212 | } |
213 | |
214 | static void ima_free_tfm(struct crypto_shash *tfm) |
215 | { |
216 | int i; |
217 | |
218 | if (tfm == ima_shash_tfm) |
219 | return; |
220 | |
221 | for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) |
222 | if (ima_algo_array[i].tfm == tfm) |
223 | return; |
224 | |
225 | crypto_free_shash(tfm); |
226 | } |
227 | |
228 | /** |
229 | * ima_alloc_pages() - Allocate contiguous pages. |
230 | * @max_size: Maximum amount of memory to allocate. |
231 | * @allocated_size: Returned size of actual allocation. |
232 | * @last_warn: Should the min_size allocation warn or not. |
233 | * |
234 | * Tries to do opportunistic allocation for memory first trying to allocate |
235 | * max_size amount of memory and then splitting that until zero order is |
236 | * reached. Allocation is tried without generating allocation warnings unless |
237 | * last_warn is set. Last_warn set affects only last allocation of zero order. |
238 | * |
239 | * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL) |
240 | * |
241 | * Return pointer to allocated memory, or NULL on failure. |
242 | */ |
243 | static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size, |
244 | int last_warn) |
245 | { |
246 | void *ptr; |
247 | int order = ima_maxorder; |
248 | gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY; |
249 | |
250 | if (order) |
251 | order = min(get_order(max_size), order); |
252 | |
253 | for (; order; order--) { |
254 | ptr = (void *)__get_free_pages(gfp_mask, order); |
255 | if (ptr) { |
256 | *allocated_size = PAGE_SIZE << order; |
257 | return ptr; |
258 | } |
259 | } |
260 | |
261 | /* order is zero - one page */ |
262 | |
263 | gfp_mask = GFP_KERNEL; |
264 | |
265 | if (!last_warn) |
266 | gfp_mask |= __GFP_NOWARN; |
267 | |
268 | ptr = (void *)__get_free_pages(gfp_mask, order: 0); |
269 | if (ptr) { |
270 | *allocated_size = PAGE_SIZE; |
271 | return ptr; |
272 | } |
273 | |
274 | *allocated_size = 0; |
275 | return NULL; |
276 | } |
277 | |
278 | /** |
279 | * ima_free_pages() - Free pages allocated by ima_alloc_pages(). |
280 | * @ptr: Pointer to allocated pages. |
281 | * @size: Size of allocated buffer. |
282 | */ |
283 | static void ima_free_pages(void *ptr, size_t size) |
284 | { |
285 | if (!ptr) |
286 | return; |
287 | free_pages(addr: (unsigned long)ptr, order: get_order(size)); |
288 | } |
289 | |
290 | static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo) |
291 | { |
292 | struct crypto_ahash *tfm = ima_ahash_tfm; |
293 | int rc; |
294 | |
295 | if (algo < 0 || algo >= HASH_ALGO__LAST) |
296 | algo = ima_hash_algo; |
297 | |
298 | if (algo != ima_hash_algo || !tfm) { |
299 | tfm = crypto_alloc_ahash(alg_name: hash_algo_name[algo], type: 0, mask: 0); |
300 | if (!IS_ERR(ptr: tfm)) { |
301 | if (algo == ima_hash_algo) |
302 | ima_ahash_tfm = tfm; |
303 | } else { |
304 | rc = PTR_ERR(ptr: tfm); |
305 | pr_err("Can not allocate %s (reason: %d)\n" , |
306 | hash_algo_name[algo], rc); |
307 | } |
308 | } |
309 | return tfm; |
310 | } |
311 | |
312 | static void ima_free_atfm(struct crypto_ahash *tfm) |
313 | { |
314 | if (tfm != ima_ahash_tfm) |
315 | crypto_free_ahash(tfm); |
316 | } |
317 | |
/*
 * Wait for completion of an async hash request and report failures.
 * Returns the final request status (0 on success).
 */
static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	int ret = crypto_wait_req(err, wait);

	if (ret)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", ret);

	return ret;
}
328 | |
329 | static int ima_calc_file_hash_atfm(struct file *file, |
330 | struct ima_digest_data *hash, |
331 | struct crypto_ahash *tfm) |
332 | { |
333 | loff_t i_size, offset; |
334 | char *rbuf[2] = { NULL, }; |
335 | int rc, rbuf_len, active = 0, ahash_rc = 0; |
336 | struct ahash_request *req; |
337 | struct scatterlist sg[1]; |
338 | struct crypto_wait wait; |
339 | size_t rbuf_size[2]; |
340 | |
341 | hash->length = crypto_ahash_digestsize(tfm); |
342 | |
343 | req = ahash_request_alloc(tfm, GFP_KERNEL); |
344 | if (!req) |
345 | return -ENOMEM; |
346 | |
347 | crypto_init_wait(wait: &wait); |
348 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | |
349 | CRYPTO_TFM_REQ_MAY_SLEEP, |
350 | compl: crypto_req_done, data: &wait); |
351 | |
352 | rc = ahash_wait(err: crypto_ahash_init(req), wait: &wait); |
353 | if (rc) |
354 | goto out1; |
355 | |
356 | i_size = i_size_read(inode: file_inode(f: file)); |
357 | |
358 | if (i_size == 0) |
359 | goto out2; |
360 | |
361 | /* |
362 | * Try to allocate maximum size of memory. |
363 | * Fail if even a single page cannot be allocated. |
364 | */ |
365 | rbuf[0] = ima_alloc_pages(max_size: i_size, allocated_size: &rbuf_size[0], last_warn: 1); |
366 | if (!rbuf[0]) { |
367 | rc = -ENOMEM; |
368 | goto out1; |
369 | } |
370 | |
371 | /* Only allocate one buffer if that is enough. */ |
372 | if (i_size > rbuf_size[0]) { |
373 | /* |
374 | * Try to allocate secondary buffer. If that fails fallback to |
375 | * using single buffering. Use previous memory allocation size |
376 | * as baseline for possible allocation size. |
377 | */ |
378 | rbuf[1] = ima_alloc_pages(max_size: i_size - rbuf_size[0], |
379 | allocated_size: &rbuf_size[1], last_warn: 0); |
380 | } |
381 | |
382 | for (offset = 0; offset < i_size; offset += rbuf_len) { |
383 | if (!rbuf[1] && offset) { |
384 | /* Not using two buffers, and it is not the first |
385 | * read/request, wait for the completion of the |
386 | * previous ahash_update() request. |
387 | */ |
388 | rc = ahash_wait(err: ahash_rc, wait: &wait); |
389 | if (rc) |
390 | goto out3; |
391 | } |
392 | /* read buffer */ |
393 | rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]); |
394 | rc = integrity_kernel_read(file, offset, addr: rbuf[active], |
395 | count: rbuf_len); |
396 | if (rc != rbuf_len) { |
397 | if (rc >= 0) |
398 | rc = -EINVAL; |
399 | /* |
400 | * Forward current rc, do not overwrite with return value |
401 | * from ahash_wait() |
402 | */ |
403 | ahash_wait(err: ahash_rc, wait: &wait); |
404 | goto out3; |
405 | } |
406 | |
407 | if (rbuf[1] && offset) { |
408 | /* Using two buffers, and it is not the first |
409 | * read/request, wait for the completion of the |
410 | * previous ahash_update() request. |
411 | */ |
412 | rc = ahash_wait(err: ahash_rc, wait: &wait); |
413 | if (rc) |
414 | goto out3; |
415 | } |
416 | |
417 | sg_init_one(&sg[0], rbuf[active], rbuf_len); |
418 | ahash_request_set_crypt(req, src: sg, NULL, nbytes: rbuf_len); |
419 | |
420 | ahash_rc = crypto_ahash_update(req); |
421 | |
422 | if (rbuf[1]) |
423 | active = !active; /* swap buffers, if we use two */ |
424 | } |
425 | /* wait for the last update request to complete */ |
426 | rc = ahash_wait(err: ahash_rc, wait: &wait); |
427 | out3: |
428 | ima_free_pages(ptr: rbuf[0], size: rbuf_size[0]); |
429 | ima_free_pages(ptr: rbuf[1], size: rbuf_size[1]); |
430 | out2: |
431 | if (!rc) { |
432 | ahash_request_set_crypt(req, NULL, result: hash->digest, nbytes: 0); |
433 | rc = ahash_wait(err: crypto_ahash_final(req), wait: &wait); |
434 | } |
435 | out1: |
436 | ahash_request_free(req); |
437 | return rc; |
438 | } |
439 | |
440 | static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash) |
441 | { |
442 | struct crypto_ahash *tfm; |
443 | int rc; |
444 | |
445 | tfm = ima_alloc_atfm(algo: hash->algo); |
446 | if (IS_ERR(ptr: tfm)) |
447 | return PTR_ERR(ptr: tfm); |
448 | |
449 | rc = ima_calc_file_hash_atfm(file, hash, tfm); |
450 | |
451 | ima_free_atfm(tfm); |
452 | |
453 | return rc; |
454 | } |
455 | |
456 | static int ima_calc_file_hash_tfm(struct file *file, |
457 | struct ima_digest_data *hash, |
458 | struct crypto_shash *tfm) |
459 | { |
460 | loff_t i_size, offset = 0; |
461 | char *rbuf; |
462 | int rc; |
463 | SHASH_DESC_ON_STACK(shash, tfm); |
464 | |
465 | shash->tfm = tfm; |
466 | |
467 | hash->length = crypto_shash_digestsize(tfm); |
468 | |
469 | rc = crypto_shash_init(desc: shash); |
470 | if (rc != 0) |
471 | return rc; |
472 | |
473 | i_size = i_size_read(inode: file_inode(f: file)); |
474 | |
475 | if (i_size == 0) |
476 | goto out; |
477 | |
478 | rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); |
479 | if (!rbuf) |
480 | return -ENOMEM; |
481 | |
482 | while (offset < i_size) { |
483 | int rbuf_len; |
484 | |
485 | rbuf_len = integrity_kernel_read(file, offset, addr: rbuf, PAGE_SIZE); |
486 | if (rbuf_len < 0) { |
487 | rc = rbuf_len; |
488 | break; |
489 | } |
490 | if (rbuf_len == 0) { /* unexpected EOF */ |
491 | rc = -EINVAL; |
492 | break; |
493 | } |
494 | offset += rbuf_len; |
495 | |
496 | rc = crypto_shash_update(desc: shash, data: rbuf, len: rbuf_len); |
497 | if (rc) |
498 | break; |
499 | } |
500 | kfree(objp: rbuf); |
501 | out: |
502 | if (!rc) |
503 | rc = crypto_shash_final(desc: shash, out: hash->digest); |
504 | return rc; |
505 | } |
506 | |
507 | static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash) |
508 | { |
509 | struct crypto_shash *tfm; |
510 | int rc; |
511 | |
512 | tfm = ima_alloc_tfm(algo: hash->algo); |
513 | if (IS_ERR(ptr: tfm)) |
514 | return PTR_ERR(ptr: tfm); |
515 | |
516 | rc = ima_calc_file_hash_tfm(file, hash, tfm); |
517 | |
518 | ima_free_tfm(tfm); |
519 | |
520 | return rc; |
521 | } |
522 | |
523 | /* |
524 | * ima_calc_file_hash - calculate file hash |
525 | * |
526 | * Asynchronous hash (ahash) allows using HW acceleration for calculating |
527 | * a hash. ahash performance varies for different data sizes on different |
528 | * crypto accelerators. shash performance might be better for smaller files. |
529 | * The 'ima.ahash_minsize' module parameter allows specifying the best |
530 | * minimum file size for using ahash on the system. |
531 | * |
532 | * If the ima.ahash_minsize parameter is not specified, this function uses |
533 | * shash for the hash calculation. If ahash fails, it falls back to using |
534 | * shash. |
535 | */ |
536 | int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) |
537 | { |
538 | loff_t i_size; |
539 | int rc; |
540 | struct file *f = file; |
541 | bool new_file_instance = false; |
542 | |
543 | /* |
544 | * For consistency, fail file's opened with the O_DIRECT flag on |
545 | * filesystems mounted with/without DAX option. |
546 | */ |
547 | if (file->f_flags & O_DIRECT) { |
548 | hash->length = hash_digest_size[ima_hash_algo]; |
549 | hash->algo = ima_hash_algo; |
550 | return -EINVAL; |
551 | } |
552 | |
553 | /* Open a new file instance in O_RDONLY if we cannot read */ |
554 | if (!(file->f_mode & FMODE_READ)) { |
555 | int flags = file->f_flags & ~(O_WRONLY | O_APPEND | |
556 | O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL); |
557 | flags |= O_RDONLY; |
558 | f = dentry_open(path: &file->f_path, flags, creds: file->f_cred); |
559 | if (IS_ERR(ptr: f)) |
560 | return PTR_ERR(ptr: f); |
561 | |
562 | new_file_instance = true; |
563 | } |
564 | |
565 | i_size = i_size_read(inode: file_inode(f)); |
566 | |
567 | if (ima_ahash_minsize && i_size >= ima_ahash_minsize) { |
568 | rc = ima_calc_file_ahash(file: f, hash); |
569 | if (!rc) |
570 | goto out; |
571 | } |
572 | |
573 | rc = ima_calc_file_shash(file: f, hash); |
574 | out: |
575 | if (new_file_instance) |
576 | fput(f); |
577 | return rc; |
578 | } |
579 | |
580 | /* |
581 | * Calculate the hash of template data |
582 | */ |
583 | static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, |
584 | struct ima_template_entry *entry, |
585 | int tfm_idx) |
586 | { |
587 | SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm); |
588 | struct ima_template_desc *td = entry->template_desc; |
589 | int num_fields = entry->template_desc->num_fields; |
590 | int rc, i; |
591 | |
592 | shash->tfm = ima_algo_array[tfm_idx].tfm; |
593 | |
594 | rc = crypto_shash_init(desc: shash); |
595 | if (rc != 0) |
596 | return rc; |
597 | |
598 | for (i = 0; i < num_fields; i++) { |
599 | u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 }; |
600 | u8 *data_to_hash = field_data[i].data; |
601 | u32 datalen = field_data[i].len; |
602 | u32 datalen_to_hash = !ima_canonical_fmt ? |
603 | datalen : (__force u32)cpu_to_le32(datalen); |
604 | |
605 | if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) { |
606 | rc = crypto_shash_update(desc: shash, |
607 | data: (const u8 *) &datalen_to_hash, |
608 | len: sizeof(datalen_to_hash)); |
609 | if (rc) |
610 | break; |
611 | } else if (strcmp(td->fields[i]->field_id, "n" ) == 0) { |
612 | memcpy(buffer, data_to_hash, datalen); |
613 | data_to_hash = buffer; |
614 | datalen = IMA_EVENT_NAME_LEN_MAX + 1; |
615 | } |
616 | rc = crypto_shash_update(desc: shash, data: data_to_hash, len: datalen); |
617 | if (rc) |
618 | break; |
619 | } |
620 | |
621 | if (!rc) |
622 | rc = crypto_shash_final(desc: shash, out: entry->digests[tfm_idx].digest); |
623 | |
624 | return rc; |
625 | } |
626 | |
627 | int ima_calc_field_array_hash(struct ima_field_data *field_data, |
628 | struct ima_template_entry *entry) |
629 | { |
630 | u16 alg_id; |
631 | int rc, i; |
632 | |
633 | rc = ima_calc_field_array_hash_tfm(field_data, entry, tfm_idx: ima_sha1_idx); |
634 | if (rc) |
635 | return rc; |
636 | |
637 | entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1; |
638 | |
639 | for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) { |
640 | if (i == ima_sha1_idx) |
641 | continue; |
642 | |
643 | if (i < NR_BANKS(ima_tpm_chip)) { |
644 | alg_id = ima_tpm_chip->allocated_banks[i].alg_id; |
645 | entry->digests[i].alg_id = alg_id; |
646 | } |
647 | |
648 | /* for unmapped TPM algorithms digest is still a padded SHA1 */ |
649 | if (!ima_algo_array[i].tfm) { |
650 | memcpy(entry->digests[i].digest, |
651 | entry->digests[ima_sha1_idx].digest, |
652 | TPM_DIGEST_SIZE); |
653 | continue; |
654 | } |
655 | |
656 | rc = ima_calc_field_array_hash_tfm(field_data, entry, tfm_idx: i); |
657 | if (rc) |
658 | return rc; |
659 | } |
660 | return rc; |
661 | } |
662 | |
663 | static int calc_buffer_ahash_atfm(const void *buf, loff_t len, |
664 | struct ima_digest_data *hash, |
665 | struct crypto_ahash *tfm) |
666 | { |
667 | struct ahash_request *req; |
668 | struct scatterlist sg; |
669 | struct crypto_wait wait; |
670 | int rc, ahash_rc = 0; |
671 | |
672 | hash->length = crypto_ahash_digestsize(tfm); |
673 | |
674 | req = ahash_request_alloc(tfm, GFP_KERNEL); |
675 | if (!req) |
676 | return -ENOMEM; |
677 | |
678 | crypto_init_wait(wait: &wait); |
679 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | |
680 | CRYPTO_TFM_REQ_MAY_SLEEP, |
681 | compl: crypto_req_done, data: &wait); |
682 | |
683 | rc = ahash_wait(err: crypto_ahash_init(req), wait: &wait); |
684 | if (rc) |
685 | goto out; |
686 | |
687 | sg_init_one(&sg, buf, len); |
688 | ahash_request_set_crypt(req, src: &sg, NULL, nbytes: len); |
689 | |
690 | ahash_rc = crypto_ahash_update(req); |
691 | |
692 | /* wait for the update request to complete */ |
693 | rc = ahash_wait(err: ahash_rc, wait: &wait); |
694 | if (!rc) { |
695 | ahash_request_set_crypt(req, NULL, result: hash->digest, nbytes: 0); |
696 | rc = ahash_wait(err: crypto_ahash_final(req), wait: &wait); |
697 | } |
698 | out: |
699 | ahash_request_free(req); |
700 | return rc; |
701 | } |
702 | |
703 | static int calc_buffer_ahash(const void *buf, loff_t len, |
704 | struct ima_digest_data *hash) |
705 | { |
706 | struct crypto_ahash *tfm; |
707 | int rc; |
708 | |
709 | tfm = ima_alloc_atfm(algo: hash->algo); |
710 | if (IS_ERR(ptr: tfm)) |
711 | return PTR_ERR(ptr: tfm); |
712 | |
713 | rc = calc_buffer_ahash_atfm(buf, len, hash, tfm); |
714 | |
715 | ima_free_atfm(tfm); |
716 | |
717 | return rc; |
718 | } |
719 | |
720 | static int calc_buffer_shash_tfm(const void *buf, loff_t size, |
721 | struct ima_digest_data *hash, |
722 | struct crypto_shash *tfm) |
723 | { |
724 | SHASH_DESC_ON_STACK(shash, tfm); |
725 | unsigned int len; |
726 | int rc; |
727 | |
728 | shash->tfm = tfm; |
729 | |
730 | hash->length = crypto_shash_digestsize(tfm); |
731 | |
732 | rc = crypto_shash_init(desc: shash); |
733 | if (rc != 0) |
734 | return rc; |
735 | |
736 | while (size) { |
737 | len = size < PAGE_SIZE ? size : PAGE_SIZE; |
738 | rc = crypto_shash_update(desc: shash, data: buf, len); |
739 | if (rc) |
740 | break; |
741 | buf += len; |
742 | size -= len; |
743 | } |
744 | |
745 | if (!rc) |
746 | rc = crypto_shash_final(desc: shash, out: hash->digest); |
747 | return rc; |
748 | } |
749 | |
750 | static int calc_buffer_shash(const void *buf, loff_t len, |
751 | struct ima_digest_data *hash) |
752 | { |
753 | struct crypto_shash *tfm; |
754 | int rc; |
755 | |
756 | tfm = ima_alloc_tfm(algo: hash->algo); |
757 | if (IS_ERR(ptr: tfm)) |
758 | return PTR_ERR(ptr: tfm); |
759 | |
760 | rc = calc_buffer_shash_tfm(buf, size: len, hash, tfm); |
761 | |
762 | ima_free_tfm(tfm); |
763 | return rc; |
764 | } |
765 | |
766 | int ima_calc_buffer_hash(const void *buf, loff_t len, |
767 | struct ima_digest_data *hash) |
768 | { |
769 | int rc; |
770 | |
771 | if (ima_ahash_minsize && len >= ima_ahash_minsize) { |
772 | rc = calc_buffer_ahash(buf, len, hash); |
773 | if (!rc) |
774 | return 0; |
775 | } |
776 | |
777 | return calc_buffer_shash(buf, len, hash); |
778 | } |
779 | |
780 | static void ima_pcrread(u32 idx, struct tpm_digest *d) |
781 | { |
782 | if (!ima_tpm_chip) |
783 | return; |
784 | |
785 | if (tpm_pcr_read(chip: ima_tpm_chip, pcr_idx: idx, digest: d) != 0) |
786 | pr_err("Error Communicating to TPM chip\n" ); |
787 | } |
788 | |
789 | /* |
790 | * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With |
791 | * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with |
792 | * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks, |
793 | * allowing firmware to configure and enable different banks. |
794 | * |
795 | * Knowing which TPM bank is read to calculate the boot_aggregate digest |
796 | * needs to be conveyed to a verifier. For this reason, use the same |
797 | * hash algorithm for reading the TPM PCRs as for calculating the boot |
798 | * aggregate digest as stored in the measurement list. |
799 | */ |
800 | static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id, |
801 | struct crypto_shash *tfm) |
802 | { |
803 | struct tpm_digest d = { .alg_id = alg_id, .digest = {0} }; |
804 | int rc; |
805 | u32 i; |
806 | SHASH_DESC_ON_STACK(shash, tfm); |
807 | |
808 | shash->tfm = tfm; |
809 | |
810 | pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n" , |
811 | d.alg_id); |
812 | |
813 | rc = crypto_shash_init(desc: shash); |
814 | if (rc != 0) |
815 | return rc; |
816 | |
817 | /* cumulative digest over TPM registers 0-7 */ |
818 | for (i = TPM_PCR0; i < TPM_PCR8; i++) { |
819 | ima_pcrread(idx: i, d: &d); |
820 | /* now accumulate with current aggregate */ |
821 | rc = crypto_shash_update(desc: shash, data: d.digest, |
822 | len: crypto_shash_digestsize(tfm)); |
823 | if (rc != 0) |
824 | return rc; |
825 | } |
826 | /* |
827 | * Extend cumulative digest over TPM registers 8-9, which contain |
828 | * measurement for the kernel command line (reg. 8) and image (reg. 9) |
829 | * in a typical PCR allocation. Registers 8-9 are only included in |
830 | * non-SHA1 boot_aggregate digests to avoid ambiguity. |
831 | */ |
832 | if (alg_id != TPM_ALG_SHA1) { |
833 | for (i = TPM_PCR8; i < TPM_PCR10; i++) { |
834 | ima_pcrread(idx: i, d: &d); |
835 | rc = crypto_shash_update(desc: shash, data: d.digest, |
836 | len: crypto_shash_digestsize(tfm)); |
837 | } |
838 | } |
839 | if (!rc) |
840 | crypto_shash_final(desc: shash, out: digest); |
841 | return rc; |
842 | } |
843 | |
844 | int ima_calc_boot_aggregate(struct ima_digest_data *hash) |
845 | { |
846 | struct crypto_shash *tfm; |
847 | u16 crypto_id, alg_id; |
848 | int rc, i, bank_idx = -1; |
849 | |
850 | for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) { |
851 | crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id; |
852 | if (crypto_id == hash->algo) { |
853 | bank_idx = i; |
854 | break; |
855 | } |
856 | |
857 | if (crypto_id == HASH_ALGO_SHA256) |
858 | bank_idx = i; |
859 | |
860 | if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1) |
861 | bank_idx = i; |
862 | } |
863 | |
864 | if (bank_idx == -1) { |
865 | pr_err("No suitable TPM algorithm for boot aggregate\n" ); |
866 | return 0; |
867 | } |
868 | |
869 | hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id; |
870 | |
871 | tfm = ima_alloc_tfm(algo: hash->algo); |
872 | if (IS_ERR(ptr: tfm)) |
873 | return PTR_ERR(ptr: tfm); |
874 | |
875 | hash->length = crypto_shash_digestsize(tfm); |
876 | alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id; |
877 | rc = ima_calc_boot_aggregate_tfm(digest: hash->digest, alg_id, tfm); |
878 | |
879 | ima_free_tfm(tfm); |
880 | |
881 | return rc; |
882 | } |
883 | |