// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
        unsigned long long size;
        int order;

        size = memparse(val, NULL);
        order = get_order(size);
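        /*
         * get_order() rounds up to the next power-of-two number of pages,
         * so ima_bufsize is always a power-of-two multiple of PAGE_SIZE.
         */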
        if (order >= MAX_ORDER)
                return -EINVAL;
        ima_maxorder = order;
        ima_bufsize = PAGE_SIZE << order;
        return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
        .set = param_set_bufsize,
        .get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
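/*
 * Example (illustrative values): IMA is built into the kernel, so these
 * parameters are passed on the kernel command line with the "ima." prefix,
 * or written to /sys/module/ima/parameters/ at runtime:
 *
 *	ima.ahash_minsize=1048576 ima.ahash_bufsize=2M
 *
 * ahash_minsize takes a plain byte count; ahash_bufsize is parsed with
 * memparse(), so the usual K/M/G suffixes are accepted.
 */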

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

int __init ima_init_crypto(void)
{
        long rc;

        ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
        if (IS_ERR(ima_shash_tfm)) {
                rc = PTR_ERR(ima_shash_tfm);
                pr_err("Can not allocate %s (reason: %ld)\n",
                       hash_algo_name[ima_hash_algo], rc);
                return rc;
        }
        pr_info("Allocated hash algorithm: %s\n",
                hash_algo_name[ima_hash_algo]);
        return 0;
}

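/*
 * Return a shash tfm for @algo.  The preallocated default-algorithm tfm
 * (ima_shash_tfm) is reused when possible; ima_free_tfm() only frees
 * transforms other than that shared one.
 */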
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
        struct crypto_shash *tfm = ima_shash_tfm;
        int rc;

        if (algo < 0 || algo >= HASH_ALGO__LAST)
                algo = ima_hash_algo;

        if (algo != ima_hash_algo) {
                tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
                if (IS_ERR(tfm)) {
                        rc = PTR_ERR(tfm);
                        pr_err("Can not allocate %s (reason: %d)\n",
                               hash_algo_name[algo], rc);
                }
        }
        return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
        if (tfm != ima_shash_tfm)
                crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of the actual allocation.
 * @last_warn: Should the final, zero-order allocation warn on failure or not.
 *
 * Allocates opportunistically: first try to allocate max_size bytes worth of
 * pages, then retry with successively smaller orders until a zero-order
 * (single page) allocation is reached.  No allocation warnings are generated
 * unless last_warn is set, and last_warn only affects the final zero-order
 * attempt.
 *
 * By default, ima_maxorder is 0 and this is equivalent to kmalloc(GFP_KERNEL).
 *
 * Return: pointer to the allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
                             int last_warn)
{
        void *ptr;
        int order = ima_maxorder;
        gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

        if (order)
                order = min(get_order(max_size), order);

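        /*
         * Walk down from the largest permitted order; e.g. with
         * ima_maxorder == 2 and max_size == 4 * PAGE_SIZE, try a 4-page
         * block, then a 2-page block, before falling through to the
         * single-page case below.
         */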
        for (; order; order--) {
                ptr = (void *)__get_free_pages(gfp_mask, order);
                if (ptr) {
                        *allocated_size = PAGE_SIZE << order;
                        return ptr;
                }
        }

        /* order is zero - one page */

        gfp_mask = GFP_KERNEL;

        if (!last_warn)
                gfp_mask |= __GFP_NOWARN;

        ptr = (void *)__get_free_pages(gfp_mask, 0);
        if (ptr) {
                *allocated_size = PAGE_SIZE;
                return ptr;
        }

        *allocated_size = 0;
        return NULL;
}

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr: Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
        if (!ptr)
                return;
        free_pages((unsigned long)ptr, get_order(size));
}

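/*
 * Like ima_alloc_tfm(), but for the async hash API.  The tfm for the
 * default algorithm is allocated lazily on first use and cached in
 * ima_ahash_tfm; ima_free_atfm() leaves that cached transform alone.
 */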
static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
        struct crypto_ahash *tfm = ima_ahash_tfm;
        int rc;

        if (algo < 0 || algo >= HASH_ALGO__LAST)
                algo = ima_hash_algo;

        if (algo != ima_hash_algo || !tfm) {
                tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
                if (!IS_ERR(tfm)) {
                        if (algo == ima_hash_algo)
                                ima_ahash_tfm = tfm;
                } else {
                        rc = PTR_ERR(tfm);
                        pr_err("Can not allocate %s (reason: %d)\n",
                               hash_algo_name[algo], rc);
                }
        }
        return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
        if (tfm != ima_ahash_tfm)
                crypto_free_ahash(tfm);
}

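/* Wait for an async hash request to complete and rate-limit any error. */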
static inline int ahash_wait(int err, struct crypto_wait *wait)
{
        err = crypto_wait_req(err, wait);

        if (err)
                pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

        return err;
}

static int ima_calc_file_hash_atfm(struct file *file,
                                   struct ima_digest_data *hash,
                                   struct crypto_ahash *tfm)
{
        loff_t i_size, offset;
        char *rbuf[2] = { NULL, };
        int rc, rbuf_len, active = 0, ahash_rc = 0;
        struct ahash_request *req;
        struct scatterlist sg[1];
        struct crypto_wait wait;
        size_t rbuf_size[2];

        hash->length = crypto_ahash_digestsize(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        crypto_init_wait(&wait);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                   CRYPTO_TFM_REQ_MAY_SLEEP,
                                   crypto_req_done, &wait);

        rc = ahash_wait(crypto_ahash_init(req), &wait);
        if (rc)
                goto out1;

        i_size = i_size_read(file_inode(file));

        if (i_size == 0)
                goto out2;

        /*
         * Try to allocate the maximum amount of memory.
         * Fail if even a single page cannot be allocated.
         */
        rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
        if (!rbuf[0]) {
                rc = -ENOMEM;
                goto out1;
        }

        /* Only allocate one buffer if that is enough. */
        if (i_size > rbuf_size[0]) {
                /*
                 * Try to allocate a secondary buffer. If that fails, fall
                 * back to using a single buffer. Use the previous allocation
                 * size as a baseline for the possible allocation size.
                 */
                rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
                                          &rbuf_size[1], 0);
        }

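        /*
         * Pipeline reads and hash updates: while the crypto layer hashes the
         * buffer that was just submitted, read the next chunk of the file
         * into the other buffer (if a second buffer was obtained above).
         */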
        for (offset = 0; offset < i_size; offset += rbuf_len) {
                if (!rbuf[1] && offset) {
                        /* Not using two buffers, and it is not the first
                         * read/request, wait for the completion of the
                         * previous ahash_update() request.
                         */
                        rc = ahash_wait(ahash_rc, &wait);
                        if (rc)
                                goto out3;
                }
                /* read buffer */
                rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
                rc = integrity_kernel_read(file, offset, rbuf[active],
                                           rbuf_len);
                if (rc != rbuf_len) {
                        if (rc >= 0)
                                rc = -EINVAL;
                        /*
                         * Forward current rc, do not overwrite with
                         * return value from ahash_wait()
                         */
                        ahash_wait(ahash_rc, &wait);
                        goto out3;
                }

                if (rbuf[1] && offset) {
                        /* Using two buffers, and it is not the first
                         * read/request, wait for the completion of the
                         * previous ahash_update() request.
                         */
                        rc = ahash_wait(ahash_rc, &wait);
                        if (rc)
                                goto out3;
                }

                sg_init_one(&sg[0], rbuf[active], rbuf_len);
                ahash_request_set_crypt(req, sg, NULL, rbuf_len);

                ahash_rc = crypto_ahash_update(req);

                if (rbuf[1])
                        active = !active; /* swap buffers, if we use two */
        }
        /* wait for the last update request to complete */
        rc = ahash_wait(ahash_rc, &wait);
out3:
        ima_free_pages(rbuf[0], rbuf_size[0]);
        ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
        if (!rc) {
                ahash_request_set_crypt(req, NULL, hash->digest, 0);
                rc = ahash_wait(crypto_ahash_final(req), &wait);
        }
out1:
        ahash_request_free(req);
        return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
        struct crypto_ahash *tfm;
        int rc;

        tfm = ima_alloc_atfm(hash->algo);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = ima_calc_file_hash_atfm(file, hash, tfm);

        ima_free_atfm(tfm);

        return rc;
}

static int ima_calc_file_hash_tfm(struct file *file,
                                  struct ima_digest_data *hash,
                                  struct crypto_shash *tfm)
{
        loff_t i_size, offset = 0;
        char *rbuf;
        int rc;
        SHASH_DESC_ON_STACK(shash, tfm);

        shash->tfm = tfm;

        hash->length = crypto_shash_digestsize(tfm);

        rc = crypto_shash_init(shash);
        if (rc != 0)
                return rc;

        i_size = i_size_read(file_inode(file));

        if (i_size == 0)
                goto out;

        rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!rbuf)
                return -ENOMEM;

        while (offset < i_size) {
                int rbuf_len;

                rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
                if (rbuf_len < 0) {
                        rc = rbuf_len;
                        break;
                }
                if (rbuf_len == 0)
                        break;
                offset += rbuf_len;

                rc = crypto_shash_update(shash, rbuf, rbuf_len);
                if (rc)
                        break;
        }
        kfree(rbuf);
out:
        if (!rc)
                rc = crypto_shash_final(shash, hash->digest);
        return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
        struct crypto_shash *tfm;
        int rc;

        tfm = ima_alloc_tfm(hash->algo);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = ima_calc_file_hash_tfm(file, hash, tfm);

        ima_free_tfm(tfm);

        return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
        loff_t i_size;
        int rc;
        struct file *f = file;
        bool new_file_instance = false;

        /*
         * For consistency, fail files opened with the O_DIRECT flag on
         * filesystems mounted with or without the DAX option.
         */
        if (file->f_flags & O_DIRECT) {
                hash->length = hash_digest_size[ima_hash_algo];
                hash->algo = ima_hash_algo;
                return -EINVAL;
        }

        /* Open a new file instance in O_RDONLY if we cannot read */
        if (!(file->f_mode & FMODE_READ)) {
                int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
                                O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
                flags |= O_RDONLY;
                f = dentry_open(&file->f_path, flags, file->f_cred);
                if (IS_ERR(f))
                        return PTR_ERR(f);

                new_file_instance = true;
        }

        i_size = i_size_read(file_inode(f));

        if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
                rc = ima_calc_file_ahash(f, hash);
                if (!rc)
                        goto out;
        }

        rc = ima_calc_file_shash(f, hash);
out:
        if (new_file_instance)
                fput(f);
        return rc;
}

/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
                                         struct ima_template_desc *td,
                                         int num_fields,
                                         struct ima_digest_data *hash,
                                         struct crypto_shash *tfm)
{
        SHASH_DESC_ON_STACK(shash, tfm);
        int rc, i;

        shash->tfm = tfm;

        hash->length = crypto_shash_digestsize(tfm);

        rc = crypto_shash_init(shash);
        if (rc != 0)
                return rc;

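        /*
         * For any template other than the original "ima" one, each field is
         * hashed as a 32-bit length (little-endian if ima_canonical_fmt)
         * followed by the field data.  For the "ima" template, the event
         * name field ("n") is instead zero-padded to a fixed
         * IMA_EVENT_NAME_LEN_MAX + 1 bytes, with no length prefix.
         */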
        for (i = 0; i < num_fields; i++) {
                u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
                u8 *data_to_hash = field_data[i].data;
                u32 datalen = field_data[i].len;
                u32 datalen_to_hash =
                    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

                if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
                        rc = crypto_shash_update(shash,
                                                 (const u8 *) &datalen_to_hash,
                                                 sizeof(datalen_to_hash));
                        if (rc)
                                break;
                } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
                        memcpy(buffer, data_to_hash, datalen);
                        data_to_hash = buffer;
                        datalen = IMA_EVENT_NAME_LEN_MAX + 1;
                }
                rc = crypto_shash_update(shash, data_to_hash, datalen);
                if (rc)
                        break;
        }

        if (!rc)
                rc = crypto_shash_final(shash, hash->digest);

        return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
                              struct ima_template_desc *desc, int num_fields,
                              struct ima_digest_data *hash)
{
        struct crypto_shash *tfm;
        int rc;

        tfm = ima_alloc_tfm(hash->algo);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
                                           hash, tfm);

        ima_free_tfm(tfm);

        return rc;
}

static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
                                  struct ima_digest_data *hash,
                                  struct crypto_ahash *tfm)
{
        struct ahash_request *req;
        struct scatterlist sg;
        struct crypto_wait wait;
        int rc, ahash_rc = 0;

        hash->length = crypto_ahash_digestsize(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        crypto_init_wait(&wait);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                   CRYPTO_TFM_REQ_MAY_SLEEP,
                                   crypto_req_done, &wait);

        rc = ahash_wait(crypto_ahash_init(req), &wait);
        if (rc)
                goto out;

        sg_init_one(&sg, buf, len);
        ahash_request_set_crypt(req, &sg, NULL, len);

        ahash_rc = crypto_ahash_update(req);

        /* wait for the update request to complete */
        rc = ahash_wait(ahash_rc, &wait);
        if (!rc) {
                ahash_request_set_crypt(req, NULL, hash->digest, 0);
                rc = ahash_wait(crypto_ahash_final(req), &wait);
        }
out:
        ahash_request_free(req);
        return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
                             struct ima_digest_data *hash)
{
        struct crypto_ahash *tfm;
        int rc;

        tfm = ima_alloc_atfm(hash->algo);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

        ima_free_atfm(tfm);

        return rc;
}

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
                                 struct ima_digest_data *hash,
                                 struct crypto_shash *tfm)
{
        SHASH_DESC_ON_STACK(shash, tfm);
        unsigned int len;
        int rc;

        shash->tfm = tfm;

        hash->length = crypto_shash_digestsize(tfm);

        rc = crypto_shash_init(shash);
        if (rc != 0)
                return rc;

        while (size) {
                len = size < PAGE_SIZE ? size : PAGE_SIZE;
                rc = crypto_shash_update(shash, buf, len);
                if (rc)
                        break;
                buf += len;
                size -= len;
        }

        if (!rc)
                rc = crypto_shash_final(shash, hash->digest);
        return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
                             struct ima_digest_data *hash)
{
        struct crypto_shash *tfm;
        int rc;

        tfm = ima_alloc_tfm(hash->algo);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

        ima_free_tfm(tfm);
        return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
                         struct ima_digest_data *hash)
{
        int rc;

        if (ima_ahash_minsize && len >= ima_ahash_minsize) {
                rc = calc_buffer_ahash(buf, len, hash);
                if (!rc)
                        return 0;
        }

        return calc_buffer_shash(buf, len, hash);
}

static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
        if (!ima_tpm_chip)
                return;

        if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
                pr_err("Error Communicating to TPM chip\n");
}

/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Knowing which TPM bank is read to calculate the boot_aggregate digest
 * needs to be conveyed to a verifier. For this reason, use the same
 * hash algorithm for reading the TPM PCRs as for calculating the boot
 * aggregate digest as stored in the measurement list.
 */
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
                                       struct crypto_shash *tfm)
{
        struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
        int rc;
        u32 i;
        SHASH_DESC_ON_STACK(shash, tfm);

        shash->tfm = tfm;

        pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
                 d.alg_id);

        rc = crypto_shash_init(shash);
        if (rc != 0)
                return rc;

        /* cumulative digest over TPM registers 0-7 */
        for (i = TPM_PCR0; i < TPM_PCR8; i++) {
                ima_pcrread(i, &d);
                /* now accumulate with current aggregate */
                rc = crypto_shash_update(shash, d.digest,
                                         crypto_shash_digestsize(tfm));
                if (rc != 0)
                        return rc;
        }
        /*
         * Extend cumulative digest over TPM registers 8-9, which contain
         * measurement for the kernel command line (reg. 8) and image (reg. 9)
         * in a typical PCR allocation. Registers 8-9 are only included in
         * non-SHA1 boot_aggregate digests to avoid ambiguity.
         */
        if (alg_id != TPM_ALG_SHA1) {
                for (i = TPM_PCR8; i < TPM_PCR10; i++) {
                        ima_pcrread(i, &d);
                        rc = crypto_shash_update(shash, d.digest,
                                                 crypto_shash_digestsize(tfm));
                }
        }
        if (!rc)
                crypto_shash_final(shash, digest);
        return rc;
}

int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
        struct crypto_shash *tfm;
        u16 crypto_id, alg_id;
        int rc, i, bank_idx = -1;

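        /*
         * Pick the TPM bank to base the boot_aggregate on: prefer the bank
         * matching the requested algorithm, then a SHA256 bank, and only
         * fall back to SHA1 when nothing better is allocated.
         */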
        for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
                crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
                if (crypto_id == hash->algo) {
                        bank_idx = i;
                        break;
                }

                if (crypto_id == HASH_ALGO_SHA256)
                        bank_idx = i;

                if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
                        bank_idx = i;
        }

        if (bank_idx == -1) {
                pr_err("No suitable TPM algorithm for boot aggregate\n");
                return 0;
        }

        hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

        tfm = ima_alloc_tfm(hash->algo);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        hash->length = crypto_shash_digestsize(tfm);
        alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
        rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

        ima_free_tfm(tfm);

        return rc;
}