// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Serge Hallyn <serue@us.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * File: ima_queue.c
 *       Implements queues that store template measurements and
 *       maintains the aggregate over the stored measurements
 *       in the pre-configured TPM PCR (if available).
 *       The measurement list is append-only.  No entry is
 *       ever removed or changed during the boot-cycle.
 */

#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"

#define AUDIT_CAUSE_LEN_MAX 32

/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;

LIST_HEAD(ima_measurements);	/* list of all measurements */
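/*
 * Memory needed to carry the binary runtime measurement list across
 * kexec.  Without CONFIG_IMA_KEXEC the counter stays at ULONG_MAX,
 * which disables the size accounting in ima_add_digest_entry().
 */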
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif

/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};

/* Mutex protects atomicity of extending the measurement list
 * and extending the TPM PCR aggregate.  Since the TPM extend operation
 * can take a long time (and the TPM driver uses a mutex), we can't use
 * a spinlock here.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);

/* look up the digest value in the hash table and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
							int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
			    digest_value, hash_digest_size[ima_hash_algo]);
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Calculate the memory required for serializing a single
 * binary_runtime_measurement list entry, which contains a
 * couple of variable-length fields (e.g. template name and data).
 */
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
	int size = 0;

	size += sizeof(u32);	/* pcr */
	size += TPM_DIGEST_SIZE;
	size += sizeof(int);	/* template name size field */
	size += strlen(entry->template_desc->name);
	size += sizeof(entry->template_data_len);
	size += entry->template_data_len;
	return size;
}

/* ima_add_template_entry helper function:
 * - Add template entry to the measurement list and hash table, for
 *   all entries except those carried across kexec.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (qe == NULL) {
		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;

	INIT_LIST_HEAD(&qe->later);
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
	if (update_htable) {
		key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	}

	if (binary_runtime_size != ULONG_MAX) {
		int size;

		size = get_binary_runtime_size(entry);
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}

/*
 * Return the amount of memory required for serializing the
 * entire binary_runtime_measurement list, including the ima_kexec_hdr
 * structure.
 */
unsigned long ima_get_binary_runtime_size(void)
{
	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
		return ULONG_MAX;
	else
		return binary_runtime_size + sizeof(struct ima_kexec_hdr);
}

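/*
 * Extend the given PCR with the supplied per-bank digests.  Returns 0
 * when no TPM chip is available or the extend succeeded, and the
 * tpm_pcr_extend() error code otherwise.
 */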
static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
{
	int result = 0;

	if (!ima_tpm_chip)
		return result;

	result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
	if (result != 0)
		pr_err("Error Communicating to TPM chip, result: %d\n", result);
	return result;
}

/*
 * Add template entry to the measurement list and hash table, and
 * extend the pcr.
 *
 * On systems which support carrying the IMA measurement list across
 * kexec, maintain the total memory size required for serializing the
 * binary_runtime_measurements.
 */
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
			   const char *op, struct inode *inode,
			   const unsigned char *filename)
{
	u8 *digest = entry->digests[ima_hash_algo_idx].digest;
	struct tpm_digest *digests_arg = entry->digests;
	const char *audit_cause = "hash_added";
	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
	int audit_info = 1;
	int result = 0, tpmresult = 0;

	mutex_lock(&ima_extend_list_mutex);
	if (!violation) {
		if (ima_lookup_digest_entry(digest, entry->pcr)) {
			audit_cause = "hash_exists";
			result = -EEXIST;
			goto out;
		}
	}

	result = ima_add_digest_entry(entry, 1);
	if (result < 0) {
		audit_cause = "ENOMEM";
		audit_info = 0;
		goto out;
	}

	if (violation)		/* invalidate pcr */
		digests_arg = digests;

	tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
	if (tpmresult != 0) {
		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
			 tpmresult);
		audit_cause = tpm_audit_cause;
		audit_info = 0;
	}
out:
	mutex_unlock(&ima_extend_list_mutex);
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, audit_cause, result, audit_info);
	return result;
}

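/*
 * Restore a measurement entry carried across kexec: add it to the
 * measurement list only, without updating the hash table or
 * re-extending a PCR.
 */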
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
	int result = 0;

	mutex_lock(&ima_extend_list_mutex);
	result = ima_add_digest_entry(entry, 0);
	mutex_unlock(&ima_extend_list_mutex);
	return result;
}

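/*
 * Pre-allocate one tpm_digest per allocated TPM bank and fill each
 * digest with 0xff bytes.  This array is later used to invalidate a
 * PCR on measurement violations (see ima_add_template_entry()).
 */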
int __init ima_init_digests(void)
{
	u16 digest_size;
	u16 crypto_id;
	int i;

	if (!ima_tpm_chip)
		return 0;

	digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
			  GFP_NOFS);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
		digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (crypto_id == HASH_ALGO__LAST)
			digest_size = SHA1_DIGEST_SIZE;

		memset(digests[i].digest, 0xff, digest_size);
	}

	return 0;
}