1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright 2021 Google LLC
4*4882a593Smuzhiyun * Author: Ard Biesheuvel <ardb@google.com>
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * This file is the core of fips140.ko, which contains various crypto algorithms
7*4882a593Smuzhiyun * that are also built into vmlinux. At load time, this module overrides the
8*4882a593Smuzhiyun * built-in implementations of these algorithms with its implementations. It
9*4882a593Smuzhiyun * also runs self-tests on these algorithms and verifies the integrity of its
10*4882a593Smuzhiyun * code and data. If either of these steps fails, the kernel will panic.
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * This module is intended to be loaded at early boot time in order to meet
13*4882a593Smuzhiyun * FIPS 140 and NIAP FPT_TST_EXT.1 requirements. It shouldn't be used if you
14*4882a593Smuzhiyun * don't need to meet these requirements.
15*4882a593Smuzhiyun */
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #undef __DISABLE_EXPORTS
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include <linux/ctype.h>
20*4882a593Smuzhiyun #include <linux/module.h>
21*4882a593Smuzhiyun #include <crypto/aead.h>
22*4882a593Smuzhiyun #include <crypto/aes.h>
23*4882a593Smuzhiyun #include <crypto/hash.h>
24*4882a593Smuzhiyun #include <crypto/sha.h>
25*4882a593Smuzhiyun #include <crypto/skcipher.h>
26*4882a593Smuzhiyun #include <crypto/rng.h>
27*4882a593Smuzhiyun #include <trace/hooks/fips140.h>
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include "fips140-module.h"
30*4882a593Smuzhiyun #include "internal.h"
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun * FIPS 140-2 prefers the use of HMAC with a public key over a plain hash.
34*4882a593Smuzhiyun */
/* A fixed, well-known string: the key is deliberately public (see above). */
u8 __initdata fips140_integ_hmac_key[] = "The quick brown fox jumps over the lazy dog";

/* this is populated by the build tool */
u8 __initdata fips140_integ_hmac_digest[SHA256_DIGEST_SIZE];

/* Markers bounding this module's initcall array; iterated by fips140_init(). */
const u32 __initcall_start_marker __section(".initcalls._start");
const u32 __initcall_end_marker __section(".initcalls._end");

/* Markers bounding the module's .text, used by the integrity check. */
const u8 __fips140_text_start __section(".text.._start");
const u8 __fips140_text_end __section(".text.._end");

/* Markers bounding the module's .rodata, used by the integrity check. */
const u8 __fips140_rodata_start __section(".rodata.._start");
const u8 __fips140_rodata_end __section(".rodata.._end");

/*
 * We need this little detour to prevent Clang from detecting out of bounds
 * accesses to __fips140_text_start and __fips140_rodata_start, which only exist
 * to delineate the section, and so their sizes are not relevant to us.
 */
const u32 *__initcall_start = &__initcall_start_marker;

const u8 *__text_start = &__fips140_text_start;
const u8 *__rodata_start = &__fips140_rodata_start;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun /*
60*4882a593Smuzhiyun * The list of the crypto API algorithms (by cra_name) that will be unregistered
61*4882a593Smuzhiyun * by this module, in preparation for the module registering its own
62*4882a593Smuzhiyun * implementation(s) of them.
63*4882a593Smuzhiyun *
64*4882a593Smuzhiyun * All algorithms that will be declared as FIPS-approved in the module
65*4882a593Smuzhiyun * certification must be listed here, to ensure that the non-FIPS-approved
66*4882a593Smuzhiyun * implementations of these algorithms in the kernel image aren't used.
67*4882a593Smuzhiyun *
68*4882a593Smuzhiyun * For every algorithm in this list, the module should contain all the "same"
69*4882a593Smuzhiyun * implementations that the kernel image does, including the C implementation as
70*4882a593Smuzhiyun * well as any architecture-specific implementations. This is needed to avoid
71*4882a593Smuzhiyun * performance regressions as well as the possibility of an algorithm being
72*4882a593Smuzhiyun * unavailable on some CPUs. E.g., "xcbc(aes)" isn't in this list, as the
73*4882a593Smuzhiyun * module doesn't have a C implementation of it (and it won't be FIPS-approved).
74*4882a593Smuzhiyun *
75*4882a593Smuzhiyun * Due to a quirk in the FIPS requirements, "gcm(aes)" isn't actually able to be
76*4882a593Smuzhiyun * FIPS-approved. However, we otherwise treat it the same as the algorithms
77*4882a593Smuzhiyun * that will be FIPS-approved, and therefore it's included in this list.
78*4882a593Smuzhiyun *
79*4882a593Smuzhiyun * When adding a new algorithm here, make sure to consider whether it needs a
80*4882a593Smuzhiyun * self-test added to fips140_selftests[] as well.
81*4882a593Smuzhiyun */
static const struct {
	const char *name;	/* cra_name of the algorithm to take over */
	bool approved;		/* FIPS-approved service indicator value */
} fips140_algs_to_replace[] = {
	{"aes", true},

	{"cmac(aes)", true},
	{"ecb(aes)", true},

	{"cbc(aes)", true},
	{"cts(cbc(aes))", true},
	{"ctr(aes)", true},
	{"xts(aes)", true},
	/* replaced but not FIPS-approved; see the comment above this table */
	{"gcm(aes)", false},

	{"hmac(sha1)", true},
	{"hmac(sha224)", true},
	{"hmac(sha256)", true},
	{"hmac(sha384)", true},
	{"hmac(sha512)", true},
	{"sha1", true},
	{"sha224", true},
	{"sha256", true},
	{"sha384", true},
	{"sha512", true},

	{"stdrng", true},
	{"jitterentropy_rng", false},
};
111*4882a593Smuzhiyun
fips140_should_unregister_alg(struct crypto_alg * alg)112*4882a593Smuzhiyun static bool __init fips140_should_unregister_alg(struct crypto_alg *alg)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun int i;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun /*
117*4882a593Smuzhiyun * All software algorithms are synchronous, hardware algorithms must
118*4882a593Smuzhiyun * be covered by their own FIPS 140 certification.
119*4882a593Smuzhiyun */
120*4882a593Smuzhiyun if (alg->cra_flags & CRYPTO_ALG_ASYNC)
121*4882a593Smuzhiyun return false;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(fips140_algs_to_replace); i++) {
124*4882a593Smuzhiyun if (!strcmp(alg->cra_name, fips140_algs_to_replace[i].name))
125*4882a593Smuzhiyun return true;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun return false;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun /*
131*4882a593Smuzhiyun * FIPS 140-3 service indicators. FIPS 140-3 requires that all services
132*4882a593Smuzhiyun * "provide an indicator when the service utilises an approved cryptographic
133*4882a593Smuzhiyun * algorithm, security function or process in an approved manner". What this
134*4882a593Smuzhiyun * means is very debatable, even with the help of the FIPS 140-3 Implementation
135*4882a593Smuzhiyun * Guidance document. However, it was decided that a function that takes in an
136*4882a593Smuzhiyun * algorithm name and returns whether that algorithm is approved or not will
137*4882a593Smuzhiyun * meet this requirement. Note, this relies on some properties of the module:
138*4882a593Smuzhiyun *
139*4882a593Smuzhiyun * - The module doesn't distinguish between "services" and "algorithms"; its
140*4882a593Smuzhiyun * services are simply its algorithms.
141*4882a593Smuzhiyun *
142*4882a593Smuzhiyun * - The status of an approved algorithm is never non-approved, since (a) the
143*4882a593Smuzhiyun * module doesn't support operating in a non-approved mode, such as a mode
144*4882a593Smuzhiyun * where the self-tests are skipped; (b) there are no cases where the module
145*4882a593Smuzhiyun * supports non-approved settings for approved algorithms, e.g.
146*4882a593Smuzhiyun * non-approved key sizes; and (c) this function isn't available to be
147*4882a593Smuzhiyun * called until the module_init function has completed, so it's guaranteed
148*4882a593Smuzhiyun * that the self-tests and integrity check have already passed.
149*4882a593Smuzhiyun *
150*4882a593Smuzhiyun * - The module does support some non-approved algorithms, so a single static
151*4882a593Smuzhiyun * indicator ("return true;") would not be acceptable.
152*4882a593Smuzhiyun */
fips140_is_approved_service(const char * name)153*4882a593Smuzhiyun bool fips140_is_approved_service(const char *name)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun size_t i;
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(fips140_algs_to_replace); i++) {
158*4882a593Smuzhiyun if (!strcmp(name, fips140_algs_to_replace[i].name))
159*4882a593Smuzhiyun return fips140_algs_to_replace[i].approved;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun return false;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(fips140_is_approved_service);
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun /*
166*4882a593Smuzhiyun * FIPS 140-3 requires that modules provide a "service" that outputs "the name
167*4882a593Smuzhiyun * or module identifier and the versioning information that can be correlated
168*4882a593Smuzhiyun * with a validation record". This function meets that requirement.
169*4882a593Smuzhiyun *
170*4882a593Smuzhiyun * Note: the module also prints this same information to the kernel log when it
171*4882a593Smuzhiyun * is loaded. That might meet the requirement by itself. However, given the
172*4882a593Smuzhiyun * vagueness of what counts as a "service", we provide this function too, just
173*4882a593Smuzhiyun * in case the certification lab or CMVP is happier with an explicit function.
174*4882a593Smuzhiyun *
175*4882a593Smuzhiyun * Note: /sys/modules/fips140/scmversion also provides versioning information
176*4882a593Smuzhiyun * about the module. However that file just shows the bare git commit ID, so it
177*4882a593Smuzhiyun * probably isn't sufficient to meet the FIPS requirement, which seems to want
178*4882a593Smuzhiyun * the "official" module name and version number used in the FIPS certificate.
179*4882a593Smuzhiyun */
fips140_module_version(void)180*4882a593Smuzhiyun const char *fips140_module_version(void)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun return FIPS140_MODULE_NAME " " FIPS140_MODULE_VERSION;
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(fips140_module_version);
185*4882a593Smuzhiyun
/*
 * Algorithms that were still in active use (cra_refcnt > 1) when this module
 * loaded.  They can't be unregistered, so unregister_existing_fips140_algos()
 * moves them here, off crypto_alg_list, so that new lookups no longer find
 * them.
 */
static LIST_HEAD(existing_live_algos);
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun /*
189*4882a593Smuzhiyun * Release a list of algorithms which have been removed from crypto_alg_list.
190*4882a593Smuzhiyun *
191*4882a593Smuzhiyun * Note that even though the list is a private list, we have to hold
192*4882a593Smuzhiyun * crypto_alg_sem while iterating through it because crypto_unregister_alg() may
193*4882a593Smuzhiyun * run concurrently (as we haven't taken a reference to the algorithms on the
194*4882a593Smuzhiyun * list), and crypto_unregister_alg() will remove the algorithm from whichever
195*4882a593Smuzhiyun * list it happens to be on, while holding crypto_alg_sem. That's okay, since
196*4882a593Smuzhiyun * in that case crypto_unregister_alg() will handle the crypto_alg_put().
197*4882a593Smuzhiyun */
static void fips140_remove_final(struct list_head *list)
{
	struct crypto_alg *alg;
	struct crypto_alg *n;

	/*
	 * We need to take crypto_alg_sem to safely traverse the list (see
	 * comment above), but we have to drop it when doing each
	 * crypto_alg_put() as that may take crypto_alg_sem again.
	 */
	down_write(&crypto_alg_sem);
	list_for_each_entry_safe(alg, n, list, cra_list) {
		/*
		 * Detach the entry while the semaphore is held, so a
		 * concurrent crypto_unregister_alg() can't also remove it.
		 */
		list_del_init(&alg->cra_list);
		up_write(&crypto_alg_sem);

		/* Drop the reference the list effectively held. */
		crypto_alg_put(alg);

		down_write(&crypto_alg_sem);
	}
	up_write(&crypto_alg_sem);
}
219*4882a593Smuzhiyun
/*
 * Find the kernel's built-in copies of the algorithms this module provides
 * and take them out of service: unused ones are torn down outright, while
 * algorithms that already have TFMs allocated are hidden and demoted instead.
 */
static void __init unregister_existing_fips140_algos(void)
{
	struct crypto_alg *alg, *tmp;
	LIST_HEAD(remove_list);
	LIST_HEAD(spawns);

	down_write(&crypto_alg_sem);

	/*
	 * Find all registered algorithms that we care about, and move them to a
	 * private list so that they are no longer exposed via the algo lookup
	 * API. Subsequently, we will unregister them if they are not in active
	 * use. If they are, we can't fully unregister them but we can ensure
	 * that new users won't use them.
	 */
	list_for_each_entry_safe(alg, tmp, &crypto_alg_list, cra_list) {
		if (!fips140_should_unregister_alg(alg))
			continue;
		if (refcount_read(&alg->cra_refcnt) == 1) {
			/*
			 * This algorithm is not currently in use, but there may
			 * be template instances holding references to it via
			 * spawns. So let's tear it down like
			 * crypto_unregister_alg() would, but without releasing
			 * the lock, to prevent races with concurrent TFM
			 * allocations.
			 */
			alg->cra_flags |= CRYPTO_ALG_DEAD;
			list_move(&alg->cra_list, &remove_list);
			crypto_remove_spawns(alg, &spawns, NULL);
		} else {
			/*
			 * This algorithm is live, i.e. it has TFMs allocated,
			 * so we can't fully unregister it. It's not necessary
			 * to dynamically redirect existing users to the FIPS
			 * code, given that they can't be relying on FIPS
			 * certified crypto in the first place. However, we do
			 * need to ensure that new users will get the FIPS code.
			 *
			 * In most cases, setting alg->cra_priority to 0
			 * achieves this. However, that isn't enough for
			 * algorithms like "hmac(sha256)" that need to be
			 * instantiated from a template, since existing
			 * algorithms always take priority over a template being
			 * instantiated. Therefore, we move the algorithm to
			 * a private list so that algorithm lookups won't find
			 * it anymore. To further distinguish it from the FIPS
			 * algorithms, we also append "+orig" to its name.
			 */
			pr_info("found already-live algorithm '%s' ('%s')\n",
				alg->cra_name, alg->cra_driver_name);
			alg->cra_priority = 0;
			strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
			strlcat(alg->cra_driver_name, "+orig",
				CRYPTO_MAX_ALG_NAME);
			list_move(&alg->cra_list, &existing_live_algos);
		}
	}
	up_write(&crypto_alg_sem);

	/* Release the removed algorithms, now that the semaphore is dropped. */
	fips140_remove_final(&remove_list);
	fips140_remove_final(&spawns);
}
283*4882a593Smuzhiyun
/*
 * Zero out the instruction fields that the module loader patched at load time,
 * so that the copy of .text matches what the build-time HMAC was computed
 * over.  Only the relocation types expected in this module are handled; any
 * other type is a fatal error.
 */
static void __init unapply_text_relocations(void *section, int section_size,
					    const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		u32 *place = (u32 *)(section + rela->r_offset);

		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			/* B/BL branches: clear the 26-bit immediate */
			*place &= ~GENMASK(25, 0);
			break;

		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			/* ADR/ADRP: clear the split (immlo:immhi) immediate */
			*place &= ~(GENMASK(30, 29) | GENMASK(23, 5));
			break;

		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			/* ADD/load/store :lo12: — clear the 12-bit immediate */
			*place &= ~GENMASK(21, 10);
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}
324*4882a593Smuzhiyun
/*
 * Same idea as unapply_text_relocations(), but for .rodata: the only
 * relocation type handled there is R_AARCH64_ABS64, whose 64-bit slot the
 * loader filled with an absolute address, so reset those slots to zero.
 */
static void __init unapply_rodata_relocations(void *section, int section_size,
					      const Elf64_Rela *rela, int numrels)
{
	while (numrels--) {
		void *place = section + rela->r_offset;

		BUG_ON(rela->r_offset >= section_size);

		switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
		case R_AARCH64_ABS64:
			*(u64 *)place = 0;
			break;
		default:
			pr_err("unhandled relocation type %llu\n",
			       ELF64_R_TYPE(rela->r_info));
			BUG();
#else
#error
#endif
		}
		rela++;
	}
}
349*4882a593Smuzhiyun
/*
 * Location (as a relative offset — see the offset_to_ptr() calls in
 * check_fips140_module_hmac()) and count of the RELA entries covering .text
 * and .rodata.  Like fips140_integ_hmac_digest, these are presumably filled
 * in by the build tool.
 */
extern struct {
	u32 offset;
	u32 count;
} fips140_rela_text, fips140_rela_rodata;
354*4882a593Smuzhiyun
/*
 * Verify the module's integrity: recompute HMAC-SHA256 over copies of the
 * module's .text and .rodata (with the load-time relocations undone) and
 * compare the result against the digest the build tool embedded in
 * fips140_integ_hmac_digest.  Returns true iff the digests match.
 */
static bool __init check_fips140_module_hmac(void)
{
	struct crypto_shash *tfm = NULL;
	SHASH_DESC_ON_STACK(desc, dontcare);
	u8 digest[SHA256_DIGEST_SIZE];
	void *textcopy, *rodatacopy;
	int textsize, rodatasize;
	bool ok = false;
	int err;

	/* Section sizes come from the marker symbols placed around them. */
	textsize = &__fips140_text_end - &__fips140_text_start;
	rodatasize = &__fips140_rodata_end - &__fips140_rodata_start;

	pr_info("text size : 0x%x\n", textsize);
	pr_info("rodata size: 0x%x\n", rodatasize);

	/* One allocation holds both copies: .text first, then .rodata. */
	textcopy = kmalloc(textsize + rodatasize, GFP_KERNEL);
	if (!textcopy) {
		pr_err("Failed to allocate memory for copy of .text\n");
		goto out;
	}

	rodatacopy = textcopy + textsize;

	memcpy(textcopy, __text_start, textsize);
	memcpy(rodatacopy, __rodata_start, rodatasize);

	// apply the relocations in reverse on the copies of .text and .rodata
	unapply_text_relocations(textcopy, textsize,
				 offset_to_ptr(&fips140_rela_text.offset),
				 fips140_rela_text.count);

	unapply_rodata_relocations(rodatacopy, rodatasize,
				   offset_to_ptr(&fips140_rela_rodata.offset),
				   fips140_rela_rodata.count);

	/* May deliberately corrupt the copy to simulate a failure (testing). */
	fips140_inject_integrity_failure(textcopy);

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate hmac tfm (%ld)\n", PTR_ERR(tfm));
		tfm = NULL;
		goto out;
	}
	desc->tfm = tfm;

	pr_info("using '%s' for integrity check\n",
		crypto_shash_driver_name(tfm));

	/* HMAC .text and .rodata as one message, in that order. */
	err = crypto_shash_setkey(tfm, fips140_integ_hmac_key,
				  strlen(fips140_integ_hmac_key)) ?:
	      crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, textcopy, textsize) ?:
	      crypto_shash_finup(desc, rodatacopy, rodatasize, digest);

	/* Zeroizing this is important; see the comment below. */
	shash_desc_zero(desc);

	if (err) {
		pr_err("failed to calculate hmac shash (%d)\n", err);
		goto out;
	}

	if (memcmp(digest, fips140_integ_hmac_digest, sizeof(digest))) {
		pr_err("provided_digest : %*phN\n", (int)sizeof(digest),
		       fips140_integ_hmac_digest);

		pr_err("calculated digest: %*phN\n", (int)sizeof(digest),
		       digest);
		goto out;
	}
	ok = true;
out:
	/*
	 * FIPS 140-3 requires that all "temporary value(s) generated during the
	 * integrity test" be zeroized (ref: FIPS 140-3 IG 9.7.B). There is no
	 * technical reason to do this given that these values are public
	 * information, but this is the requirement so we follow it.
	 */
	crypto_free_shash(tfm);
	memzero_explicit(digest, sizeof(digest));
	kfree_sensitive(textcopy);
	return ok;
}
439*4882a593Smuzhiyun
/*
 * Vendor-hook handler: compute SHA-256 using this module's copy of the
 * library code, and tell the caller the hook handled the request.
 */
static void fips140_sha256(void *priv, const u8 *data, unsigned int len,
			   u8 *out, int *hook_inuse)
{
	*hook_inuse = 1;
	sha256(data, len, out);
}
446*4882a593Smuzhiyun
/*
 * Vendor-hook handler: expand the AES key using this module's copy of the
 * library code.  The routine's result is passed back through *err.
 */
static void fips140_aes_expandkey(void *priv, struct crypto_aes_ctx *ctx,
				  const u8 *in_key, unsigned int key_len,
				  int *err)
{
	int ret = aes_expandkey(ctx, in_key, key_len);

	*err = ret;
}
453*4882a593Smuzhiyun
/*
 * Vendor-hook handler: encrypt one AES block using this module's copy of the
 * library code, and tell the caller the hook handled the request.
 */
static void fips140_aes_encrypt(void *priv, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	*hook_inuse = 1;
	aes_encrypt(ctx, out, in);
}
460*4882a593Smuzhiyun
/*
 * Vendor-hook handler: decrypt one AES block using this module's copy of the
 * library code, and tell the caller the hook handled the request.
 */
static void fips140_aes_decrypt(void *priv, const struct crypto_aes_ctx *ctx,
				u8 *out, const u8 *in, int *hook_inuse)
{
	*hook_inuse = 1;
	aes_decrypt(ctx, out, in);
}
467*4882a593Smuzhiyun
update_fips140_library_routines(void)468*4882a593Smuzhiyun static bool update_fips140_library_routines(void)
469*4882a593Smuzhiyun {
470*4882a593Smuzhiyun int ret;
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun ret = register_trace_android_vh_sha256(fips140_sha256, NULL) ?:
473*4882a593Smuzhiyun register_trace_android_vh_aes_expandkey(fips140_aes_expandkey, NULL) ?:
474*4882a593Smuzhiyun register_trace_android_vh_aes_encrypt(fips140_aes_encrypt, NULL) ?:
475*4882a593Smuzhiyun register_trace_android_vh_aes_decrypt(fips140_aes_decrypt, NULL);
476*4882a593Smuzhiyun
477*4882a593Smuzhiyun return ret == 0;
478*4882a593Smuzhiyun }
479*4882a593Smuzhiyun
/*
 * Initialize the FIPS 140 module.
 *
 * Note: this routine iterates over the contents of the initcall section, which
 * consists of an array of function pointers that was emitted by the linker
 * rather than the compiler. This means that these function pointers lack the
 * usual CFI stubs that the compiler emits when CFI codegen is enabled. So
 * let's disable CFI locally when handling the initcall array, to avoid
 * surprises.
 */
static int __init __attribute__((__no_sanitize__("cfi")))
fips140_init(void)
{
	const u32 *initcall;

	pr_info("loading " FIPS140_MODULE_NAME " " FIPS140_MODULE_VERSION "\n");
	fips140_init_thread = current;

	/* Take the built-in implementations out of service first. */
	unregister_existing_fips140_algos();

	/*
	 * Iterate over all init routines present in this module and call them.
	 * Each entry is a 32-bit relative offset (see offset_to_ptr()); the
	 * "+ 1" skips the __initcall_start_marker entry itself.
	 */
	for (initcall = __initcall_start + 1;
	     initcall < &__initcall_end_marker;
	     initcall++) {
		int (*init)(void) = offset_to_ptr(initcall);
		int err = init();

		/*
		 * ENODEV is expected from initcalls that only register
		 * algorithms that depend on non-present CPU features. Besides
		 * that, errors aren't expected here.
		 */
		if (err && err != -ENODEV) {
			pr_err("initcall %ps() failed: %d\n", init, err);
			goto panic;
		}
	}

	if (!fips140_run_selftests())
		goto panic;

	/*
	 * It may seem backward to perform the integrity check last, but this
	 * is intentional: the check itself uses hmac(sha256) which is one of
	 * the algorithms that are replaced with versions from this module, and
	 * the integrity check must use the replacement version. Also, to be
	 * ready for FIPS 140-3, the integrity check algorithm must have already
	 * been self-tested.
	 */

	if (!check_fips140_module_hmac()) {
		pr_crit("integrity check failed -- giving up!\n");
		goto panic;
	}
	pr_info("integrity check passed\n");

	/* Signal that the self-tests and integrity check have completed. */
	complete_all(&fips140_tests_done);

	/* Redirect the sha256()/AES library calls into this module. */
	if (!update_fips140_library_routines())
		goto panic;

	if (!fips140_eval_testing_init())
		goto panic;

	pr_info("module successfully loaded\n");
	return 0;

panic:
	/* Any failure above means FIPS requirements can't be met: halt. */
	panic("FIPS 140 module load failure");
}
550*4882a593Smuzhiyun
module_init(fips140_init);

/*
 * The module uses internal crypto API facilities (crypto_alg_sem,
 * crypto_remove_spawns(), ...), which presumably live in the CRYPTO_INTERNAL
 * symbol namespace — hence this explicit import.
 */
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_LICENSE("GPL v2");
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun /*
557*4882a593Smuzhiyun * Crypto-related helper functions, reproduced here so that they will be
558*4882a593Smuzhiyun * covered by the FIPS 140 integrity check.
559*4882a593Smuzhiyun *
560*4882a593Smuzhiyun * Non-cryptographic helper functions such as memcpy() can be excluded from the
561*4882a593Smuzhiyun * FIPS module, but there is ambiguity about other helper functions like
562*4882a593Smuzhiyun * __crypto_xor() and crypto_inc() which aren't cryptographic by themselves,
563*4882a593Smuzhiyun * but are more closely associated with cryptography than e.g. memcpy(). To
564*4882a593Smuzhiyun * err on the side of caution, we include copies of these in the FIPS module.
565*4882a593Smuzhiyun */
/*
 * XOR 'len' bytes of src1 and src2 into dst, processing the data in 8-, 4-
 * and 2-byte chunks before falling back to single bytes for the remainder.
 */
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
	for (; len >= 8; dst += 8, src1 += 8, src2 += 8, len -= 8)
		*(u64 *)dst = *(const u64 *)src1 ^ *(const u64 *)src2;

	for (; len >= 4; dst += 4, src1 += 4, src2 += 4, len -= 4)
		*(u32 *)dst = *(const u32 *)src1 ^ *(const u32 *)src2;

	for (; len >= 2; dst += 2, src1 += 2, src2 += 2, len -= 2)
		*(u16 *)dst = *(const u16 *)src1 ^ *(const u16 *)src2;

	while (len--)
		*dst++ = *src1++ ^ *src2++;
}
595*4882a593Smuzhiyun
/*
 * Increment a 'size'-byte big-endian counter by one, propagating the carry
 * from the last byte towards the first.
 */
void crypto_inc(u8 *a, unsigned int size)
{
	unsigned int i;

	for (i = size; i != 0; i--) {
		/* Stop as soon as a byte doesn't wrap around to zero. */
		if (++a[i - 1] != 0)
			break;
	}
}
604