/*
 * RNG driver for VIA RNGs
 *
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * with the majority of the code coming from:
 *
 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
 *
 * derived from
 *
 * Hardware driver for the AMD 768 Random Number Generator (RNG)
 * (c) Copyright 2001 Red Hat Inc
 *
 * derived from
 *
 * Hardware driver for Intel i810 Random Number Generator (RNG)
 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <asm/cpu_device_id.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>

enum {
	VIA_STRFILT_CNT_SHIFT	= 16,
	VIA_STRFILT_FAIL	= (1 << 15),
	VIA_STRFILT_ENABLE	= (1 << 14),
	VIA_RAWBITS_ENABLE	= (1 << 13),
	VIA_RNG_ENABLE		= (1 << 6),
	VIA_NOISESRC1		= (1 << 8),
	VIA_NOISESRC2		= (1 << 9),
	VIA_XSTORE_CNT_MASK	= 0x0F,

	VIA_RNG_CHUNK_8		= 0x00,	/* 64 rand bits, 64 stored bits */
	VIA_RNG_CHUNK_4		= 0x01,	/* 32 rand bits, 32 stored bits */
	VIA_RNG_CHUNK_4_MASK	= 0xFFFFFFFF,
	VIA_RNG_CHUNK_2		= 0x02,	/* 16 rand bits, 32 stored bits */
	VIA_RNG_CHUNK_2_MASK	= 0xFFFF,
	VIA_RNG_CHUNK_1		= 0x03,	/* 8 rand bits, 32 stored bits */
	VIA_RNG_CHUNK_1_MASK	= 0xFF,
};
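
/*
 * How the chunk codes and masks fit together (an illustrative sketch,
 * not driver code): the chunk code is passed to xstore below in EDX,
 * the low nibble of the EAX status that comes back is the count of
 * bytes actually stored, and the matching mask isolates the valid
 * random bits:
 *
 *	eax   = xstore(buf, VIA_RNG_CHUNK_2);
 *	count = eax & VIA_XSTORE_CNT_MASK;
 *	bits  = *buf & VIA_RNG_CHUNK_2_MASK;
 */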

/*
 * Investigate using the 'rep' prefix to obtain 32 bits of random data
 * in one insn.  The upside is potentially better performance.  The
 * downside is that the instruction is no longer atomic.  Due to
 * this, just like familiar issues with /dev/random itself, the worst
 * case of a 'rep xstore' could potentially pause a cpu for an
 * unreasonably long time.  In practice, this condition would likely
 * only occur when the hardware is failing.  (or so we hope :))
 *
 * Another possible performance boost may come from simply buffering
 * until we have 4 bytes, thus returning a u32 at a time,
 * instead of the current u8-at-a-time.
 *
 * PadLock instructions can generate a spurious DNA fault, but the
 * kernel doesn't use CR0.TS, so this doesn't matter.
 */

static inline u32 xstore(u32 *addr, u32 edx_in)
{
	u32 eax_out;

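	/*
	 * 0x0F 0xA7 0xC0 is the XSTORE-RNG opcode, emitted as raw bytes
	 * presumably because older assemblers don't know the PadLock
	 * instructions.  EDI carries the destination pointer, EDX the
	 * chunk-size code, and EAX receives the low word of MSR_VIA_RNG
	 * as a status.
	 */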
	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));

	return eax_out;
}

static int via_rng_data_present(struct hwrng *rng, int wait)
{
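	/*
	 * Bounce buffer for xstore.  Some CPUs reportedly store a full
	 * 8-byte chunk even when asked for a single byte, so the
	 * destination gets PadLock alignment and spare room instead of
	 * being a bare u32 on the stack.
	 */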
	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	u32 bytes_out;
	int i;

	/* We choose the recommended 1-byte-per-instruction RNG rate,
	 * for greater randomness at the expense of speed.  Larger
	 * values 2, 4, or 8 bytes-per-instruction yield greater
	 * speed at lesser randomness.
	 *
	 * If you change this to another VIA_RNG_CHUNK_n, the read path
	 * must also be updated to consume the wider chunks;
	 * VIA_RNG_CHUNK_8 requires further code changes.
	 *
	 * A copy of MSR_VIA_RNG is placed in eax_out when xstore
	 * completes.
	 */

	for (i = 0; i < 20; i++) {
		*via_rng_datum = 0; /* paranoia, not really necessary */
		bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
		bytes_out &= VIA_XSTORE_CNT_MASK;
		if (bytes_out || !wait)
			break;
		udelay(10);
	}
	rng->priv = *via_rng_datum;
	return bytes_out ? 1 : 0;
}

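/*
 * The hwrng core calls ->data_present first; the byte it pulled from
 * the hardware is parked in rng->priv above so that ->data_read can
 * hand it back without issuing another xstore.
 */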
static int via_rng_data_read(struct hwrng *rng, u32 *data)
{
	u32 via_rng_datum = (u32)rng->priv;

	*data = via_rng_datum;

	return 1;
}

static int via_rng_init(struct hwrng *rng)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	u32 lo, hi, old_lo;

	/* VIA Nano CPUs don't have the MSR_VIA_RNG anymore.  The RNG
	 * is always enabled if CPUID rng_en is set.  The RNG
	 * configuration that used to live in this register is gone. */
	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)) {
		if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
			pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n");
			return -ENODEV;
		}
		return 0;
	}

	/* Control the RNG via MSR.  Tread lightly and pay very close
	 * attention to values written, as the reserved fields are
	 * documented to be "undefined and unpredictable"; but it
	 * does not say to write them as zero, so I make a guess that
	 * we restore the values we find in the register.
	 */
	rdmsr(MSR_VIA_RNG, lo, hi);

	old_lo = lo;
	lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
	lo &= ~VIA_XSTORE_CNT_MASK;
	lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
	lo |= VIA_RNG_ENABLE;
	lo |= VIA_NOISESRC1;

	/* Enable secondary noise source on CPUs where it is present. */

	/* Nehemiah stepping 8 and higher */
	if ((c->x86_model == 9) && (c->x86_stepping > 7))
		lo |= VIA_NOISESRC2;

	/* Esther */
	if (c->x86_model >= 10)
		lo |= VIA_NOISESRC2;

	if (lo != old_lo)
		wrmsr(MSR_VIA_RNG, lo, hi);

	/* perhaps-unnecessary sanity check; remove after testing if
	   unneeded */
	rdmsr(MSR_VIA_RNG, lo, hi);
	if ((lo & VIA_RNG_ENABLE) == 0) {
		pr_err(PFX "cannot enable VIA C3 RNG, aborting\n");
		return -ENODEV;
	}

	return 0;
}


static struct hwrng via_rng = {
	.name		= "via",
	.init		= via_rng_init,
	.data_present	= via_rng_data_present,
	.data_read	= via_rng_data_read,
};


static int __init mod_init(void)
{
	int err;

	if (!boot_cpu_has(X86_FEATURE_XSTORE))
		return -ENODEV;

	pr_info("VIA RNG detected\n");
	err = hwrng_register(&via_rng);
	if (err)
		pr_err(PFX "RNG registering failed (%d)\n", err);
	return err;
}
module_init(mod_init);

static void __exit mod_exit(void)
{
	hwrng_unregister(&via_rng);
}
module_exit(mod_exit);

static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);

MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
MODULE_LICENSE("GPL");