xref: /OK3568_Linux_fs/kernel/drivers/char/random.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 *   - Initialization and readiness waiting.
 *   - Fast key erasure RNG, the "crng".
 *   - Entropy accumulation and extraction routines.
 *   - Entropy collection routines.
 *   - Userspace reader/writer interfaces.
 *   - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Prior to initialization, some of that
 * data is then "credited" as having a certain number of bits of entropy.
 * When enough bits of entropy are available, the hash is finalized and
 * handed as a key to a stream cipher that expands it indefinitely for
 * various consumers. This key is periodically refreshed as the various
 * entropy collectors, described below, add data to the input pool.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/siphash.h>
#include <linux/uio.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

// GKI: Keep this header to retain the original CRC that previously used the
// random.h tracepoints.
#include <linux/writeback.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from empty->early->ready).
 */
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
#define crng_ready() (likely(crng_init >= CRNG_READY))
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_chain_lock);
static RAW_NOTIFIER_HEAD(random_ready_chain);

/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
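
/*
 * For example, a consumer that must never see unseeded output can pair
 * the two interfaces like this (illustrative sketch only; my_init and
 * key are hypothetical, not part of this driver):
 *
 *	static int my_init(void)
 *	{
 *		u8 key[32];
 *		int ret = wait_for_random_bytes();
 *
 *		if (ret)
 *			return ret;
 *		get_random_bytes(key, sizeof(key));
 *		return 0;
 *	}
 */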

/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 */
int __cold register_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret = -EALREADY;

	if (crng_ready())
		return ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	if (!crng_ready())
		ret = raw_notifier_chain_register(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}

/*
 * Delete a previously registered readiness callback function.
 */
int __cold unregister_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}
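
/*
 * A sketch of how a caller might use the notifier pair above (the my_*
 * names are hypothetical). Note that -EALREADY from registration means
 * the pool is already seeded, so the caller may simply proceed:
 *
 *	static int my_rng_ready(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		schedule_work(&my_seed_work);
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_rng_ready };
 *
 *	ret = register_random_ready_notifier(&my_nb);
 *	if (ret == -EALREADY)
 *		schedule_work(&my_seed_work);
 */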

static void process_oldschool_random_ready_list(void);
static void __cold process_random_ready_list(void)
{
	unsigned long flags;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);

	process_oldschool_random_ready_list();
}

#define warn_unseeded_randomness() \
	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
				__func__, (void *)_RET_IP_, crng_init)


/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t len)
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u32, u64, int, and long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/
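
/*
 * As a rough illustration of the interfaces above, the two snippets
 * below (cookie is a hypothetical variable) yield randomness of the
 * same quality; the second form is merely cheaper for one-off integers
 * because it draws from a per-cpu batch:
 *
 *	u32 cookie;
 *	get_random_bytes(&cookie, sizeof(cookie));
 *
 *	u32 cookie = get_random_u32();
 */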

enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);

/* This extracts a new crng key from the input pool. */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (!crng_ready())
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}
/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after use in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state[4] so
 * that this function overwrites it before returning.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}

/*
 * Return whether the crng seed is considered to be sufficiently old
 * that a reseeding is needed. This happens if the last reseeding
 * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
 * proportional to the uptime.
 */
static bool crng_has_old_seed(void)
{
	static bool early_boot = true;
	unsigned long interval = CRNG_RESEED_INTERVAL;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
					 (unsigned int)uptime / 2 * HZ);
	}
	return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is old enough, we reseed, which in turn bumps the
	 * generation counter that we check below.
	 */
	if (unlikely(crng_has_old_seed()))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}

static void _get_random_bytes(void *buf, size_t len)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t first_block_len;

	if (!len)
		return;

	first_block_len = min_t(size_t, 32, len);
	crng_make_state(chacha_state, buf, first_block_len);
	len -= first_block_len;
	buf += first_block_len;

	while (len) {
		if (len < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, len);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		len -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}

/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, int len)
{
	warn_unseeded_randomness();
	_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 block[CHACHA_BLOCK_SIZE];
	size_t ret = 0, copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_iter() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, block);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		copied = copy_to_iter(block, sizeof(block), iter);
		ret += copied;
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * numbers is as good as /dev/urandom's. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */

#define DEFINE_BATCHED_ENTROPY(type)						\
struct batch_ ##type {								\
	/*									\
	 * We make this 1.5x a ChaCha block, so that we get the			\
	 * remaining 32 bytes from fast key erasure, plus one full		\
	 * block from the detached ChaCha state. We can increase		\
	 * the size of this later if needed so long as we keep the		\
	 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.		\
	 */									\
	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))];		\
	local_lock_t lock;							\
	unsigned long generation;						\
	unsigned int position;							\
};										\
										\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {	\
	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock),			\
	.position = UINT_MAX							\
};										\
										\
type get_random_ ##type(void)							\
{										\
	type ret;								\
	unsigned long flags;							\
	struct batch_ ##type *batch;						\
	unsigned long next_gen;							\
										\
	warn_unseeded_randomness();						\
										\
	if (!crng_ready()) {							\
		_get_random_bytes(&ret, sizeof(ret));				\
		return ret;							\
	}									\
										\
	local_lock_irqsave(&batched_entropy_ ##type.lock, flags);		\
	batch = raw_cpu_ptr(&batched_entropy_##type);				\
										\
	next_gen = READ_ONCE(base_crng.generation);				\
	if (batch->position >= ARRAY_SIZE(batch->entropy) ||			\
	    next_gen != batch->generation) {					\
		_get_random_bytes(batch->entropy, sizeof(batch->entropy));	\
		batch->position = 0;						\
		batch->generation = next_gen;					\
	}									\
										\
	ret = batch->entropy[batch->position];					\
	batch->entropy[batch->position] = 0;					\
	++batch->position;							\
	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags);		\
	return ret;								\
}										\
EXPORT_SYMBOL(get_random_ ##type);

DEFINE_BATCHED_ENTROPY(u64)
DEFINE_BATCHED_ENTROPY(u32)

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int __cold random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
int __must_check get_random_bytes_arch(void *buf, int len)
{
	size_t left = len;
	u8 *p = buf;

	while (left) {
		unsigned long v;
		size_t block_len = min_t(size_t, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, block_len);
		p += block_len;
		left -= block_len;
	}

	return len - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);


/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *     static void mix_pool_bytes(const void *buf, size_t len)
 *
 * After which, if added entropy should be credited:
 *
 *     static void credit_init_bits(size_t bits)
 *
 * Finally, extract entropy via:
 *
 *     static void extract_entropy(void *buf, size_t len)
 *
 **********************************************************************/
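
/*
 * Taken together, a collector that both mixes and credits follows the
 * pattern below (illustrative sketch; seed and len are hypothetical,
 * and the crediting assumes the bytes are full-entropy). This mirrors
 * what add_hwgenerator_randomness() does further down:
 *
 *	mix_pool_bytes(seed, len);
 *	credit_init_bits(len * 8);
 */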

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *buf, size_t len)
{
	blake2s_update(&input_pool.hash, buf, len);
}

/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t len)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (len) {
		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		len -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)

static void __cold _credit_init_bits(size_t bits)
{
	unsigned int new, orig, add;
	unsigned long flags;

	if (!bits)
		return;

	add = min_t(size_t, bits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.init_bits);
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.init_bits, orig, new) != orig);

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (urandom_warning.missed)
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}


/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t len);
 *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t len);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
 * and device tree, and credits its input depending on whether or not the
 * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The last two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 **********************************************************************/
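
/*
 * For instance, a hardware RNG driver feeding the pool might loop as
 * follows (illustrative sketch; my_hwrng_read and buf are hypothetical,
 * and crediting n * 8 bits assumes the hardware output is full-entropy):
 *
 *	while (!kthread_should_stop()) {
 *		size_t n = my_hwrng_read(buf, sizeof(buf));
 *
 *		add_hwgenerator_randomness(buf, n, n * 8);
 *	}
 */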
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
800*4882a593Smuzhiyun static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
parse_trust_cpu(char * arg)801*4882a593Smuzhiyun static int __init parse_trust_cpu(char *arg)
802*4882a593Smuzhiyun {
803*4882a593Smuzhiyun 	return kstrtobool(arg, &trust_cpu);
804*4882a593Smuzhiyun }
parse_trust_bootloader(char * arg)805*4882a593Smuzhiyun static int __init parse_trust_bootloader(char *arg)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun 	return kstrtobool(arg, &trust_bootloader);
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun early_param("random.trust_cpu", parse_trust_cpu);
810*4882a593Smuzhiyun early_param("random.trust_bootloader", parse_trust_bootloader);
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun /*
813*4882a593Smuzhiyun  * The first collection of entropy occurs at system boot while interrupts
814*4882a593Smuzhiyun  * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
815*4882a593Smuzhiyun  * utsname(), and the command line. Depending on the above configuration knob,
816*4882a593Smuzhiyun  * RDSEED may be considered sufficient for initialization. Note that much
817*4882a593Smuzhiyun  * earlier setup may already have pushed entropy into the input pool by the
818*4882a593Smuzhiyun  * time we get here.
819*4882a593Smuzhiyun  */
random_init(const char * command_line)820*4882a593Smuzhiyun int __init random_init(const char *command_line)
821*4882a593Smuzhiyun {
822*4882a593Smuzhiyun 	ktime_t now = ktime_get_real();
823*4882a593Smuzhiyun 	unsigned int i, arch_bytes;
824*4882a593Smuzhiyun 	unsigned long entropy;
825*4882a593Smuzhiyun 
826*4882a593Smuzhiyun #if defined(LATENT_ENTROPY_PLUGIN)
827*4882a593Smuzhiyun 	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
828*4882a593Smuzhiyun 	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
829*4882a593Smuzhiyun #endif
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
832*4882a593Smuzhiyun 	     i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
833*4882a593Smuzhiyun 		if (!arch_get_random_seed_long_early(&entropy) &&
834*4882a593Smuzhiyun 		    !arch_get_random_long_early(&entropy)) {
835*4882a593Smuzhiyun 			entropy = random_get_entropy();
836*4882a593Smuzhiyun 			arch_bytes -= sizeof(entropy);
837*4882a593Smuzhiyun 		}
838*4882a593Smuzhiyun 		_mix_pool_bytes(&entropy, sizeof(entropy));
839*4882a593Smuzhiyun 	}
840*4882a593Smuzhiyun 	_mix_pool_bytes(&now, sizeof(now));
841*4882a593Smuzhiyun 	_mix_pool_bytes(utsname(), sizeof(*(utsname())));
842*4882a593Smuzhiyun 	_mix_pool_bytes(command_line, strlen(command_line));
843*4882a593Smuzhiyun 	add_latent_entropy();
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 	if (crng_ready())
846*4882a593Smuzhiyun 		crng_reseed();
847*4882a593Smuzhiyun 	else if (trust_cpu)
848*4882a593Smuzhiyun 		credit_init_bits(arch_bytes * 8);
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	return 0;
851*4882a593Smuzhiyun }
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun /*
854*4882a593Smuzhiyun  * Add device- or boot-specific data to the input pool to help
855*4882a593Smuzhiyun  * initialize it.
856*4882a593Smuzhiyun  *
857*4882a593Smuzhiyun  * None of this adds any entropy; it is meant to avoid the problem of
858*4882a593Smuzhiyun  * the entropy pool having similar initial state across largely
859*4882a593Smuzhiyun  * identical devices.
860*4882a593Smuzhiyun  */
add_device_randomness(const void * buf,unsigned int len)861*4882a593Smuzhiyun void add_device_randomness(const void *buf, unsigned int len)
862*4882a593Smuzhiyun {
863*4882a593Smuzhiyun 	unsigned long entropy = random_get_entropy();
864*4882a593Smuzhiyun 	unsigned long flags;
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 	spin_lock_irqsave(&input_pool.lock, flags);
867*4882a593Smuzhiyun 	_mix_pool_bytes(&entropy, sizeof(entropy));
868*4882a593Smuzhiyun 	_mix_pool_bytes(buf, len);
869*4882a593Smuzhiyun 	spin_unlock_irqrestore(&input_pool.lock, flags);
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun EXPORT_SYMBOL(add_device_randomness);
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun /*
874*4882a593Smuzhiyun  * Interface for in-kernel drivers of true hardware RNGs.
875*4882a593Smuzhiyun  * Those devices may produce endless random bits and will be throttled
876*4882a593Smuzhiyun  * when our pool is full.
877*4882a593Smuzhiyun  */
add_hwgenerator_randomness(const void * buf,size_t len,size_t entropy)878*4882a593Smuzhiyun void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
879*4882a593Smuzhiyun {
880*4882a593Smuzhiyun 	mix_pool_bytes(buf, len);
881*4882a593Smuzhiyun 	credit_init_bits(entropy);
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun 	/*
884*4882a593Smuzhiyun 	 * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
885*4882a593Smuzhiyun 	 * we're not yet initialized.
886*4882a593Smuzhiyun 	 */
887*4882a593Smuzhiyun 	if (!kthread_should_stop() && crng_ready())
888*4882a593Smuzhiyun 		schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun /*
893*4882a593Smuzhiyun  * Handle random seed passed by bootloader, and credit it if
894*4882a593Smuzhiyun  * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
895*4882a593Smuzhiyun  */
add_bootloader_randomness(const void * buf,size_t len)896*4882a593Smuzhiyun void __cold add_bootloader_randomness(const void *buf, size_t len)
897*4882a593Smuzhiyun {
898*4882a593Smuzhiyun 	mix_pool_bytes(buf, len);
899*4882a593Smuzhiyun 	if (trust_bootloader)
900*4882a593Smuzhiyun 		credit_init_bits(len * 8);
901*4882a593Smuzhiyun }
902*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(add_bootloader_randomness);
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun struct fast_pool {
905*4882a593Smuzhiyun 	unsigned long pool[4];
906*4882a593Smuzhiyun 	unsigned long last;
907*4882a593Smuzhiyun 	unsigned int count;
908*4882a593Smuzhiyun 	struct timer_list mix;
909*4882a593Smuzhiyun };
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun static void mix_interrupt_randomness(struct timer_list *work);
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
914*4882a593Smuzhiyun #ifdef CONFIG_64BIT
915*4882a593Smuzhiyun #define FASTMIX_PERM SIPHASH_PERMUTATION
916*4882a593Smuzhiyun 	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
917*4882a593Smuzhiyun #else
918*4882a593Smuzhiyun #define FASTMIX_PERM HSIPHASH_PERMUTATION
919*4882a593Smuzhiyun 	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
920*4882a593Smuzhiyun #endif
921*4882a593Smuzhiyun 	.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
922*4882a593Smuzhiyun };
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun /*
925*4882a593Smuzhiyun  * This is [Half]SipHash-1-x, starting from an empty key. Because
926*4882a593Smuzhiyun  * the key is fixed, it assumes that its inputs are non-malicious,
927*4882a593Smuzhiyun  * and therefore this has no security on its own. s represents the
928*4882a593Smuzhiyun  * four-word SipHash state, while v represents a two-word input.
929*4882a593Smuzhiyun  */
fast_mix(unsigned long s[4],unsigned long v1,unsigned long v2)930*4882a593Smuzhiyun static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
931*4882a593Smuzhiyun {
932*4882a593Smuzhiyun 	s[3] ^= v1;
933*4882a593Smuzhiyun 	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
934*4882a593Smuzhiyun 	s[0] ^= v1;
935*4882a593Smuzhiyun 	s[3] ^= v2;
936*4882a593Smuzhiyun 	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
937*4882a593Smuzhiyun 	s[0] ^= v2;
938*4882a593Smuzhiyun }
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun #ifdef CONFIG_SMP
941*4882a593Smuzhiyun /*
942*4882a593Smuzhiyun  * This function is called when the CPU has just come online, with
943*4882a593Smuzhiyun  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
944*4882a593Smuzhiyun  */
random_online_cpu(unsigned int cpu)945*4882a593Smuzhiyun int __cold random_online_cpu(unsigned int cpu)
946*4882a593Smuzhiyun {
947*4882a593Smuzhiyun 	/*
948*4882a593Smuzhiyun 	 * During CPU shutdown and before CPU onlining, add_interrupt_
949*4882a593Smuzhiyun 	 * randomness() may schedule mix_interrupt_randomness(), and
950*4882a593Smuzhiyun 	 * set the MIX_INFLIGHT flag. However, because the worker can
951*4882a593Smuzhiyun 	 * be scheduled on a different CPU during this period, that
952*4882a593Smuzhiyun 	 * flag will never be cleared. For that reason, we zero out
953*4882a593Smuzhiyun 	 * the flag here, which runs just after workqueues are onlined
954*4882a593Smuzhiyun 	 * for the CPU again. This also has the effect of setting the
955*4882a593Smuzhiyun 	 * irq randomness count to zero so that new accumulated irqs
956*4882a593Smuzhiyun 	 * are fresh.
957*4882a593Smuzhiyun 	 */
958*4882a593Smuzhiyun 	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
959*4882a593Smuzhiyun 	return 0;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun #endif
962*4882a593Smuzhiyun 
mix_interrupt_randomness(struct timer_list * work)963*4882a593Smuzhiyun static void mix_interrupt_randomness(struct timer_list *work)
964*4882a593Smuzhiyun {
965*4882a593Smuzhiyun 	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
966*4882a593Smuzhiyun 	/*
967*4882a593Smuzhiyun 	 * The size of the copied stack pool is explicitly 2 longs so that we
968*4882a593Smuzhiyun 	 * only ever ingest half of the siphash output each time, retaining
969*4882a593Smuzhiyun 	 * the other half as the next "key" that carries over. The entropy is
970*4882a593Smuzhiyun 	 * supposed to be sufficiently dispersed between bits so on average
971*4882a593Smuzhiyun 	 * we don't wind up "losing" some.
972*4882a593Smuzhiyun 	 */
973*4882a593Smuzhiyun 	unsigned long pool[2];
974*4882a593Smuzhiyun 	unsigned int count;
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	/* Check to see if we're running on the wrong CPU due to hotplug. */
977*4882a593Smuzhiyun 	local_irq_disable();
978*4882a593Smuzhiyun 	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
979*4882a593Smuzhiyun 		local_irq_enable();
980*4882a593Smuzhiyun 		return;
981*4882a593Smuzhiyun 	}
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	/*
984*4882a593Smuzhiyun 	 * Copy the pool to the stack so that the mixer always has a
985*4882a593Smuzhiyun 	 * consistent view, before we reenable irqs again.
986*4882a593Smuzhiyun 	 */
987*4882a593Smuzhiyun 	memcpy(pool, fast_pool->pool, sizeof(pool));
988*4882a593Smuzhiyun 	count = fast_pool->count;
989*4882a593Smuzhiyun 	fast_pool->count = 0;
990*4882a593Smuzhiyun 	fast_pool->last = jiffies;
991*4882a593Smuzhiyun 	local_irq_enable();
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun 	mix_pool_bytes(pool, sizeof(pool));
994*4882a593Smuzhiyun 	credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	memzero_explicit(pool, sizeof(pool));
997*4882a593Smuzhiyun }
998*4882a593Smuzhiyun 
add_interrupt_randomness(int irq)999*4882a593Smuzhiyun void add_interrupt_randomness(int irq)
1000*4882a593Smuzhiyun {
1001*4882a593Smuzhiyun 	enum { MIX_INFLIGHT = 1U << 31 };
1002*4882a593Smuzhiyun 	unsigned long entropy = random_get_entropy();
1003*4882a593Smuzhiyun 	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1004*4882a593Smuzhiyun 	struct pt_regs *regs = get_irq_regs();
1005*4882a593Smuzhiyun 	unsigned int new_count;
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun 	fast_mix(fast_pool->pool, entropy,
1008*4882a593Smuzhiyun 		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
1009*4882a593Smuzhiyun 	new_count = ++fast_pool->count;
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	if (new_count & MIX_INFLIGHT)
1012*4882a593Smuzhiyun 		return;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
1015*4882a593Smuzhiyun 		return;
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	fast_pool->count |= MIX_INFLIGHT;
1018*4882a593Smuzhiyun 	if (!timer_pending(&fast_pool->mix)) {
1019*4882a593Smuzhiyun 		fast_pool->mix.expires = jiffies;
1020*4882a593Smuzhiyun 		add_timer_on(&fast_pool->mix, raw_smp_processor_id());
1021*4882a593Smuzhiyun 	}
1022*4882a593Smuzhiyun }
1023*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun /* There is one of these per entropy source */
1026*4882a593Smuzhiyun struct timer_rand_state {
1027*4882a593Smuzhiyun 	unsigned long last_time;
1028*4882a593Smuzhiyun 	long last_delta, last_delta2;
1029*4882a593Smuzhiyun };
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun /*
1032*4882a593Smuzhiyun  * This function adds entropy to the entropy "pool" by using timing
1033*4882a593Smuzhiyun  * delays. It uses the timer_rand_state structure to make an estimate
1034*4882a593Smuzhiyun  * of how many bits of entropy this call has added to the pool. The
1035*4882a593Smuzhiyun  * value "num" is also added to the pool; it should somehow describe
1036*4882a593Smuzhiyun  * the type of event that just happened.
1037*4882a593Smuzhiyun  */
1038*4882a593Smuzhiyun static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun 	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
1041*4882a593Smuzhiyun 	long delta, delta2, delta3;
1042*4882a593Smuzhiyun 	unsigned int bits;
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun 	/*
1045*4882a593Smuzhiyun 	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
1046*4882a593Smuzhiyun 	 * sometime after, so mix into the fast pool.
1047*4882a593Smuzhiyun 	 */
1048*4882a593Smuzhiyun 	if (in_irq()) {
1049*4882a593Smuzhiyun 		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
1050*4882a593Smuzhiyun 	} else {
1051*4882a593Smuzhiyun 		spin_lock_irqsave(&input_pool.lock, flags);
1052*4882a593Smuzhiyun 		_mix_pool_bytes(&entropy, sizeof(entropy));
1053*4882a593Smuzhiyun 		_mix_pool_bytes(&num, sizeof(num));
1054*4882a593Smuzhiyun 		spin_unlock_irqrestore(&input_pool.lock, flags);
1055*4882a593Smuzhiyun 	}
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	if (crng_ready())
1058*4882a593Smuzhiyun 		return;
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	/*
1061*4882a593Smuzhiyun 	 * Calculate number of bits of randomness we probably added.
1062*4882a593Smuzhiyun 	 * We take into account the first, second and third-order deltas
1063*4882a593Smuzhiyun 	 * in order to make our estimate.
1064*4882a593Smuzhiyun 	 */
1065*4882a593Smuzhiyun 	delta = now - READ_ONCE(state->last_time);
1066*4882a593Smuzhiyun 	WRITE_ONCE(state->last_time, now);
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	delta2 = delta - READ_ONCE(state->last_delta);
1069*4882a593Smuzhiyun 	WRITE_ONCE(state->last_delta, delta);
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	delta3 = delta2 - READ_ONCE(state->last_delta2);
1072*4882a593Smuzhiyun 	WRITE_ONCE(state->last_delta2, delta2);
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	if (delta < 0)
1075*4882a593Smuzhiyun 		delta = -delta;
1076*4882a593Smuzhiyun 	if (delta2 < 0)
1077*4882a593Smuzhiyun 		delta2 = -delta2;
1078*4882a593Smuzhiyun 	if (delta3 < 0)
1079*4882a593Smuzhiyun 		delta3 = -delta3;
1080*4882a593Smuzhiyun 	if (delta > delta2)
1081*4882a593Smuzhiyun 		delta = delta2;
1082*4882a593Smuzhiyun 	if (delta > delta3)
1083*4882a593Smuzhiyun 		delta = delta3;
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	/*
1086*4882a593Smuzhiyun 	 * delta is now minimum absolute delta. Round down by 1 bit
1087*4882a593Smuzhiyun 	 * on general principles, and limit entropy estimate to 11 bits.
1088*4882a593Smuzhiyun 	 */
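	/*
	 * Worked example: a minimum absolute delta of 100 jiffies yields
	 * fls(100 >> 1) == fls(50) == 6 bits.
	 */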
1089*4882a593Smuzhiyun 	bits = min(fls(delta >> 1), 11);
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	/*
1092*4882a593Smuzhiyun 	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
1093*4882a593Smuzhiyun 	 * will run after this, which uses a different crediting scheme of 1 bit
1094*4882a593Smuzhiyun 	 * per every 64 interrupts. In order to let that function do accounting
1095*4882a593Smuzhiyun 	 * close to the one in this function, we credit a full 64/64 bit per bit,
1096*4882a593Smuzhiyun 	 * and then subtract one to account for the extra one added.
1097*4882a593Smuzhiyun 	 */
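	/*
	 * Worked example: bits == 2 adds max(1, 2 * 64) - 1 == 127 ticks;
	 * together with the increment that add_interrupt_randomness() makes
	 * for this same interrupt, that is 128 ticks, which the 1-bit-per-64
	 * scheme in mix_interrupt_randomness() later credits as exactly
	 * 2 bits.
	 */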
1098*4882a593Smuzhiyun 	if (in_irq())
1099*4882a593Smuzhiyun 		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
1100*4882a593Smuzhiyun 	else
1101*4882a593Smuzhiyun 		_credit_init_bits(bits);
1102*4882a593Smuzhiyun }
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
1105*4882a593Smuzhiyun {
1106*4882a593Smuzhiyun 	static unsigned char last_value;
1107*4882a593Smuzhiyun 	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	/* Ignore autorepeat and the like. */
1110*4882a593Smuzhiyun 	if (value == last_value)
1111*4882a593Smuzhiyun 		return;
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	last_value = value;
1114*4882a593Smuzhiyun 	add_timer_randomness(&input_timer_state,
1115*4882a593Smuzhiyun 			     (type << 4) ^ code ^ (code >> 4) ^ value);
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(add_input_randomness);
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun #ifdef CONFIG_BLOCK
1120*4882a593Smuzhiyun void add_disk_randomness(struct gendisk *disk)
1121*4882a593Smuzhiyun {
1122*4882a593Smuzhiyun 	if (!disk || !disk->random)
1123*4882a593Smuzhiyun 		return;
1124*4882a593Smuzhiyun 	/* First major is 1, so we get >= 0x200 here. */
1125*4882a593Smuzhiyun 	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1126*4882a593Smuzhiyun }
1127*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(add_disk_randomness);
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun void __cold rand_initialize_disk(struct gendisk *disk)
1130*4882a593Smuzhiyun {
1131*4882a593Smuzhiyun 	struct timer_rand_state *state;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	/*
1134*4882a593Smuzhiyun 	 * If kzalloc returns null, we just won't use that entropy
1135*4882a593Smuzhiyun 	 * source.
1136*4882a593Smuzhiyun 	 */
1137*4882a593Smuzhiyun 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1138*4882a593Smuzhiyun 	if (state) {
1139*4882a593Smuzhiyun 		state->last_time = INITIAL_JIFFIES;
1140*4882a593Smuzhiyun 		disk->random = state;
1141*4882a593Smuzhiyun 	}
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun #endif
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun /*
1146*4882a593Smuzhiyun  * Each time the timer fires, we expect that we got an unpredictable
1147*4882a593Smuzhiyun  * jump in the cycle counter. Even if the timer is running on another
1148*4882a593Smuzhiyun  * CPU, the timer activity will be touching the stack of the CPU that is
1149*4882a593Smuzhiyun  * generating entropy.
1150*4882a593Smuzhiyun  *
1151*4882a593Smuzhiyun  * Note that we don't re-arm the timer in the timer itself - we are
1152*4882a593Smuzhiyun  * happy to be scheduled away, since that just makes the load more
1153*4882a593Smuzhiyun  * complex, but we do not want the timer to keep ticking unless the
1154*4882a593Smuzhiyun  * entropy loop is running.
1155*4882a593Smuzhiyun  *
1156*4882a593Smuzhiyun  * So the re-arming always happens in the entropy loop itself.
1157*4882a593Smuzhiyun  */
1158*4882a593Smuzhiyun static void __cold entropy_timer(struct timer_list *t)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	credit_init_bits(1);
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun /*
1164*4882a593Smuzhiyun  * If we have an actual cycle counter, see if we can
1165*4882a593Smuzhiyun  * generate enough entropy with timing noise.
1166*4882a593Smuzhiyun  */
1167*4882a593Smuzhiyun static void __cold try_to_generate_entropy(void)
1168*4882a593Smuzhiyun {
1169*4882a593Smuzhiyun 	struct {
1170*4882a593Smuzhiyun 		unsigned long entropy;
1171*4882a593Smuzhiyun 		struct timer_list timer;
1172*4882a593Smuzhiyun 	} stack;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	stack.entropy = random_get_entropy();
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	/* Slow counter - or none. Don't even bother */
1177*4882a593Smuzhiyun 	if (stack.entropy == random_get_entropy())
1178*4882a593Smuzhiyun 		return;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
1181*4882a593Smuzhiyun 	while (!crng_ready() && !signal_pending(current)) {
1182*4882a593Smuzhiyun 		if (!timer_pending(&stack.timer))
1183*4882a593Smuzhiyun 			mod_timer(&stack.timer, jiffies + 1);
1184*4882a593Smuzhiyun 		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
1185*4882a593Smuzhiyun 		schedule();
1186*4882a593Smuzhiyun 		stack.entropy = random_get_entropy();
1187*4882a593Smuzhiyun 	}
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	del_timer_sync(&stack.timer);
1190*4882a593Smuzhiyun 	destroy_timer_on_stack(&stack.timer);
1191*4882a593Smuzhiyun 	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
1192*4882a593Smuzhiyun }
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun /**********************************************************************
1196*4882a593Smuzhiyun  *
1197*4882a593Smuzhiyun  * Userspace reader/writer interfaces.
1198*4882a593Smuzhiyun  *
1199*4882a593Smuzhiyun  * getrandom(2) is the primary modern interface into the RNG and should
1200*4882a593Smuzhiyun  * be used in preference to anything else.
1201*4882a593Smuzhiyun  *
1202*4882a593Smuzhiyun  * Reading from /dev/random has the same functionality as calling
1203*4882a593Smuzhiyun  * getrandom(2) with flags=0. In earlier versions, however, it had
1204*4882a593Smuzhiyun  * vastly different semantics and should therefore be avoided, to
1205*4882a593Smuzhiyun  * prevent backwards compatibility issues.
1206*4882a593Smuzhiyun  *
1207*4882a593Smuzhiyun  * Reading from /dev/urandom has the same functionality as calling
1208*4882a593Smuzhiyun  * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1209*4882a593Smuzhiyun  * waiting for the RNG to be ready, it should not be used.
1210*4882a593Smuzhiyun  *
1211*4882a593Smuzhiyun  * Writing to either /dev/random or /dev/urandom adds entropy to
1212*4882a593Smuzhiyun  * the input pool but does not credit it.
1213*4882a593Smuzhiyun  *
1214*4882a593Smuzhiyun  * Polling on /dev/random indicates when the RNG is initialized, on
1215*4882a593Smuzhiyun  * the read side, and when it wants new entropy, on the write side.
1216*4882a593Smuzhiyun  *
1217*4882a593Smuzhiyun  * Both /dev/random and /dev/urandom have the same set of ioctls for
1218*4882a593Smuzhiyun  * adding entropy, getting the entropy count, zeroing the count, and
1219*4882a593Smuzhiyun  * reseeding the crng.
1220*4882a593Smuzhiyun  *
1221*4882a593Smuzhiyun  **********************************************************************/
1222*4882a593Smuzhiyun 
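/*
 * Illustrative userspace sketch of the preference order described above,
 * using getrandom(2) and falling back to /dev/urandom only where the
 * syscall is missing. This is a hedged example, not part of the driver;
 * the helper name and error handling are assumptions.
 */
#if 0
#include <sys/random.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int get_seed(void *buf, size_t len)
{
	/* flags == 0: blocks until the crng is initialized, like /dev/random. */
	ssize_t ret = getrandom(buf, len, 0);

	if (ret == (ssize_t)len)
		return 0;
	if (ret < 0 && errno == ENOSYS) {
		/* Pre-getrandom kernel: fall back to /dev/urandom. */
		int fd = open("/dev/urandom", O_RDONLY);

		if (fd < 0)
			return -1;
		ret = read(fd, buf, len);
		close(fd);
		return ret == (ssize_t)len ? 0 : -1;
	}
	return -1;
}
#endif
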
1223*4882a593Smuzhiyun SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun 	struct iov_iter iter;
1226*4882a593Smuzhiyun 	struct iovec iov;
1227*4882a593Smuzhiyun 	int ret;
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
1230*4882a593Smuzhiyun 		return -EINVAL;
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	/*
1233*4882a593Smuzhiyun 	 * Requesting insecure and blocking randomness at the same time makes
1234*4882a593Smuzhiyun 	 * no sense.
1235*4882a593Smuzhiyun 	 */
1236*4882a593Smuzhiyun 	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
1237*4882a593Smuzhiyun 		return -EINVAL;
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	if (!crng_ready() && !(flags & GRND_INSECURE)) {
1240*4882a593Smuzhiyun 		if (flags & GRND_NONBLOCK)
1241*4882a593Smuzhiyun 			return -EAGAIN;
1242*4882a593Smuzhiyun 		ret = wait_for_random_bytes();
1243*4882a593Smuzhiyun 		if (unlikely(ret))
1244*4882a593Smuzhiyun 			return ret;
1245*4882a593Smuzhiyun 	}
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	ret = import_single_range(READ, ubuf, len, &iov, &iter);
1248*4882a593Smuzhiyun 	if (unlikely(ret))
1249*4882a593Smuzhiyun 		return ret;
1250*4882a593Smuzhiyun 	return get_random_bytes_user(&iter);
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun static __poll_t random_poll(struct file *file, poll_table *wait)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun 	poll_wait(file, &crng_init_wait, wait);
1256*4882a593Smuzhiyun 	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun 
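/*
 * Illustrative sketch, not part of the driver: a userspace process can use
 * the poll semantics implemented just above to sleep until the RNG is
 * initialized. The helper name is an assumption.
 */
#if 0
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

static int wait_for_rng_ready(void)
{
	struct pollfd pfd = { .events = POLLIN };
	int ret;

	pfd.fd = open("/dev/random", O_RDONLY);
	if (pfd.fd < 0)
		return -1;
	/* POLLIN is reported once the crng is ready. */
	ret = poll(&pfd, 1, -1);
	close(pfd.fd);
	return ret == 1 ? 0 : -1;
}
#endif
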
1259*4882a593Smuzhiyun static ssize_t write_pool_user(struct iov_iter *iter)
1260*4882a593Smuzhiyun {
1261*4882a593Smuzhiyun 	u8 block[BLAKE2S_BLOCK_SIZE];
1262*4882a593Smuzhiyun 	ssize_t ret = 0;
1263*4882a593Smuzhiyun 	size_t copied;
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	if (unlikely(!iov_iter_count(iter)))
1266*4882a593Smuzhiyun 		return 0;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	for (;;) {
1269*4882a593Smuzhiyun 		copied = copy_from_iter(block, sizeof(block), iter);
1270*4882a593Smuzhiyun 		ret += copied;
1271*4882a593Smuzhiyun 		mix_pool_bytes(block, copied);
1272*4882a593Smuzhiyun 		if (!iov_iter_count(iter) || copied != sizeof(block))
1273*4882a593Smuzhiyun 			break;
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
1276*4882a593Smuzhiyun 		if (ret % PAGE_SIZE == 0) {
1277*4882a593Smuzhiyun 			if (signal_pending(current))
1278*4882a593Smuzhiyun 				break;
1279*4882a593Smuzhiyun 			cond_resched();
1280*4882a593Smuzhiyun 		}
1281*4882a593Smuzhiyun 	}
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	memzero_explicit(block, sizeof(block));
1284*4882a593Smuzhiyun 	return ret ? ret : -EFAULT;
1285*4882a593Smuzhiyun }
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun 	return write_pool_user(iter);
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun 	static int maxwarn = 10;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	if (!crng_ready()) {
1297*4882a593Smuzhiyun 		if (!ratelimit_disable && maxwarn <= 0)
1298*4882a593Smuzhiyun 			++urandom_warning.missed;
1299*4882a593Smuzhiyun 		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
1300*4882a593Smuzhiyun 			--maxwarn;
1301*4882a593Smuzhiyun 			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
1302*4882a593Smuzhiyun 				  current->comm, iov_iter_count(iter));
1303*4882a593Smuzhiyun 		}
1304*4882a593Smuzhiyun 	}
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	return get_random_bytes_user(iter);
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1310*4882a593Smuzhiyun {
1311*4882a593Smuzhiyun 	int ret;
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	if (!crng_ready() &&
1314*4882a593Smuzhiyun 	    ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
1315*4882a593Smuzhiyun 	     (kiocb->ki_filp->f_flags & O_NONBLOCK)))
1316*4882a593Smuzhiyun 		return -EAGAIN;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	ret = wait_for_random_bytes();
1319*4882a593Smuzhiyun 	if (ret != 0)
1320*4882a593Smuzhiyun 		return ret;
1321*4882a593Smuzhiyun 	return get_random_bytes_user(iter);
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1325*4882a593Smuzhiyun {
1326*4882a593Smuzhiyun 	int __user *p = (int __user *)arg;
1327*4882a593Smuzhiyun 	int ent_count;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	switch (cmd) {
1330*4882a593Smuzhiyun 	case RNDGETENTCNT:
1331*4882a593Smuzhiyun 		/* Inherently racy, no point locking. */
1332*4882a593Smuzhiyun 		if (put_user(input_pool.init_bits, p))
1333*4882a593Smuzhiyun 			return -EFAULT;
1334*4882a593Smuzhiyun 		return 0;
1335*4882a593Smuzhiyun 	case RNDADDTOENTCNT:
1336*4882a593Smuzhiyun 		if (!capable(CAP_SYS_ADMIN))
1337*4882a593Smuzhiyun 			return -EPERM;
1338*4882a593Smuzhiyun 		if (get_user(ent_count, p))
1339*4882a593Smuzhiyun 			return -EFAULT;
1340*4882a593Smuzhiyun 		if (ent_count < 0)
1341*4882a593Smuzhiyun 			return -EINVAL;
1342*4882a593Smuzhiyun 		credit_init_bits(ent_count);
1343*4882a593Smuzhiyun 		return 0;
1344*4882a593Smuzhiyun 	case RNDADDENTROPY: {
1345*4882a593Smuzhiyun 		struct iov_iter iter;
1346*4882a593Smuzhiyun 		struct iovec iov;
1347*4882a593Smuzhiyun 		ssize_t ret;
1348*4882a593Smuzhiyun 		int len;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 		if (!capable(CAP_SYS_ADMIN))
1351*4882a593Smuzhiyun 			return -EPERM;
1352*4882a593Smuzhiyun 		if (get_user(ent_count, p++))
1353*4882a593Smuzhiyun 			return -EFAULT;
1354*4882a593Smuzhiyun 		if (ent_count < 0)
1355*4882a593Smuzhiyun 			return -EINVAL;
1356*4882a593Smuzhiyun 		if (get_user(len, p++))
1357*4882a593Smuzhiyun 			return -EFAULT;
1358*4882a593Smuzhiyun 		ret = import_single_range(WRITE, p, len, &iov, &iter);
1359*4882a593Smuzhiyun 		if (unlikely(ret))
1360*4882a593Smuzhiyun 			return ret;
1361*4882a593Smuzhiyun 		ret = write_pool_user(&iter);
1362*4882a593Smuzhiyun 		if (unlikely(ret < 0))
1363*4882a593Smuzhiyun 			return ret;
1364*4882a593Smuzhiyun 		/* Since we're crediting, enforce that it was all written into the pool. */
1365*4882a593Smuzhiyun 		if (unlikely(ret != len))
1366*4882a593Smuzhiyun 			return -EFAULT;
1367*4882a593Smuzhiyun 		credit_init_bits(ent_count);
1368*4882a593Smuzhiyun 		return 0;
1369*4882a593Smuzhiyun 	}
1370*4882a593Smuzhiyun 	case RNDZAPENTCNT:
1371*4882a593Smuzhiyun 	case RNDCLEARPOOL:
1372*4882a593Smuzhiyun 		/* No longer has any effect. */
1373*4882a593Smuzhiyun 		if (!capable(CAP_SYS_ADMIN))
1374*4882a593Smuzhiyun 			return -EPERM;
1375*4882a593Smuzhiyun 		return 0;
1376*4882a593Smuzhiyun 	case RNDRESEEDCRNG:
1377*4882a593Smuzhiyun 		if (!capable(CAP_SYS_ADMIN))
1378*4882a593Smuzhiyun 			return -EPERM;
1379*4882a593Smuzhiyun 		if (!crng_ready())
1380*4882a593Smuzhiyun 			return -ENODATA;
1381*4882a593Smuzhiyun 		crng_reseed();
1382*4882a593Smuzhiyun 		return 0;
1383*4882a593Smuzhiyun 	default:
1384*4882a593Smuzhiyun 		return -EINVAL;
1385*4882a593Smuzhiyun 	}
1386*4882a593Smuzhiyun }
1387*4882a593Smuzhiyun 
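/*
 * Illustrative sketch, not part of the driver: how a privileged daemon in
 * the style of rngd might feed and credit entropy through RNDADDENTROPY,
 * matching the ioctl handling above. The struct layout mirrors
 * struct rand_pool_info; names and buffer sizing are assumptions.
 */
#if 0
#include <linux/random.h>
#include <sys/ioctl.h>
#include <string.h>

static int credit_entropy(int random_fd, const void *buf, int len, int bits)
{
	struct {
		int entropy_count;	/* same layout as struct rand_pool_info */
		int buf_size;
		unsigned char data[512];
	} rpi;

	if (len > (int)sizeof(rpi.data))
		return -1;
	rpi.entropy_count = bits;	/* credited, so CAP_SYS_ADMIN is required */
	rpi.buf_size = len;
	memcpy(rpi.data, buf, len);
	return ioctl(random_fd, RNDADDENTROPY, &rpi);
}
#endif
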
1388*4882a593Smuzhiyun static int random_fasync(int fd, struct file *filp, int on)
1389*4882a593Smuzhiyun {
1390*4882a593Smuzhiyun 	return fasync_helper(fd, filp, on, &fasync);
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun const struct file_operations random_fops = {
1394*4882a593Smuzhiyun 	.read_iter = random_read_iter,
1395*4882a593Smuzhiyun 	.write_iter = random_write_iter,
1396*4882a593Smuzhiyun 	.poll = random_poll,
1397*4882a593Smuzhiyun 	.unlocked_ioctl = random_ioctl,
1398*4882a593Smuzhiyun 	.compat_ioctl = compat_ptr_ioctl,
1399*4882a593Smuzhiyun 	.fasync = random_fasync,
1400*4882a593Smuzhiyun 	.llseek = noop_llseek,
1401*4882a593Smuzhiyun 	.splice_read = generic_file_splice_read,
1402*4882a593Smuzhiyun 	.splice_write = iter_file_splice_write,
1403*4882a593Smuzhiyun };
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun const struct file_operations urandom_fops = {
1406*4882a593Smuzhiyun 	.read_iter = urandom_read_iter,
1407*4882a593Smuzhiyun 	.write_iter = random_write_iter,
1408*4882a593Smuzhiyun 	.unlocked_ioctl = random_ioctl,
1409*4882a593Smuzhiyun 	.compat_ioctl = compat_ptr_ioctl,
1410*4882a593Smuzhiyun 	.fasync = random_fasync,
1411*4882a593Smuzhiyun 	.llseek = noop_llseek,
1412*4882a593Smuzhiyun 	.splice_read = generic_file_splice_read,
1413*4882a593Smuzhiyun 	.splice_write = iter_file_splice_write,
1414*4882a593Smuzhiyun };
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun /********************************************************************
1418*4882a593Smuzhiyun  *
1419*4882a593Smuzhiyun  * Sysctl interface.
1420*4882a593Smuzhiyun  *
1421*4882a593Smuzhiyun  * These are partly unused legacy knobs with dummy values to not break
1422*4882a593Smuzhiyun  * userspace and partly still useful things. They are usually accessible
1423*4882a593Smuzhiyun  * in /proc/sys/kernel/random/ and are as follows:
1424*4882a593Smuzhiyun  *
1425*4882a593Smuzhiyun  * - boot_id - a UUID representing the current boot.
1426*4882a593Smuzhiyun  *
1427*4882a593Smuzhiyun  * - uuid - a random UUID, different each time the file is read.
1428*4882a593Smuzhiyun  *
1429*4882a593Smuzhiyun  * - poolsize - the number of bits of entropy that the input pool can
1430*4882a593Smuzhiyun  *   hold, tied to the POOL_BITS constant.
1431*4882a593Smuzhiyun  *
1432*4882a593Smuzhiyun  * - entropy_avail - the number of bits of entropy currently in the
1433*4882a593Smuzhiyun  *   input pool. Always <= poolsize.
1434*4882a593Smuzhiyun  *
1435*4882a593Smuzhiyun  * - write_wakeup_threshold - the amount of entropy in the input pool
1436*4882a593Smuzhiyun  *   below which write polls to /dev/random will unblock, requesting
1437*4882a593Smuzhiyun  *   more entropy, tied to the POOL_READY_BITS constant. It is writable
1438*4882a593Smuzhiyun  *   to avoid breaking old userspaces, but writing to it does not
1439*4882a593Smuzhiyun  *   change any behavior of the RNG.
1440*4882a593Smuzhiyun  *
1441*4882a593Smuzhiyun  * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1442*4882a593Smuzhiyun  *   It is writable to avoid breaking old userspaces, but writing
1443*4882a593Smuzhiyun  *   to it does not change any behavior of the RNG.
1444*4882a593Smuzhiyun  *
1445*4882a593Smuzhiyun  ********************************************************************/
1446*4882a593Smuzhiyun 
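/*
 * Illustrative sketch, not part of the driver: reading the two knobs above
 * that carry live information. The procfs paths follow the layout this
 * table registers; the helper name is an assumption.
 */
#if 0
#include <stdio.h>

static void show_rng_state(void)
{
	char boot_id[64] = "";
	int avail = -1;
	FILE *f;

	f = fopen("/proc/sys/kernel/random/entropy_avail", "r");
	if (f) {
		if (fscanf(f, "%d", &avail) != 1)
			avail = -1;
		fclose(f);
	}
	f = fopen("/proc/sys/kernel/random/boot_id", "r");
	if (f) {
		if (!fgets(boot_id, sizeof(boot_id), f))
			boot_id[0] = '\0';
		fclose(f);
	}
	printf("entropy_avail=%d boot_id=%s", avail, boot_id);
}
#endif
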
1447*4882a593Smuzhiyun #ifdef CONFIG_SYSCTL
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun #include <linux/sysctl.h>
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
1452*4882a593Smuzhiyun static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
1453*4882a593Smuzhiyun static int sysctl_poolsize = POOL_BITS;
1454*4882a593Smuzhiyun static u8 sysctl_bootid[UUID_SIZE];
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun /*
1457*4882a593Smuzhiyun  * This function is used to return both the boot_id UUID and a random
1458*4882a593Smuzhiyun  * UUID. The difference is in whether table->data is NULL; if it is,
1459*4882a593Smuzhiyun  * then a new UUID is generated and returned to the user.
1460*4882a593Smuzhiyun  */
1461*4882a593Smuzhiyun static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
1462*4882a593Smuzhiyun 			size_t *lenp, loff_t *ppos)
1463*4882a593Smuzhiyun {
1464*4882a593Smuzhiyun 	u8 tmp_uuid[UUID_SIZE], *uuid;
1465*4882a593Smuzhiyun 	char uuid_string[UUID_STRING_LEN + 1];
1466*4882a593Smuzhiyun 	struct ctl_table fake_table = {
1467*4882a593Smuzhiyun 		.data = uuid_string,
1468*4882a593Smuzhiyun 		.maxlen = UUID_STRING_LEN
1469*4882a593Smuzhiyun 	};
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	if (write)
1472*4882a593Smuzhiyun 		return -EPERM;
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	uuid = table->data;
1475*4882a593Smuzhiyun 	if (!uuid) {
1476*4882a593Smuzhiyun 		uuid = tmp_uuid;
1477*4882a593Smuzhiyun 		generate_random_uuid(uuid);
1478*4882a593Smuzhiyun 	} else {
1479*4882a593Smuzhiyun 		static DEFINE_SPINLOCK(bootid_spinlock);
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 		spin_lock(&bootid_spinlock);
1482*4882a593Smuzhiyun 		if (!uuid[8])
1483*4882a593Smuzhiyun 			generate_random_uuid(uuid);
1484*4882a593Smuzhiyun 		spin_unlock(&bootid_spinlock);
1485*4882a593Smuzhiyun 	}
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
1488*4882a593Smuzhiyun 	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun /* The same as proc_dointvec, but writes don't change anything. */
1492*4882a593Smuzhiyun static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
1493*4882a593Smuzhiyun 			    size_t *lenp, loff_t *ppos)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun 	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
1496*4882a593Smuzhiyun }
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun extern struct ctl_table random_table[];
1499*4882a593Smuzhiyun struct ctl_table random_table[] = {
1500*4882a593Smuzhiyun 	{
1501*4882a593Smuzhiyun 		.procname	= "poolsize",
1502*4882a593Smuzhiyun 		.data		= &sysctl_poolsize,
1503*4882a593Smuzhiyun 		.maxlen		= sizeof(int),
1504*4882a593Smuzhiyun 		.mode		= 0444,
1505*4882a593Smuzhiyun 		.proc_handler	= proc_dointvec,
1506*4882a593Smuzhiyun 	},
1507*4882a593Smuzhiyun 	{
1508*4882a593Smuzhiyun 		.procname	= "entropy_avail",
1509*4882a593Smuzhiyun 		.data		= &input_pool.init_bits,
1510*4882a593Smuzhiyun 		.maxlen		= sizeof(int),
1511*4882a593Smuzhiyun 		.mode		= 0444,
1512*4882a593Smuzhiyun 		.proc_handler	= proc_dointvec,
1513*4882a593Smuzhiyun 	},
1514*4882a593Smuzhiyun 	{
1515*4882a593Smuzhiyun 		.procname	= "write_wakeup_threshold",
1516*4882a593Smuzhiyun 		.data		= &sysctl_random_write_wakeup_bits,
1517*4882a593Smuzhiyun 		.maxlen		= sizeof(int),
1518*4882a593Smuzhiyun 		.mode		= 0644,
1519*4882a593Smuzhiyun 		.proc_handler	= proc_do_rointvec,
1520*4882a593Smuzhiyun 	},
1521*4882a593Smuzhiyun 	{
1522*4882a593Smuzhiyun 		.procname	= "urandom_min_reseed_secs",
1523*4882a593Smuzhiyun 		.data		= &sysctl_random_min_urandom_seed,
1524*4882a593Smuzhiyun 		.maxlen		= sizeof(int),
1525*4882a593Smuzhiyun 		.mode		= 0644,
1526*4882a593Smuzhiyun 		.proc_handler	= proc_do_rointvec,
1527*4882a593Smuzhiyun 	},
1528*4882a593Smuzhiyun 	{
1529*4882a593Smuzhiyun 		.procname	= "boot_id",
1530*4882a593Smuzhiyun 		.data		= &sysctl_bootid,
1531*4882a593Smuzhiyun 		.mode		= 0444,
1532*4882a593Smuzhiyun 		.proc_handler	= proc_do_uuid,
1533*4882a593Smuzhiyun 	},
1534*4882a593Smuzhiyun 	{
1535*4882a593Smuzhiyun 		.procname	= "uuid",
1536*4882a593Smuzhiyun 		.mode		= 0444,
1537*4882a593Smuzhiyun 		.proc_handler	= proc_do_uuid,
1538*4882a593Smuzhiyun 	},
1539*4882a593Smuzhiyun 	{ }
1540*4882a593Smuzhiyun };
1541*4882a593Smuzhiyun #endif	/* CONFIG_SYSCTL */
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun /*
1544*4882a593Smuzhiyun  * Android KABI fixups
1545*4882a593Smuzhiyun  *
1546*4882a593Smuzhiyun  * Add back two functions that were being used by out-of-tree drivers.
1547*4882a593Smuzhiyun  *
1548*4882a593Smuzhiyun  * Yes, horrible hack, the things we do for FIPS "compliance"...
1549*4882a593Smuzhiyun  */
1550*4882a593Smuzhiyun static DEFINE_SPINLOCK(random_ready_list_lock);
1551*4882a593Smuzhiyun static LIST_HEAD(random_ready_list);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun int add_random_ready_callback(struct random_ready_callback *rdy)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun 	struct module *owner;
1556*4882a593Smuzhiyun 	unsigned long flags;
1557*4882a593Smuzhiyun 	int err = -EALREADY;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	if (crng_ready())
1560*4882a593Smuzhiyun 		return err;
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	owner = rdy->owner;
1563*4882a593Smuzhiyun 	if (!try_module_get(owner))
1564*4882a593Smuzhiyun 		return -ENOENT;
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	spin_lock_irqsave(&random_ready_list_lock, flags);
1567*4882a593Smuzhiyun 	if (crng_ready())
1568*4882a593Smuzhiyun 		goto out;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	owner = NULL;
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 	list_add(&rdy->list, &random_ready_list);
1573*4882a593Smuzhiyun 	err = 0;
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun out:
1576*4882a593Smuzhiyun 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	module_put(owner);
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	return err;
1581*4882a593Smuzhiyun }
1582*4882a593Smuzhiyun EXPORT_SYMBOL(add_random_ready_callback);
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun void del_random_ready_callback(struct random_ready_callback *rdy)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	unsigned long flags;
1587*4882a593Smuzhiyun 	struct module *owner = NULL;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	spin_lock_irqsave(&random_ready_list_lock, flags);
1590*4882a593Smuzhiyun 	if (!list_empty(&rdy->list)) {
1591*4882a593Smuzhiyun 		list_del_init(&rdy->list);
1592*4882a593Smuzhiyun 		owner = rdy->owner;
1593*4882a593Smuzhiyun 	}
1594*4882a593Smuzhiyun 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	module_put(owner);
1597*4882a593Smuzhiyun }
1598*4882a593Smuzhiyun EXPORT_SYMBOL(del_random_ready_callback);
1599*4882a593Smuzhiyun 
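/*
 * Illustrative sketch, not part of the driver: how an out-of-tree module
 * might use the legacy callback pair above, honoring the -EALREADY return.
 * Module names and the callback body are assumptions.
 */
#if 0
#include <linux/module.h>
#include <linux/random.h>

static void my_rng_ready(struct random_ready_callback *rdy)
{
	/* Runs once the crng is initialized; safe to derive keys here. */
}

static struct random_ready_callback my_rdy = {
	.func = my_rng_ready,
	.owner = THIS_MODULE,
};

static bool my_rdy_registered;

static int __init my_driver_init(void)
{
	int ret = add_random_ready_callback(&my_rdy);

	if (!ret)
		my_rdy_registered = true;
	else if (ret == -EALREADY)
		my_rng_ready(&my_rdy);	/* crng was already initialized */
	else
		return ret;
	return 0;
}

static void __exit my_driver_exit(void)
{
	/* Only unregister if the callback was actually queued. */
	if (my_rdy_registered)
		del_random_ready_callback(&my_rdy);
}

module_init(my_driver_init);
module_exit(my_driver_exit);
#endif
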
1600*4882a593Smuzhiyun static void process_oldschool_random_ready_list(void)
1601*4882a593Smuzhiyun {
1602*4882a593Smuzhiyun 	unsigned long flags;
1603*4882a593Smuzhiyun 	struct random_ready_callback *rdy, *tmp;
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	spin_lock_irqsave(&random_ready_list_lock, flags);
1606*4882a593Smuzhiyun 	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
1607*4882a593Smuzhiyun 		struct module *owner = rdy->owner;
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 		list_del_init(&rdy->list);
1610*4882a593Smuzhiyun 		rdy->func(rdy);
1611*4882a593Smuzhiyun 		module_put(owner);
1612*4882a593Smuzhiyun 	}
1613*4882a593Smuzhiyun 	spin_unlock_irqrestore(&random_ready_list_lock, flags);
1614*4882a593Smuzhiyun }
1615