/*	$NetBSD: random.c,v 1.10 2021/12/28 13:22:43 riastradh Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * /dev/random, /dev/urandom -- stateless version
 *
 *	For short reads from /dev/urandom, up to 256 bytes, read from a
 *	per-CPU NIST Hash_DRBG instance that is reseeded as soon as the
 *	system has enough entropy.
 *
 *	For all other reads, instantiate a fresh NIST Hash_DRBG from
 *	the global entropy pool, and draw from it.
 *
 *	Each read is independent; there is no per-open state.
 *	Concurrent reads from the same open run in parallel.
 *
 *	Reading from /dev/random may block until entropy is available.
 *	Either device may return short reads if interrupted.
 */

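/*
 * Illustrative sketch only, not part of the driver: a hypothetical
 * userland consumer of the semantics above.  Reads from /dev/urandom
 * never block; reads from /dev/random may block (or fail with EAGAIN
 * on a nonblocking descriptor), and either device may return a short
 * read if interrupted, so careful callers loop until the buffer is
 * full.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

static int
read_full(const char *path, void *buf, size_t len)
{
	char *p = buf;
	size_t resid = len;
	ssize_t n;
	int fd;

	if ((fd = open(path, O_RDONLY)) == -1)
		return -1;
	while (resid > 0) {
		n = read(fd, p, resid);
		if (n == -1) {
			if (errno == EINTR)	/* interrupted; retry */
				continue;
			(void)close(fd);
			return -1;
		}
		p += n;
		resid -= n;
	}
	(void)close(fd);
	return 0;
}

/* E.g. read_full("/dev/urandom", key, sizeof(key)); */
#endif
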
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.10 2021/12/28 13:22:43 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cprng.h>
#include <sys/entropy.h>
#include <sys/errno.h>
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/random.h>
#include <sys/rnd.h>
#include <sys/rndsource.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/vnode.h>		/* IO_NDELAY */

#include "ioconf.h"

static dev_type_open(random_open);
static dev_type_close(random_close);
static dev_type_ioctl(random_ioctl);
static dev_type_poll(random_poll);
static dev_type_kqfilter(random_kqfilter);
static dev_type_read(random_read);
static dev_type_write(random_write);

const struct cdevsw rnd_cdevsw = {
	.d_open = random_open,
	.d_close = random_close,
	.d_read = random_read,
	.d_write = random_write,
	.d_ioctl = random_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = random_poll,
	.d_mmap = nommap,
	.d_kqfilter = random_kqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER|D_MPSAFE,
};

#define	RANDOM_BUFSIZE	512	/* XXX arbitrary size, not derived from anything */

/* Entropy source for writes to /dev/random and /dev/urandom */
static krndsource_t	user_rndsource;

void
rndattach(int num)
{

	rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN,
	    RND_FLAG_COLLECT_VALUE);
}

static int
random_open(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Validate minor.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
	case RND_DEV_URANDOM:
		break;
	default:
		return ENXIO;
	}

	return 0;
}

static int
random_close(dev_t dev, int flags, int fmt, struct lwp *l)
{

	/* Success!  */
	return 0;
}

static int
random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l)
{

	/*
	 * No non-blocking/async options; otherwise defer to
	 * entropy_ioctl.
	 */
	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;
	default:
		return entropy_ioctl(cmd, data);
	}
}

static int
random_poll(dev_t dev, int events, struct lwp *l)
{

	/* /dev/random may block; /dev/urandom is always ready.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		return entropy_poll(events);
	case RND_DEV_URANDOM:
		return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM);
	default:
		return 0;
	}
}

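/*
 * Illustrative sketch only, not part of the driver: a hypothetical
 * userland caller can use poll(2), per random_poll above, to wait
 * until /dev/random becomes readable (POLLIN), i.e. until the system
 * has enough entropy; /dev/urandom always polls as ready.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int
wait_for_entropy(void)
{
	struct pollfd pfd;
	int fd;

	if ((fd = open("/dev/random", O_RDONLY)) == -1)
		return -1;
	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Block indefinitely until the entropy pool is ready.  */
	if (poll(&pfd, 1, -1) == -1) {
		(void)close(fd);
		return -1;
	}
	(void)close(fd);
	return 0;
}
#endif
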
static int
random_kqfilter(dev_t dev, struct knote *kn)
{

	/* Validate the event filter.  */
	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		break;
	default:
		return EINVAL;
	}

	/* /dev/random may block; /dev/urandom never does.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		if (kn->kn_filter == EVFILT_READ)
			return entropy_kqfilter(kn);
		/* FALLTHROUGH */
	case RND_DEV_URANDOM:
		kn->kn_fop = &seltrue_filtops;
		return 0;
	default:
		return ENXIO;
	}
}

/*
 * random_read(dev, uio, flags)
 *
 *	Generate data from a PRNG seeded from the entropy pool.
 *
 *	- If /dev/random, block until we have full entropy, or fail
 *	  with EWOULDBLOCK, and if `depleting' entropy, return at most
 *	  the entropy pool's capacity at once.
 *
 *	- If /dev/urandom, generate data from whatever is in the
 *	  entropy pool now.
 *
 *	On interrupt, return a short read, but not shorter than 256
 *	bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is
 *	512 for historical reasons).
 */
static int
random_read(dev_t dev, struct uio *uio, int flags)
{
	int gflags;

	/* Set the appropriate GRND_* mode.  */
	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		gflags = GRND_RANDOM;
		break;
	case RND_DEV_URANDOM:
		gflags = GRND_INSECURE;
		break;
	default:
		return ENXIO;
	}

	/*
	 * Set GRND_NONBLOCK if the user requested IO_NDELAY (i.e., the
	 * file was opened with O_NONBLOCK).
	 */
	if (flags & IO_NDELAY)
		gflags |= GRND_NONBLOCK;

	/* Defer to getrandom.  */
	return dogetrandom(uio, gflags);
}

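/*
 * Illustrative sketch only, not part of the driver: since random_read
 * defers to the same dogetrandom backend as the getrandom(2) system
 * call, a hypothetical userland caller gets roughly the same behaviour
 * from either interface; GRND_INSECURE here corresponds to a read from
 * /dev/urandom, GRND_RANDOM to a read from /dev/random.
 */
#if 0
#include <sys/types.h>
#include <sys/random.h>

#include <errno.h>
#include <stddef.h>

static int
get_key(unsigned char *buf, size_t len)
{
	size_t got = 0;
	ssize_t n;

	while (got < len) {
		/* Like a /dev/urandom read: never blocks for entropy.  */
		n = getrandom(buf + got, len - got, GRND_INSECURE);
		if (n == -1) {
			if (errno == EINTR)
				continue;
			return -1;
		}
		got += (size_t)n;
	}
	return 0;
}
#endif
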
/*
 * random_write(dev, uio, flags)
 *
 *	Enter data from uio into the entropy pool.
 *
 *	Assume privileged users provide full entropy, and unprivileged
 *	users provide no entropy.  If you have a nonuniform source of
 *	data with n bytes of min-entropy, hash it with an XOF like
 *	SHAKE128 into exactly n bytes first.
 */
static int
random_write(dev_t dev, struct uio *uio, int flags)
{
	kauth_cred_t cred = kauth_cred_get();
	uint8_t *buf;
	bool privileged = false, any = false;
	int error = 0;

	/* Verify user's authorization to affect the entropy pool.  */
	error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/*
	 * Check whether user is privileged.  If so, assume user
	 * furnishes full-entropy data; if not, accept user's data but
	 * assume it has zero entropy when we do accounting.  If you
	 * want to specify less entropy, use ioctl(RNDADDDATA).
	 */
	if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
		NULL, NULL, NULL, NULL) == 0)
		privileged = true;

	/* Get a buffer for transfers.  */
	buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP);

	/* Consume data.  */
	while (uio->uio_resid) {
		size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE);

		/* Transfer n bytes in and enter them into the pool.  */
		error = uiomove(buf, n, uio);
		if (error)
			break;
		rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0);
		any = true;

		/* Now's a good time to yield if needed.  */
		preempt_point();

		/* Check for interruption.  */
		if (__predict_false(curlwp->l_flag & LW_PENDSIG) &&
		    sigispending(curlwp, 0)) {
			error = EINTR;
			break;
		}
	}

	/* Zero the buffer and free it.  */
	explicit_memset(buf, 0, RANDOM_BUFSIZE);
	kmem_free(buf, RANDOM_BUFSIZE);

	/* If we added anything, consolidate entropy now.  */
	if (any)
		entropy_consolidate();

	return error;
}

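/*
 * Illustrative sketch only, not part of the driver: conditioning a
 * nonuniform sample before feeding it to /dev/random, as suggested in
 * the comment above random_write.  The shake128_xof() helper is
 * hypothetical -- it stands in for any SHAKE128 (or other XOF)
 * implementation from a crypto library -- and EST_ENTROPY_BYTES is the
 * caller's own estimate of the sample's min-entropy in bytes.
 */
#if 0
#include <sys/types.h>

#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

#define	EST_ENTROPY_BYTES	32	/* assumed min-entropy of sample */

/* Hypothetical XOF: absorb inlen bytes of in, squeeze outlen bytes.  */
void	shake128_xof(void *out, size_t outlen, const void *in, size_t inlen);

static int
feed_pool(const void *sample, size_t samplelen)
{
	unsigned char seed[EST_ENTROPY_BYTES];
	int fd;

	/* Compress the sample to exactly as many bytes as its entropy.  */
	shake128_xof(seed, sizeof(seed), sample, samplelen);

	if ((fd = open("/dev/random", O_WRONLY)) == -1)
		return -1;
	if (write(fd, seed, sizeof(seed)) != (ssize_t)sizeof(seed)) {
		(void)close(fd);
		return -1;
	}
	(void)close(fd);
	return 0;
}
#endif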