/*	$OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $	*/
/*	$NetBSD: via_padlock.c,v 1.29 2020/06/14 23:20:15 riastradh Exp $ */

/*-
 * Copyright (c) 2003 Jason Wright
 * Copyright (c) 2003, 2004 Theo de Raadt
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.29 2020/06/14 23:20:15 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/cpu.h>

#include <x86/specialreg.h>

#include <machine/cpufunc.h>
#include <machine/cpuvar.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>
#include <crypto/rijndael/rijndael.h>

#include <opencrypto/cryptosoft_xform.c>

#include <x86/via_padlock.h>

static int	via_padlock_match(device_t, cfdata_t, void *);
static void	via_padlock_attach(device_t, device_t, void *);
static int	via_padlock_detach(device_t, int);
static void	via_padlock_attach_intr(device_t);

CFATTACH_DECL_NEW(
    padlock,
    sizeof(struct via_padlock_softc),
    via_padlock_match,
    via_padlock_attach,
    via_padlock_detach,
    NULL
);

int	via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *);
int	via_padlock_crypto_process(void *, struct cryptop *, int);
int	via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *,
	    struct swcr_data *, void *);
int	via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *,
	    struct via_padlock_session *, struct via_padlock_softc *, void *);
int	via_padlock_crypto_freesession(void *, uint64_t);
static	__inline void via_padlock_cbc(void *, void *, void *, void *, int,
	    void *);

static void
via_c3_ace_init(struct via_padlock_softc *sc)
{
	/*
	 * There is no reason to call into the kernel to use this
	 * driver from userspace, because the crypto instructions can
	 * be directly accessed there.  Setting CRYPTOCAP_F_SOFTWARE
	 * has approximately the right semantics though the name is
	 * confusing (however, consider that crypto via unprivileged
	 * instructions _is_ "just software" in some sense).
	 */
	sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not get a crypto driver ID\n");
		return;
	}

	sc->sc_cid_attached = true;

	/*
	 * Register ourselves with the opencrypto subsystem.  Although
	 * we do not accelerate the various HMAC algorithms in hardware,
	 * we register and handle them anyway (via the cryptosoft hash
	 * code), because opencrypto prefers drivers that support all
	 * requested algorithms.
	 *
	 * XXX We should actually implement the HMAC modes this hardware
	 * XXX can accelerate (wrap its plain SHA1/SHA2 as HMAC) and
	 * XXX strongly consider removing those passed through to cryptosoft.
	 * XXX As it stands, we can "steal" sessions from drivers which could
	 * XXX better accelerate them.
	 *
	 * XXX Note the ordering dependency between when this (or any
	 * XXX crypto driver) attaches and when cryptosoft does.  We are
	 * XXX basically counting on the swcrypto pseudo-device to just
	 * XXX happen to attach last, or _it_ will steal every session
	 * XXX from _us_!
	 */
#define REGISTER(alg) \
	crypto_register(sc->sc_cid, alg, 0, 0, \
	    via_padlock_crypto_newsession, via_padlock_crypto_freesession, \
	    via_padlock_crypto_process, sc);

	REGISTER(CRYPTO_AES_CBC);
	REGISTER(CRYPTO_MD5_HMAC_96);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC_96);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_SHA2_HMAC);
}

int
via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct via_padlock_softc *sc = arg;
	struct via_padlock_session *ses = NULL;
	const struct swcr_auth_hash *axf;
	struct swcr_data *swd;
	int sesn, i, cw0;
	KASSERT(sc != NULL /*, ("via_padlock_crypto_newsession: null softc")*/);
	if (sc == NULL || sidp == NULL || cri == NULL)
		return (EINVAL);

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
		    M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

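		/*
		 * No free slot: grow the session array by one entry,
		 * copy the existing sessions over, and zero the stale
		 * copy before freeing it so no key material is left
		 * behind.
		 */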
		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
			    M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
			memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	memset(ses, 0, sizeof(*ses));
	ses->ses_used = 1;

	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_AES_CBC:
			switch (c->cri_klen) {
			case 128:
				cw0 = C3_CRYPT_CWLO_KEY128;
				break;
			case 192:
				cw0 = C3_CRYPT_CWLO_KEY192;
				break;
			case 256:
				cw0 = C3_CRYPT_CWLO_KEY256;
				break;
			default:
				return (EINVAL);
			}
			cw0 |= C3_CRYPT_CWLO_ALG_AES |
				C3_CRYPT_CWLO_KEYGEN_SW |
				C3_CRYPT_CWLO_NORMAL;

			ses->ses_klen = c->cri_klen;
			ses->ses_cw0 = cw0;

			/* Build expanded keys for both directions */
			rijndaelKeySetupEnc(ses->ses_ekey, c->cri_key,
			    c->cri_klen);
			rijndaelKeySetupDec(ses->ses_dkey, c->cri_key,
			    c->cri_klen);
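			/*
			 * rijndaelKeySetup{Enc,Dec} pack the round keys
			 * as big-endian words; byte-swap them so the
			 * in-memory key schedule is in the byte order the
			 * ACE unit consumes.  C3_CRYPT_CWLO_KEYGEN_SW
			 * above tells the engine to use this
			 * software-generated schedule instead of
			 * expanding the key itself.
			 */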
			for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
				ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
				ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
			}

			break;

		/* Use hashing implementations from the cryptosoft code. */
		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_HMAC:
			/* The key length (in bits) selects the SHA-2 variant. */
			if (c->cri_klen == 256)
				axf = &swcr_auth_hash_hmac_sha2_256;
			else if (c->cri_klen == 384)
				axf = &swcr_auth_hash_hmac_sha2_384;
			else if (c->cri_klen == 512)
				axf = &swcr_auth_hash_hmac_sha2_512;
			else {
				return EINVAL;
			}
		authcommon:
			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
			    M_NOWAIT|M_ZERO);
			if (swd == NULL) {
				via_padlock_crypto_freesession(sc, sesn);
				return (ENOMEM);
			}
			ses->swd = swd;

			swd->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if (swd->sw_ictx == NULL) {
				via_padlock_crypto_freesession(sc, sesn);
				return (ENOMEM);
			}

			swd->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if (swd->sw_octx == NULL) {
				via_padlock_crypto_freesession(sc, sesn);
				return (ENOMEM);
			}

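			/*
			 * Precompute the HMAC inner and outer contexts:
			 * hash (key ^ ipad) into sw_ictx and (key ^ opad)
			 * into sw_octx.  The caller's key is XORed in
			 * place and restored afterwards.
			 */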
			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_IPAD_VAL;

			axf->Init(swd->sw_ictx);
			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (c->cri_klen / 8));

			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
				    HMAC_OPAD_VAL);

			axf->Init(swd->sw_octx);
			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (c->cri_klen / 8));

			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_OPAD_VAL;

			swd->sw_axf = axf;
			swd->sw_alg = c->cri_alg;

			break;
		default:
			return (EINVAL);
		}
	}

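	/*
	 * Hand back a session ID that encodes the index into
	 * sc_sessions; VIAC3_SESSION() recovers it in freesession
	 * and process.
	 */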
	*sidp = VIAC3_SID(0, sesn);
	return (0);
}

int
via_padlock_crypto_freesession(void *arg, uint64_t tid)
{
	struct via_padlock_softc *sc = arg;
	struct swcr_data *swd;
	const struct swcr_auth_hash *axf;
	int sesn;
	uint32_t sid = ((uint32_t)tid) & 0xffffffff;

	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
	if (sc == NULL)
		return (EINVAL);

	sesn = VIAC3_SESSION(sid);
	if (sesn >= sc->sc_nsessions)
		return (EINVAL);

	if (sc->sc_sessions[sesn].swd) {
		swd = sc->sc_sessions[sesn].swd;
		axf = swd->sw_axf;

		if (swd->sw_ictx) {
			memset(swd->sw_ictx, 0, axf->ctxsize);
			free(swd->sw_ictx, M_CRYPTO_DATA);
		}
		if (swd->sw_octx) {
			memset(swd->sw_octx, 0, axf->ctxsize);
			free(swd->sw_octx, M_CRYPTO_DATA);
		}
		free(swd, M_CRYPTO_DATA);
	}

	memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
	return (0);
}

static __inline void
via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
    void *iv)
{
	unsigned int cr0;
	int s;

	s = splhigh();

	cr0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(cr0 & ~(CR0_EM|CR0_TS));

	/* Do the deed */
	__asm __volatile("pushfl; popfl");	/* force key reload */
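	/*
	 * REP XCRYPT-CBC register interface (mirrored in the asm
	 * constraints below): %eax = IV pointer, %ebx = key pointer,
	 * %ecx = block count, %edx = control word pointer,
	 * %esi = source, %edi = destination.  The pushfl/popfl above
	 * forces the engine to reload the key schedule.
	 */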
	__asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
			: "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
			: "memory", "cc");

	lcr0(cr0);

	splx(s);
}

int
via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, void *buf)
{
	int	type;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	return (swcr_authcompute(crp, crd, sw, buf, type));
}

int
via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf)
{
	uint32_t *key;

	if ((crd->crd_len % 16) != 0)
		return (EINVAL);

	sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
	if (sc->op_buf == NULL)
		return (ENOMEM);

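	/*
	 * Set up the control word and IV.  For encryption the IV is
	 * taken from crd_iv or generated here, and is written into
	 * the packet at crd_inject unless the caller says it is
	 * already there; for decryption it is taken from crd_iv or
	 * read back from the packet.
	 */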
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
		key = ses->ses_ekey;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(sc->op_iv, crd->crd_iv, 16);
		else
			cprng_fast(sc->op_iv, 16);

		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copyback((struct uio *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else
				memcpy((char *)crp->crp_buf + crd->crd_inject,
				    sc->op_iv, 16);
		}
	} else {
		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
		key = ses->ses_dkey;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(sc->op_iv, crd->crd_iv, 16);
		else {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else
				memcpy(sc->op_iv, (char *)crp->crp_buf +
				    crd->crd_inject, 16);
		}
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copydata((struct mbuf *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		cuio_copydata((struct uio *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else
		memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip,
		    crd->crd_len);

	sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
	via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
	    crd->crd_len / 16, sc->op_iv);

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copyback((struct mbuf *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		cuio_copyback((struct uio *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else
		memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf,
		    crd->crd_len);

	if (sc->op_buf != NULL) {
		memset(sc->op_buf, 0, crd->crd_len);
		free(sc->op_buf, M_DEVBUF);
		sc->op_buf = NULL;
	}

	return 0;
}

int
via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint)
{
	struct via_padlock_softc *sc = arg;
	struct via_padlock_session *ses;
	struct cryptodesc *crd;
	int sesn, err = 0;

	KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/);
	if (crp == NULL || crp->crp_callback == NULL) {
		/* Cannot report the error through crp in this case. */
		return (EINVAL);
	}

	sesn = VIAC3_SESSION(crp->crp_sid);
	if (sesn >= sc->sc_nsessions) {
		err = EINVAL;
		goto out;
	}
	ses = &sc->sc_sessions[sesn];

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
			if ((err = via_padlock_crypto_encdec(crp, crd, ses,
			    sc, crp->crp_buf)) != 0)
				goto out;
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_HMAC:
			if ((err = via_padlock_crypto_swauth(crp, crd,
			    ses->swd, crp->crp_buf)) != 0)
				goto out;
			break;

		default:
			err = EINVAL;
			goto out;
		}
	}
out:
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}

static int
via_padlock_match(device_t parent, cfdata_t cf, void *opaque)
{
	struct cpufeature_attach_args *cfaa = opaque;
	struct cpu_info *ci = cfaa->ci;

	if (strcmp(cfaa->name, "padlock") != 0)
		return 0;
	if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0)
		return 0;
	if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
		return 0;
	return 1;
}

static void
via_padlock_attach(device_t parent, device_t self, void *opaque)
{
	struct via_padlock_softc *sc = device_private(self);

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal(": VIA PadLock\n");

	pmf_device_register(self, NULL, NULL);

	config_interrupts(self, via_padlock_attach_intr);
}

static void
via_padlock_attach_intr(device_t self)
{
	struct via_padlock_softc *sc = device_private(self);

	aprint_normal("%s:", device_xname(self));
	if (cpu_feature[4] & CPUID_VIA_HAS_RNG) {
		aprint_normal(" RNG");
	}
	if (cpu_feature[4] & CPUID_VIA_HAS_ACE) {
		via_c3_ace_init(sc);
		aprint_normal(" ACE");
	}
	aprint_normal("\n");
}

static int
via_padlock_detach(device_t self, int flags)
{
	struct via_padlock_softc *sc = device_private(self);

	if (sc->sc_cid_attached) {
		crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC);
		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96);
		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC);
		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96);
		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC);
		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96);
		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC);
		crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC);
		sc->sc_cid_attached = false;
	}

	pmf_device_deregister(self);

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, padlock, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
padlock_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_padlock,
		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_padlock,
		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
#endif
		return error;
	default:
		return ENOTTY;
	}
}
595