/* via_padlock.c revision 1.31 */
1/*	$OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $	*/
2/*	$NetBSD: via_padlock.c,v 1.31 2020/06/29 23:58:44 riastradh Exp $ */
3
4/*-
5 * Copyright (c) 2003 Jason Wright
6 * Copyright (c) 2003, 2004 Theo de Raadt
7 * All rights reserved.
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22#include <sys/cdefs.h>
23__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.31 2020/06/29 23:58:44 riastradh Exp $");
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/signalvar.h>
28#include <sys/kernel.h>
29#include <sys/device.h>
30#include <sys/module.h>
31#include <sys/malloc.h>
32#include <sys/mbuf.h>
33#include <sys/cpu.h>
34
35#include <x86/specialreg.h>
36
37#include <machine/cpufunc.h>
38#include <machine/cpuvar.h>
39
40#include <crypto/aes/aes_bear.h>
41
42#include <opencrypto/cryptodev.h>
43#include <opencrypto/cryptosoft.h>
44#include <opencrypto/xform.h>
45
46#include <opencrypto/cryptosoft_xform.c>
47
48#include <x86/via_padlock.h>
49
/* autoconf(9) glue. */
static int	via_padlock_match(device_t, cfdata_t, void *);
static void	via_padlock_attach(device_t, device_t, void *);
static int	via_padlock_detach(device_t, int);
static void	via_padlock_attach_intr(device_t);

/* Device attachment: driver "padlock", no activation hook. */
CFATTACH_DECL_NEW(
    padlock,
    sizeof(struct via_padlock_softc),
    via_padlock_match,
    via_padlock_attach,
    via_padlock_detach,
    NULL
);

/* opencrypto(9) entry points and internal helpers. */
int	via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *);
int	via_padlock_crypto_process(void *, struct cryptop *, int);
int	via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *,
	    struct swcr_data *, void *);
int	via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *,
	    struct via_padlock_session *, struct via_padlock_softc *, void *);
int	via_padlock_crypto_freesession(void *, uint64_t);
static	__inline void via_padlock_cbc(void *, void *, void *, void *, int,
	    void *);
73
74static void
75via_c3_ace_init(struct via_padlock_softc *sc)
76{
77	/*
78	 * There is no reason to call into the kernel to use this
79	 * driver from userspace, because the crypto instructions can
80	 * be directly accessed there.  Setting CRYPTOCAP_F_SOFTWARE
81	 * has approximately the right semantics though the name is
82	 * confusing (however, consider that crypto via unprivileged
83	 * instructions _is_ "just software" in some sense).
84	 */
85	sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
86	if (sc->sc_cid < 0) {
87		aprint_error_dev(sc->sc_dev,
88		    "could not get a crypto driver ID\n");
89		return;
90	}
91
92	sc->sc_cid_attached = true;
93
94	/*
95	 * Ask the opencrypto subsystem to register ourselves. Although
96	 * we don't support hardware offloading for various HMAC algorithms,
97	 * we will handle them, because opencrypto prefers drivers that
98	 * support all requested algorithms.
99	 *
100	 *
101	 * XXX We should actually implement the HMAC modes this hardware
102	 * XXX can accellerate (wrap its plain SHA1/SHA2 as HMAC) and
103	 * XXX strongly consider removing those passed through to cryptosoft.
104	 * XXX As it stands, we can "steal" sessions from drivers which could
105	 * XXX better accellerate them.
106	 *
107	 * XXX Note the ordering dependency between when this (or any
108	 * XXX crypto driver) attaches and when cryptosoft does.  We are
109	 * XXX basically counting on the swcrypto pseudo-device to just
110	 * XXX happen to attach last, or _it_ will steal every session
111	 * XXX from _us_!
112	 */
113#define REGISTER(alg) \
114	crypto_register(sc->sc_cid, alg, 0, 0, \
115	    via_padlock_crypto_newsession, via_padlock_crypto_freesession, \
116	    via_padlock_crypto_process, sc);
117
118	REGISTER(CRYPTO_AES_CBC);
119	REGISTER(CRYPTO_MD5_HMAC_96);
120	REGISTER(CRYPTO_MD5_HMAC);
121	REGISTER(CRYPTO_SHA1_HMAC_96);
122	REGISTER(CRYPTO_SHA1_HMAC);
123	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
124	REGISTER(CRYPTO_RIPEMD160_HMAC);
125	REGISTER(CRYPTO_SHA2_HMAC);
126}
127
128int
129via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
130{
131	struct cryptoini *c;
132	struct via_padlock_softc *sc = arg;
133	struct via_padlock_session *ses = NULL;
134	const struct swcr_auth_hash *axf;
135	struct swcr_data *swd;
136	int sesn, i, cw0;
137
138	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
139	if (sc == NULL || sidp == NULL || cri == NULL)
140		return (EINVAL);
141
142	if (sc->sc_sessions == NULL) {
143		ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
144		    M_NOWAIT);
145		if (ses == NULL)
146			return (ENOMEM);
147		sesn = 0;
148		sc->sc_nsessions = 1;
149	} else {
150		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
151			if (sc->sc_sessions[sesn].ses_used == 0) {
152				ses = &sc->sc_sessions[sesn];
153				break;
154			}
155		}
156
157		if (ses == NULL) {
158			sesn = sc->sc_nsessions;
159			ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
160			    M_NOWAIT);
161			if (ses == NULL)
162				return (ENOMEM);
163			memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
164			memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
165			free(sc->sc_sessions, M_DEVBUF);
166			sc->sc_sessions = ses;
167			ses = &sc->sc_sessions[sesn];
168			sc->sc_nsessions++;
169		}
170	}
171
172	memset(ses, 0, sizeof(*ses));
173	ses->ses_used = 1;
174
175	for (c = cri; c != NULL; c = c->cri_next) {
176		switch (c->cri_alg) {
177		case CRYPTO_AES_CBC:
178			memset(ses->ses_ekey, 0, sizeof(ses->ses_ekey));
179			memset(ses->ses_dkey, 0, sizeof(ses->ses_dkey));
180
181			switch (c->cri_klen) {
182			case 128:
183				br_aes_ct_keysched_stdenc(ses->ses_ekey,
184				    c->cri_key, 16);
185				br_aes_ct_keysched_stddec(ses->ses_dkey,
186				    c->cri_key, 16);
187				cw0 = C3_CRYPT_CWLO_KEY128;
188				break;
189			case 192:
190				br_aes_ct_keysched_stdenc(ses->ses_ekey,
191				    c->cri_key, 24);
192				br_aes_ct_keysched_stddec(ses->ses_dkey,
193				    c->cri_key, 24);
194				cw0 = C3_CRYPT_CWLO_KEY192;
195				break;
196			case 256:
197				br_aes_ct_keysched_stdenc(ses->ses_ekey,
198				    c->cri_key, 32);
199				br_aes_ct_keysched_stddec(ses->ses_dkey,
200				    c->cri_key, 32);
201				cw0 = C3_CRYPT_CWLO_KEY256;
202				break;
203			default:
204				return (EINVAL);
205			}
206			cw0 |= C3_CRYPT_CWLO_ALG_AES |
207				C3_CRYPT_CWLO_KEYGEN_SW |
208				C3_CRYPT_CWLO_NORMAL;
209
210			ses->ses_klen = c->cri_klen;
211			ses->ses_cw0 = cw0;
212
213			/* Convert words to host byte order (???) */
214			for (i = 0; i < 4*(AES_256_NROUNDS + 1); i++) {
215				ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
216				ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
217			}
218			break;
219
220		/* Use hashing implementations from the cryptosoft code. */
221		case CRYPTO_MD5_HMAC:
222			axf = &swcr_auth_hash_hmac_md5;
223			goto authcommon;
224		case CRYPTO_MD5_HMAC_96:
225			axf = &swcr_auth_hash_hmac_md5_96;
226			goto authcommon;
227		case CRYPTO_SHA1_HMAC:
228			axf = &swcr_auth_hash_hmac_sha1;
229			goto authcommon;
230		case CRYPTO_SHA1_HMAC_96:
231			axf = &swcr_auth_hash_hmac_sha1_96;
232			goto authcommon;
233		case CRYPTO_RIPEMD160_HMAC:
234			axf = &swcr_auth_hash_hmac_ripemd_160;
235			goto authcommon;
236		case CRYPTO_RIPEMD160_HMAC_96:
237			axf = &swcr_auth_hash_hmac_ripemd_160_96;
238			goto authcommon;
239		case CRYPTO_SHA2_HMAC:
240			if (cri->cri_klen == 256)
241				axf = &swcr_auth_hash_hmac_sha2_256;
242			else if (cri->cri_klen == 384)
243				axf = &swcr_auth_hash_hmac_sha2_384;
244			else if (cri->cri_klen == 512)
245				axf = &swcr_auth_hash_hmac_sha2_512;
246			else {
247				return EINVAL;
248			}
249		authcommon:
250			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
251			    M_NOWAIT|M_ZERO);
252			if (swd == NULL) {
253				via_padlock_crypto_freesession(sc, sesn);
254				return (ENOMEM);
255			}
256			ses->swd = swd;
257
258			swd->sw_ictx = malloc(axf->ctxsize,
259			    M_CRYPTO_DATA, M_NOWAIT);
260			if (swd->sw_ictx == NULL) {
261				via_padlock_crypto_freesession(sc, sesn);
262				return (ENOMEM);
263			}
264
265			swd->sw_octx = malloc(axf->ctxsize,
266			    M_CRYPTO_DATA, M_NOWAIT);
267			if (swd->sw_octx == NULL) {
268				via_padlock_crypto_freesession(sc, sesn);
269				return (ENOMEM);
270			}
271
272			for (i = 0; i < c->cri_klen / 8; i++)
273				c->cri_key[i] ^= HMAC_IPAD_VAL;
274
275			axf->Init(swd->sw_ictx);
276			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
277			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
278			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
279
280			for (i = 0; i < c->cri_klen / 8; i++)
281				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
282				    HMAC_OPAD_VAL);
283
284			axf->Init(swd->sw_octx);
285			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
286			axf->Update(swd->sw_octx, hmac_opad_buffer,
287			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
288
289			for (i = 0; i < c->cri_klen / 8; i++)
290				c->cri_key[i] ^= HMAC_OPAD_VAL;
291
292			swd->sw_axf = axf;
293			swd->sw_alg = c->cri_alg;
294
295			break;
296		default:
297			return (EINVAL);
298		}
299	}
300
301	*sidp = VIAC3_SID(0, sesn);
302	return (0);
303}
304
305int
306via_padlock_crypto_freesession(void *arg, uint64_t tid)
307{
308	struct via_padlock_softc *sc = arg;
309	struct swcr_data *swd;
310	const struct swcr_auth_hash *axf;
311	int sesn;
312	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
313
314	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
315	if (sc == NULL)
316		return (EINVAL);
317
318	sesn = VIAC3_SESSION(sid);
319	if (sesn >= sc->sc_nsessions)
320		return (EINVAL);
321
322	if (sc->sc_sessions[sesn].swd) {
323		swd = sc->sc_sessions[sesn].swd;
324		axf = swd->sw_axf;
325
326		if (swd->sw_ictx) {
327			memset(swd->sw_ictx, 0, axf->ctxsize);
328			free(swd->sw_ictx, M_CRYPTO_DATA);
329		}
330		if (swd->sw_octx) {
331			memset(swd->sw_octx, 0, axf->ctxsize);
332			free(swd->sw_octx, M_CRYPTO_DATA);
333		}
334		free(swd, M_CRYPTO_DATA);
335	}
336
337	memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
338	return (0);
339}
340
/*
 * via_padlock_cbc: run 'rep' 16-byte blocks from 'src' to 'dst' through
 * the PadLock REP XCRYPT-CBC instruction, using control word 'cw', key
 * schedule 'key' and initialization vector 'iv'.  Operands are passed
 * in fixed registers: EAX=iv, EBX=key, ECX=count, EDX=cw, ESI=src,
 * EDI=dst.  'src' and 'dst' may be the same buffer (in-place).
 *
 * NOTE(review): the buffers presumably need the alignment the ACE unit
 * requires -- confirm against callers / the PadLock programming guide.
 */
static __inline void
via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
    void *iv)
{
	unsigned int cr0;
	int s;

	/* Keep interrupt handlers from clobbering CR0 or the operation. */
	s = splhigh();

	cr0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(cr0 & ~(CR0_EM|CR0_TS));

	/* Do the deed */
	__asm __volatile("pushf; popf");	/* force key reload */
	__asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
			: "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
			: "memory", "cc");

	/* Restore the original CR0 (EM/TS) state. */
	lcr0(cr0);

	splx(s);
}
363
364int
365via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
366    struct swcr_data *sw, void *buf)
367{
368	int	type;
369
370	if (crp->crp_flags & CRYPTO_F_IMBUF)
371		type = CRYPTO_BUF_MBUF;
372	else
373		type= CRYPTO_BUF_IOV;
374
375	return (swcr_authcompute(crp, crd, sw, buf, type));
376}
377
/*
 * Run one AES-CBC descriptor through the ACE unit.
 *
 * The data is gathered from the request buffer (mbuf chain, uio, or
 * contiguous memory, per crp_flags) into a temporary bounce buffer,
 * transformed in place by via_padlock_cbc(), and copied back.
 *
 * IV handling:
 *  - encrypt: use crd_iv when CRD_F_IV_EXPLICIT, otherwise generate a
 *    random IV; unless CRD_F_IV_PRESENT, write the IV into the request
 *    buffer at crd_inject;
 *  - decrypt: use crd_iv when CRD_F_IV_EXPLICIT, otherwise read the IV
 *    from the request buffer at crd_inject.
 *
 * Returns 0 on success, EINVAL if crd_len is not a multiple of the
 * 16-byte AES block size, or ENOMEM if the bounce buffer cannot be
 * allocated.
 */
int
via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf)
{
	uint32_t *key;

	/* The hardware operates on whole AES blocks only. */
	if ((crd->crd_len % 16) != 0)
		return (EINVAL);

	sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
	if (sc->op_buf == NULL)
		return (ENOMEM);

	if (crd->crd_flags & CRD_F_ENCRYPT) {
		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
		key = ses->ses_ekey;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(sc->op_iv, crd->crd_iv, 16);
		else
			cprng_fast(sc->op_iv, 16);

		/* Store the IV in the request unless it is already there. */
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copyback((struct uio *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else
				memcpy((char *)crp->crp_buf + crd->crd_inject,
				    sc->op_iv, 16);
		}
	} else {
		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
		key = ses->ses_dkey;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(sc->op_iv, crd->crd_iv, 16);
		else {
			/* Fetch the IV from the request buffer. */
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else
				memcpy(sc->op_iv, (char *)crp->crp_buf +
				    crd->crd_inject, 16);
		}
	}

	/* Gather the payload into the bounce buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copydata((struct mbuf *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		cuio_copydata((struct uio *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else
		memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip,
		    crd->crd_len);

	/* Transform in place (src == dst == op_buf). */
	sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
	via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
	    crd->crd_len / 16, sc->op_iv);

	/* Scatter the result back into the request buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copyback((struct mbuf *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		cuio_copyback((struct uio *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else
		memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf,
		    crd->crd_len);

	/* Scrub and release the bounce buffer (held plaintext). */
	if (sc->op_buf != NULL) {
		memset(sc->op_buf, 0, crd->crd_len);
		free(sc->op_buf, M_DEVBUF);
		sc->op_buf = NULL;
	}

	return 0;
}
460
461int
462via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint)
463{
464	struct via_padlock_softc *sc = arg;
465	struct via_padlock_session *ses;
466	struct cryptodesc *crd;
467	int sesn, err = 0;
468
469	KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/);
470	if (crp == NULL || crp->crp_callback == NULL) {
471		err = EINVAL;
472		goto out;
473	}
474
475	sesn = VIAC3_SESSION(crp->crp_sid);
476	if (sesn >= sc->sc_nsessions) {
477		err = EINVAL;
478		goto out;
479	}
480	ses = &sc->sc_sessions[sesn];
481
482	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
483		switch (crd->crd_alg) {
484		case CRYPTO_AES_CBC:
485			if ((err = via_padlock_crypto_encdec(crp, crd, ses,
486			    sc, crp->crp_buf)) != 0)
487				goto out;
488			break;
489
490		case CRYPTO_MD5_HMAC:
491		case CRYPTO_SHA1_HMAC:
492		case CRYPTO_RIPEMD160_HMAC:
493		case CRYPTO_SHA2_HMAC:
494			if ((err = via_padlock_crypto_swauth(crp, crd,
495			    ses->swd, crp->crp_buf)) != 0)
496				goto out;
497			break;
498
499		default:
500			err = EINVAL;
501			goto out;
502		}
503	}
504out:
505	crp->crp_etype = err;
506	crypto_done(crp);
507	return (err);
508}
509
510static int
511via_padlock_match(device_t parent, cfdata_t cf, void *opaque)
512{
513	struct cpufeature_attach_args *cfaa = opaque;
514	struct cpu_info *ci = cfaa->ci;
515
516	if (strcmp(cfaa->name, "padlock") != 0)
517		return 0;
518	if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0)
519		return 0;
520	if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
521		return 0;
522	return 1;
523}
524
/*
 * Attach: announce the device and defer the crypto setup to
 * via_padlock_attach_intr() via config_interrupts(), i.e. until
 * interrupts are enabled.
 */
static void
via_padlock_attach(device_t parent, device_t self, void *opaque)
{
	struct via_padlock_softc *sc = device_private(self);

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal(": VIA PadLock\n");

	/* Register with pmf(9); no suspend/resume handlers needed. */
	pmf_device_register(self, NULL, NULL);

	config_interrupts(self, via_padlock_attach_intr);
}
539
/*
 * Deferred attach hook, run once interrupts are enabled: report which
 * PadLock units are present and hook the ACE (AES) unit into
 * opencrypto.  The RNG is only announced here, not set up by this
 * function.
 */
static void
via_padlock_attach_intr(device_t self)
{
	struct via_padlock_softc *sc = device_private(self);

	aprint_normal("%s:", device_xname(self));
	if (cpu_feature[4] & CPUID_VIA_HAS_RNG) {
		aprint_normal(" RNG");
	}
	if (cpu_feature[4] & CPUID_VIA_HAS_ACE) {
		via_c3_ace_init(sc);
		aprint_normal(" ACE");
	}
	aprint_normal("\n");
}
555
556static int
557via_padlock_detach(device_t self, int flags)
558{
559	struct via_padlock_softc *sc = device_private(self);
560
561	if (sc->sc_cid_attached) {
562		crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC);
563		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96);
564		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC);
565		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96);
566		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC);
567		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96);
568		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC);
569		crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC);
570		sc->sc_cid_attached = false;
571	}
572
573	pmf_device_deregister(self);
574
575	return 0;
576}
577
/* Kernel module glue: a driver-class module named "padlock". */
MODULE(MODULE_CLASS_DRIVER, padlock, NULL);

#ifdef _MODULE
/* Autogenerated autoconf tables, needed only for the modular build. */
#include "ioconf.c"
#endif
583
/*
 * Module control entry point: init/fini the autoconf component when
 * built as a module; in a built-in kernel both commands succeed as
 * no-ops (error stays 0).  Unknown commands return ENOTTY.
 */
static int
padlock_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_padlock,
		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_padlock,
		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
#endif
		return error;
	default:
		return ENOTTY;
	}
}
606