via_padlock.c revision 1.24
1/*	$OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $	*/
2/*	$NetBSD: via_padlock.c,v 1.24 2015/04/13 16:03:51 riastradh Exp $ */
3
4/*-
5 * Copyright (c) 2003 Jason Wright
6 * Copyright (c) 2003, 2004 Theo de Raadt
7 * All rights reserved.
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22#include <sys/cdefs.h>
23__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.24 2015/04/13 16:03:51 riastradh Exp $");
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/signalvar.h>
28#include <sys/kernel.h>
29#include <sys/device.h>
30#include <sys/module.h>
31#include <sys/rndsource.h>
32#include <sys/malloc.h>
33#include <sys/mbuf.h>
34#include <sys/cpu.h>
35
36#include <x86/specialreg.h>
37
38#include <machine/cpufunc.h>
39#include <machine/cpuvar.h>
40
41#include <opencrypto/cryptodev.h>
42#include <opencrypto/cryptosoft.h>
43#include <opencrypto/xform.h>
44#include <crypto/rijndael/rijndael.h>
45
46#include <opencrypto/cryptosoft_xform.c>
47
48#include <x86/via_padlock.h>
49
50static int	via_padlock_match(device_t, cfdata_t, void *);
51static void	via_padlock_attach(device_t, device_t, void *);
52static int	via_padlock_detach(device_t, int);
53static void	via_padlock_attach_intr(device_t);
54
55CFATTACH_DECL_NEW(
56    padlock,
57    sizeof(struct via_padlock_softc),
58    via_padlock_match,
59    via_padlock_attach,
60    via_padlock_detach,
61    NULL
62);
63
64int	via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *);
65int	via_padlock_crypto_process(void *, struct cryptop *, int);
66int	via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *,
67	    struct swcr_data *, void *);
68int	via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *,
69	    struct via_padlock_session *, struct via_padlock_softc *, void *);
70int	via_padlock_crypto_freesession(void *, uint64_t);
71static	__inline void via_padlock_cbc(void *, void *, void *, void *, int,
72	    void *);
73
/*
 * via_c3_rnd: harvest random data from the C3 hardware RNG via the
 * "xstorerng" instruction and feed it to the kernel entropy pool.
 * Runs from a callout and reschedules itself (sc_rnd_hz ticks).
 *
 * arg is the softc; the callout is armed with it in via_c3_rnd_init().
 */
static void
via_c3_rnd(void *arg)
{
	struct via_padlock_softc *sc = arg;

	uint32_t creg0, len = VIAC3_RNG_BUFSIZ;
	uint32_t buffer[VIAC3_RNG_BUFSIZ/4 + 1]; /* CPU goes 3 bytes beyond */
	uint32_t eax, ecx, edi; /* XXX write-only, but necessary it seems */

	/*
	 * Sadly, we have to monkey with the coprocessor enable and fault
	 * registers, which are really for the FPU, in order to read
	 * from the RNG.
	 *
	 * Don't remove CR0_TS from the call below -- comments in the Linux
	 * driver indicate that the xstorerng instruction can generate
	 * spurious DNA faults though no FPU or SIMD state is changed
	 * even if such a fault is generated.
	 *
	 */
	/* No migration/interruption while CR0 is temporarily altered. */
	kpreempt_disable();
	x86_disable_intr();
	creg0 = rcr0();
	lcr0(creg0 & ~(CR0_EM|CR0_TS));	/* Permit access to SIMD/FPU path */
	/*
	 * Collect the random data from the C3 RNG into our buffer.
	 * We turn on maximum whitening (is this actually desirable
	 * if we will feed the data to SHA1?) (%edx[0,1] = "11").
	 */
	__asm __volatile("rep xstorerng"
			 : "=a" (eax), "=c" (ecx), "=D" (edi)
			 : "d" (3), "D" (buffer), "c" (len)
			 : "memory", "cc");
	/* Put CR0 back how it was */
	lcr0(creg0);
	x86_enable_intr();
	kpreempt_enable();
	/* Credit the data at full entropy (len * NBBY bits). */
	rnd_add_data(&sc->sc_rnd_source, buffer, len, len * NBBY);
	/* Re-arm ourselves for the next harvest. */
	callout_reset(&sc->sc_rnd_co, sc->sc_rnd_hz, via_c3_rnd, sc);
}
114
115static void
116via_c3_rnd_init(struct via_padlock_softc *sc)
117{
118	sc->sc_rnd_attached = true;
119
120	if (hz >= 100) {
121	    sc->sc_rnd_hz = 10 * hz / 100;
122	} else {
123	    sc->sc_rnd_hz = 10;
124	}
125	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
126			  RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE);
127	callout_init(&sc->sc_rnd_co, 0);
128	/* Call once to prime the pool early and set callout. */
129	via_c3_rnd(sc);
130}
131
132static void
133via_c3_ace_init(struct via_padlock_softc *sc)
134{
135	/*
136	 * There is no reason to call into the kernel to use this
137	 * driver from userspace, because the crypto instructions can
138	 * be directly accessed there.  Setting CRYPTOCAP_F_SOFTWARE
139	 * has approximately the right semantics though the name is
140	 * confusing (however, consider that crypto via unprivileged
141	 * instructions _is_ "just software" in some sense).
142	 */
143	sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
144	if (sc->sc_cid < 0) {
145		aprint_error_dev(sc->sc_dev,
146		    "could not get a crypto driver ID\n");
147		return;
148	}
149
150	sc->sc_cid_attached = true;
151
152	/*
153	 * Ask the opencrypto subsystem to register ourselves. Although
154	 * we don't support hardware offloading for various HMAC algorithms,
155	 * we will handle them, because opencrypto prefers drivers that
156	 * support all requested algorithms.
157	 *
158	 *
159	 * XXX We should actually implement the HMAC modes this hardware
160	 * XXX can accellerate (wrap its plain SHA1/SHA2 as HMAC) and
161	 * XXX strongly consider removing those passed through to cryptosoft.
162	 * XXX As it stands, we can "steal" sessions from drivers which could
163	 * XXX better accellerate them.
164	 *
165	 * XXX Note the ordering dependency between when this (or any
166	 * XXX crypto driver) attaches and when cryptosoft does.  We are
167	 * XXX basically counting on the swcrypto pseudo-device to just
168	 * XXX happen to attach last, or _it_ will steal every session
169	 * XXX from _us_!
170	 */
171#define REGISTER(alg) \
172	crypto_register(sc->sc_cid, alg, 0, 0, \
173	    via_padlock_crypto_newsession, via_padlock_crypto_freesession, \
174	    via_padlock_crypto_process, sc);
175
176	REGISTER(CRYPTO_AES_CBC);
177	REGISTER(CRYPTO_MD5_HMAC_96);
178	REGISTER(CRYPTO_MD5_HMAC);
179	REGISTER(CRYPTO_SHA1_HMAC_96);
180	REGISTER(CRYPTO_SHA1_HMAC);
181	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
182	REGISTER(CRYPTO_RIPEMD160_HMAC);
183	REGISTER(CRYPTO_SHA2_HMAC);
184}
185
186int
187via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
188{
189	struct cryptoini *c;
190	struct via_padlock_softc *sc = arg;
191	struct via_padlock_session *ses = NULL;
192	const struct swcr_auth_hash *axf;
193	struct swcr_data *swd;
194	int sesn, i, cw0;
195
196	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
197	if (sc == NULL || sidp == NULL || cri == NULL)
198		return (EINVAL);
199
200	if (sc->sc_sessions == NULL) {
201		ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
202		    M_NOWAIT);
203		if (ses == NULL)
204			return (ENOMEM);
205		sesn = 0;
206		sc->sc_nsessions = 1;
207	} else {
208		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
209			if (sc->sc_sessions[sesn].ses_used == 0) {
210				ses = &sc->sc_sessions[sesn];
211				break;
212			}
213		}
214
215		if (ses == NULL) {
216			sesn = sc->sc_nsessions;
217			ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
218			    M_NOWAIT);
219			if (ses == NULL)
220				return (ENOMEM);
221			memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
222			memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
223			free(sc->sc_sessions, M_DEVBUF);
224			sc->sc_sessions = ses;
225			ses = &sc->sc_sessions[sesn];
226			sc->sc_nsessions++;
227		}
228	}
229
230	memset(ses, 0, sizeof(*ses));
231	ses->ses_used = 1;
232
233	for (c = cri; c != NULL; c = c->cri_next) {
234		switch (c->cri_alg) {
235		case CRYPTO_AES_CBC:
236			switch (c->cri_klen) {
237			case 128:
238				cw0 = C3_CRYPT_CWLO_KEY128;
239				break;
240			case 192:
241				cw0 = C3_CRYPT_CWLO_KEY192;
242				break;
243			case 256:
244				cw0 = C3_CRYPT_CWLO_KEY256;
245				break;
246			default:
247				return (EINVAL);
248			}
249			cw0 |= C3_CRYPT_CWLO_ALG_AES |
250				C3_CRYPT_CWLO_KEYGEN_SW |
251				C3_CRYPT_CWLO_NORMAL;
252
253			cprng_fast(ses->ses_iv, sizeof(ses->ses_iv));
254			ses->ses_klen = c->cri_klen;
255			ses->ses_cw0 = cw0;
256
257			/* Build expanded keys for both directions */
258			rijndaelKeySetupEnc(ses->ses_ekey, c->cri_key,
259			    c->cri_klen);
260			rijndaelKeySetupDec(ses->ses_dkey, c->cri_key,
261			    c->cri_klen);
262			for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
263				ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
264				ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
265			}
266
267			break;
268
269		/* Use hashing implementations from the cryptosoft code. */
270		case CRYPTO_MD5_HMAC:
271			axf = &swcr_auth_hash_hmac_md5;
272			goto authcommon;
273		case CRYPTO_MD5_HMAC_96:
274			axf = &swcr_auth_hash_hmac_md5_96;
275			goto authcommon;
276		case CRYPTO_SHA1_HMAC:
277			axf = &swcr_auth_hash_hmac_sha1;
278			goto authcommon;
279		case CRYPTO_SHA1_HMAC_96:
280			axf = &swcr_auth_hash_hmac_sha1_96;
281			goto authcommon;
282		case CRYPTO_RIPEMD160_HMAC:
283			axf = &swcr_auth_hash_hmac_ripemd_160;
284			goto authcommon;
285		case CRYPTO_RIPEMD160_HMAC_96:
286			axf = &swcr_auth_hash_hmac_ripemd_160_96;
287			goto authcommon;
288		case CRYPTO_SHA2_HMAC:
289			if (cri->cri_klen == 256)
290				axf = &swcr_auth_hash_hmac_sha2_256;
291			else if (cri->cri_klen == 384)
292				axf = &swcr_auth_hash_hmac_sha2_384;
293			else if (cri->cri_klen == 512)
294				axf = &swcr_auth_hash_hmac_sha2_512;
295			else {
296				return EINVAL;
297			}
298		authcommon:
299			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
300			    M_NOWAIT|M_ZERO);
301			if (swd == NULL) {
302				via_padlock_crypto_freesession(sc, sesn);
303				return (ENOMEM);
304			}
305			ses->swd = swd;
306
307			swd->sw_ictx = malloc(axf->ctxsize,
308			    M_CRYPTO_DATA, M_NOWAIT);
309			if (swd->sw_ictx == NULL) {
310				via_padlock_crypto_freesession(sc, sesn);
311				return (ENOMEM);
312			}
313
314			swd->sw_octx = malloc(axf->ctxsize,
315			    M_CRYPTO_DATA, M_NOWAIT);
316			if (swd->sw_octx == NULL) {
317				via_padlock_crypto_freesession(sc, sesn);
318				return (ENOMEM);
319			}
320
321			for (i = 0; i < c->cri_klen / 8; i++)
322				c->cri_key[i] ^= HMAC_IPAD_VAL;
323
324			axf->Init(swd->sw_ictx);
325			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
326			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
327			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
328
329			for (i = 0; i < c->cri_klen / 8; i++)
330				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
331				    HMAC_OPAD_VAL);
332
333			axf->Init(swd->sw_octx);
334			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
335			axf->Update(swd->sw_octx, hmac_opad_buffer,
336			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
337
338			for (i = 0; i < c->cri_klen / 8; i++)
339				c->cri_key[i] ^= HMAC_OPAD_VAL;
340
341			swd->sw_axf = axf;
342			swd->sw_alg = c->cri_alg;
343
344			break;
345		default:
346			return (EINVAL);
347		}
348	}
349
350	*sidp = VIAC3_SID(0, sesn);
351	return (0);
352}
353
354int
355via_padlock_crypto_freesession(void *arg, uint64_t tid)
356{
357	struct via_padlock_softc *sc = arg;
358	struct swcr_data *swd;
359	const struct swcr_auth_hash *axf;
360	int sesn;
361	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
362
363	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
364	if (sc == NULL)
365		return (EINVAL);
366
367	sesn = VIAC3_SESSION(sid);
368	if (sesn >= sc->sc_nsessions)
369		return (EINVAL);
370
371	if (sc->sc_sessions[sesn].swd) {
372		swd = sc->sc_sessions[sesn].swd;
373		axf = swd->sw_axf;
374
375		if (swd->sw_ictx) {
376			memset(swd->sw_ictx, 0, axf->ctxsize);
377			free(swd->sw_ictx, M_CRYPTO_DATA);
378		}
379		if (swd->sw_octx) {
380			memset(swd->sw_octx, 0, axf->ctxsize);
381			free(swd->sw_octx, M_CRYPTO_DATA);
382		}
383		free(swd, M_CRYPTO_DATA);
384	}
385
386	memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
387	return (0);
388}
389
/*
 * via_padlock_cbc: run the PadLock "rep xcrypt-cbc" instruction over
 * `rep` 16-byte blocks from src to dst (they may be the same buffer,
 * as via_padlock_crypto_encdec() does in-place operation).  `cw` points
 * at the control word, `key` at the expanded round keys, `iv` at the
 * 16-byte chaining value.
 *
 * NOTE(review): unlike via_c3_rnd(), this modifies CR0 without
 * disabling preemption/interrupts -- confirm callers cannot migrate or
 * be interrupted while CR0_EM/CR0_TS are cleared.
 */
static __inline void
via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
    void *iv)
{
	unsigned int creg0;

	creg0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(creg0 & ~(CR0_EM|CR0_TS));

	/* Do the deed */
	__asm __volatile("pushfl; popfl");	/* force key reload */
	__asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
			: "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
			: "memory", "cc");

	lcr0(creg0);
}
407
408int
409via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
410    struct swcr_data *sw, void *buf)
411{
412	int	type;
413
414	if (crp->crp_flags & CRYPTO_F_IMBUF)
415		type = CRYPTO_BUF_MBUF;
416	else
417		type= CRYPTO_BUF_IOV;
418
419	return (swcr_authcompute(crp, crd, sw, buf, type));
420}
421
422int
423via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
424    struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf)
425{
426	uint32_t *key;
427	int err = 0;
428
429	if ((crd->crd_len % 16) != 0) {
430		err = EINVAL;
431		return (err);
432	}
433
434	sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
435	if (sc->op_buf == NULL) {
436		err = ENOMEM;
437		return (err);
438	}
439
440	if (crd->crd_flags & CRD_F_ENCRYPT) {
441		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
442		key = ses->ses_ekey;
443		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
444			memcpy(sc->op_iv, crd->crd_iv, 16);
445		else
446			memcpy(sc->op_iv, ses->ses_iv, 16);
447
448		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
449			if (crp->crp_flags & CRYPTO_F_IMBUF)
450				m_copyback((struct mbuf *)crp->crp_buf,
451				    crd->crd_inject, 16, sc->op_iv);
452			else if (crp->crp_flags & CRYPTO_F_IOV)
453				cuio_copyback((struct uio *)crp->crp_buf,
454				    crd->crd_inject, 16, sc->op_iv);
455			else
456				memcpy((char *)crp->crp_buf + crd->crd_inject,
457				    sc->op_iv, 16);
458		}
459	} else {
460		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
461		key = ses->ses_dkey;
462		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
463			memcpy(sc->op_iv, crd->crd_iv, 16);
464		else {
465			if (crp->crp_flags & CRYPTO_F_IMBUF)
466				m_copydata((struct mbuf *)crp->crp_buf,
467				    crd->crd_inject, 16, sc->op_iv);
468			else if (crp->crp_flags & CRYPTO_F_IOV)
469				cuio_copydata((struct uio *)crp->crp_buf,
470				    crd->crd_inject, 16, sc->op_iv);
471			else
472				memcpy(sc->op_iv, (char *)crp->crp_buf +
473				    crd->crd_inject, 16);
474		}
475	}
476
477	if (crp->crp_flags & CRYPTO_F_IMBUF)
478		m_copydata((struct mbuf *)crp->crp_buf,
479		    crd->crd_skip, crd->crd_len, sc->op_buf);
480	else if (crp->crp_flags & CRYPTO_F_IOV)
481		cuio_copydata((struct uio *)crp->crp_buf,
482		    crd->crd_skip, crd->crd_len, sc->op_buf);
483	else
484		memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip,
485		    crd->crd_len);
486
487	sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
488	via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
489	    crd->crd_len / 16, sc->op_iv);
490
491	if (crp->crp_flags & CRYPTO_F_IMBUF)
492		m_copyback((struct mbuf *)crp->crp_buf,
493		    crd->crd_skip, crd->crd_len, sc->op_buf);
494	else if (crp->crp_flags & CRYPTO_F_IOV)
495		cuio_copyback((struct uio *)crp->crp_buf,
496		    crd->crd_skip, crd->crd_len, sc->op_buf);
497	else
498		memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf,
499		    crd->crd_len);
500
501	/* copy out last block for use as next session IV */
502	if (crd->crd_flags & CRD_F_ENCRYPT) {
503		if (crp->crp_flags & CRYPTO_F_IMBUF)
504			m_copydata((struct mbuf *)crp->crp_buf,
505			    crd->crd_skip + crd->crd_len - 16, 16,
506			    ses->ses_iv);
507		else if (crp->crp_flags & CRYPTO_F_IOV)
508			cuio_copydata((struct uio *)crp->crp_buf,
509			    crd->crd_skip + crd->crd_len - 16, 16,
510			    ses->ses_iv);
511		else
512			memcpy(ses->ses_iv, (char *)crp->crp_buf +
513			    crd->crd_skip + crd->crd_len - 16, 16);
514	}
515
516	if (sc->op_buf != NULL) {
517		memset(sc->op_buf, 0, crd->crd_len);
518		free(sc->op_buf, M_DEVBUF);
519		sc->op_buf = NULL;
520	}
521
522	return (err);
523}
524
525int
526via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint)
527{
528	struct via_padlock_softc *sc = arg;
529	struct via_padlock_session *ses;
530	struct cryptodesc *crd;
531	int sesn, err = 0;
532
533	KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/);
534	if (crp == NULL || crp->crp_callback == NULL) {
535		err = EINVAL;
536		goto out;
537	}
538
539	sesn = VIAC3_SESSION(crp->crp_sid);
540	if (sesn >= sc->sc_nsessions) {
541		err = EINVAL;
542		goto out;
543	}
544	ses = &sc->sc_sessions[sesn];
545
546	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
547		switch (crd->crd_alg) {
548		case CRYPTO_AES_CBC:
549			if ((err = via_padlock_crypto_encdec(crp, crd, ses,
550			    sc, crp->crp_buf)) != 0)
551				goto out;
552			break;
553
554		case CRYPTO_MD5_HMAC:
555		case CRYPTO_SHA1_HMAC:
556		case CRYPTO_RIPEMD160_HMAC:
557		case CRYPTO_SHA2_HMAC:
558			if ((err = via_padlock_crypto_swauth(crp, crd,
559			    ses->swd, crp->crp_buf)) != 0)
560				goto out;
561			break;
562
563		default:
564			err = EINVAL;
565			goto out;
566		}
567	}
568out:
569	crp->crp_etype = err;
570	crypto_done(crp);
571	return (err);
572}
573
574static int
575via_padlock_match(device_t parent, cfdata_t cf, void *opaque)
576{
577	struct cpufeature_attach_args *cfaa = opaque;
578	struct cpu_info *ci = cfaa->ci;
579
580	if (strcmp(cfaa->name, "padlock") != 0)
581		return 0;
582	if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0)
583		return 0;
584	if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
585		return 0;
586	return 1;
587}
588
589static void
590via_padlock_attach(device_t parent, device_t self, void *opaque)
591{
592	struct via_padlock_softc *sc = device_private(self);
593
594	sc->sc_dev = self;
595
596	aprint_naive("\n");
597	aprint_normal(": VIA PadLock\n");
598
599	pmf_device_register(self, NULL, NULL);
600
601	config_interrupts(self, via_padlock_attach_intr);
602}
603
604static void
605via_padlock_attach_intr(device_t self)
606{
607	struct via_padlock_softc *sc = device_private(self);
608
609	aprint_normal("%s:", device_xname(self));
610	if (cpu_feature[4] & CPUID_VIA_HAS_RNG) {
611		via_c3_rnd_init(sc);
612		aprint_normal(" RNG");
613	}
614	if (cpu_feature[4] & CPUID_VIA_HAS_ACE) {
615		via_c3_ace_init(sc);
616		aprint_normal(" ACE");
617	}
618	aprint_normal("\n");
619}
620
621static int
622via_padlock_detach(device_t self, int flags)
623{
624	struct via_padlock_softc *sc = device_private(self);
625
626	if (sc->sc_rnd_attached) {
627		callout_halt(&sc->sc_rnd_co, NULL);
628		callout_destroy(&sc->sc_rnd_co);
629		rnd_detach_source(&sc->sc_rnd_source);
630		sc->sc_rnd_attached = false;
631	}
632	if (sc->sc_cid_attached) {
633		crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC);
634		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96);
635		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC);
636		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96);
637		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC);
638		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96);
639		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC);
640		crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC);
641		sc->sc_cid_attached = false;
642	}
643
644	pmf_device_deregister(self);
645
646	return 0;
647}
648
649MODULE(MODULE_CLASS_DRIVER, padlock, NULL);
650
651#ifdef _MODULE
652#include "ioconf.c"
653#endif
654
655static int
656padlock_modcmd(modcmd_t cmd, void *opaque)
657{
658	int error = 0;
659
660	switch (cmd) {
661	case MODULE_CMD_INIT:
662#ifdef _MODULE
663		error = config_init_component(cfdriver_ioconf_padlock,
664		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
665#endif
666		return error;
667	case MODULE_CMD_FINI:
668#ifdef _MODULE
669		error = config_fini_component(cfdriver_ioconf_padlock,
670		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
671#endif
672		return error;
673	default:
674		return ENOTTY;
675	}
676}
677