/*	$OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $	*/
/*	$NetBSD: via_padlock.c,v 1.20 2012/01/17 03:39:33 jakllsch Exp $ */

/*-
 * Copyright (c) 2003 Jason Wright
 * Copyright (c) 2003, 2004 Theo de Raadt
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

22#include <sys/cdefs.h>
23__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.20 2012/01/17 03:39:33 jakllsch Exp $");
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/signalvar.h>
28#include <sys/kernel.h>
29#include <sys/device.h>
30#include <sys/module.h>
31#include <sys/rnd.h>
32#include <sys/malloc.h>
33#include <sys/mbuf.h>
34#include <sys/cpu.h>
35#include <sys/rnd.h>
36#include <sys/cprng.h>
37
38#include <x86/specialreg.h>
39
40#include <machine/cpufunc.h>
41#include <machine/cpuvar.h>
42
43#include <opencrypto/cryptodev.h>
44#include <opencrypto/cryptosoft.h>
45#include <opencrypto/xform.h>
46#include <crypto/rijndael/rijndael.h>
47
48#include <opencrypto/cryptosoft_xform.c>
49
50#include <x86/via_padlock.h>
51
52static int	via_padlock_match(device_t, cfdata_t, void *);
53static void	via_padlock_attach(device_t, device_t, void *);
54static int	via_padlock_detach(device_t, int);
55static void	via_padlock_attach_intr(device_t);
56
57CFATTACH_DECL_NEW(
58    padlock,
59    sizeof(struct via_padlock_softc),
60    via_padlock_match,
61    via_padlock_attach,
62    via_padlock_detach,
63    NULL
64);
65
66int	via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *);
67int	via_padlock_crypto_process(void *, struct cryptop *, int);
68int	via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *,
69	    struct swcr_data *, void *);
70int	via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *,
71	    struct via_padlock_session *, struct via_padlock_softc *, void *);
72int	via_padlock_crypto_freesession(void *, uint64_t);
73static	__inline void via_padlock_cbc(void *, void *, void *, void *, int,
74	    void *);
75
76static void
77via_c3_rnd(void *arg)
78{
79	struct via_padlock_softc *sc = arg;
80
81	uint32_t creg0, len = VIAC3_RNG_BUFSIZ;
82	uint32_t buffer[VIAC3_RNG_BUFSIZ/4 + 1]; /* CPU goes 3 bytes beyond */
83	uint32_t eax, ecx, edi; /* XXX write-only, but necessary it seems */
84
85	/*
86	 * Sadly, we have to monkey with the coprocessor enable and fault
87	 * registers, which are really for the FPU, in order to read
88	 * from the RNG.
89	 *
90	 * Don't remove CR0_TS from the call below -- comments in the Linux
91	 * driver indicate that the xstorerng instruction can generate
92	 * spurious DNA faults though no FPU or SIMD state is changed
93	 * even if such a fault is generated.
94	 *
95	 */
96	kpreempt_disable();
97	x86_disable_intr();
98	creg0 = rcr0();
99	lcr0(creg0 & ~(CR0_EM|CR0_TS));	/* Permit access to SIMD/FPU path */
100	/*
101	 * Collect the random data from the C3 RNG into our buffer.
102	 * We turn on maximum whitening (is this actually desirable
103	 * if we will feed the data to SHA1?) (%edx[0,1] = "11").
104	 */
105	__asm __volatile("rep xstorerng"
106			 : "=a" (eax), "=c" (ecx), "=D" (edi)
107			 : "d" (3), "D" (buffer), "c" (len)
108			 : "memory", "cc");
109	/* Put CR0 back how it was */
110	lcr0(creg0);
111	x86_enable_intr();
112	kpreempt_enable();
113	rnd_add_data(&sc->sc_rnd_source, buffer, len, len * NBBY);
114	callout_reset(&sc->sc_rnd_co, sc->sc_rnd_hz, via_c3_rnd, sc);
115}
116
117static void
118via_c3_rnd_init(struct via_padlock_softc *sc)
119{
120	sc->sc_rnd_attached = true;
121
122	if (hz >= 100) {
123	    sc->sc_rnd_hz = 10 * hz / 100;
124	} else {
125	    sc->sc_rnd_hz = 10;
126	}
127	/* See hifn7751.c re use of RND_FLAG_NO_ESTIMATE */
128	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
129			  RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
130	callout_init(&sc->sc_rnd_co, 0);
131	/* Call once to prime the pool early and set callout. */
132	via_c3_rnd(sc);
133}
134
135static void
136via_c3_ace_init(struct via_padlock_softc *sc)
137{
138	/*
139	 * There is no reason to call into the kernel to use this
140	 * driver from userspace, because the crypto instructions can
141	 * be directly accessed there.  Setting CRYPTOCAP_F_SOFTWARE
142	 * has approximately the right semantics though the name is
143	 * confusing (however, consider that crypto via unprivileged
144	 * instructions _is_ "just software" in some sense).
145	 */
146	sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
147	if (sc->sc_cid < 0) {
148		aprint_error_dev(sc->sc_dev,
149		    "could not get a crypto driver ID\n");
150		return;
151	}
152
153	sc->sc_cid_attached = true;
154
155	/*
156	 * Ask the opencrypto subsystem to register ourselves. Although
157	 * we don't support hardware offloading for various HMAC algorithms,
158	 * we will handle them, because opencrypto prefers drivers that
159	 * support all requested algorithms.
160	 *
161	 *
162	 * XXX We should actually implement the HMAC modes this hardware
163	 * XXX can accellerate (wrap its plain SHA1/SHA2 as HMAC) and
164	 * XXX strongly consider removing those passed through to cryptosoft.
165	 * XXX As it stands, we can "steal" sessions from drivers which could
166	 * XXX better accellerate them.
167	 *
168	 * XXX Note the ordering dependency between when this (or any
169	 * XXX crypto driver) attaches and when cryptosoft does.  We are
170	 * XXX basically counting on the swcrypto pseudo-device to just
171	 * XXX happen to attach last, or _it_ will steal every session
172	 * XXX from _us_!
173	 */
174#define REGISTER(alg) \
175	crypto_register(sc->sc_cid, alg, 0, 0, \
176	    via_padlock_crypto_newsession, via_padlock_crypto_freesession, \
177	    via_padlock_crypto_process, sc);
178
179	REGISTER(CRYPTO_AES_CBC);
180	REGISTER(CRYPTO_MD5_HMAC_96);
181	REGISTER(CRYPTO_MD5_HMAC);
182	REGISTER(CRYPTO_SHA1_HMAC_96);
183	REGISTER(CRYPTO_SHA1_HMAC);
184	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
185	REGISTER(CRYPTO_RIPEMD160_HMAC);
186	REGISTER(CRYPTO_SHA2_HMAC);
187}
188
189int
190via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
191{
192	struct cryptoini *c;
193	struct via_padlock_softc *sc = arg;
194	struct via_padlock_session *ses = NULL;
195	const struct swcr_auth_hash *axf;
196	struct swcr_data *swd;
197	int sesn, i, cw0;
198
199	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
200	if (sc == NULL || sidp == NULL || cri == NULL)
201		return (EINVAL);
202
203	if (sc->sc_sessions == NULL) {
204		ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
205		    M_NOWAIT);
206		if (ses == NULL)
207			return (ENOMEM);
208		sesn = 0;
209		sc->sc_nsessions = 1;
210	} else {
211		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
212			if (sc->sc_sessions[sesn].ses_used == 0) {
213				ses = &sc->sc_sessions[sesn];
214				break;
215			}
216		}
217
218		if (ses == NULL) {
219			sesn = sc->sc_nsessions;
220			ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
221			    M_NOWAIT);
222			if (ses == NULL)
223				return (ENOMEM);
224			memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
225			memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
226			free(sc->sc_sessions, M_DEVBUF);
227			sc->sc_sessions = ses;
228			ses = &sc->sc_sessions[sesn];
229			sc->sc_nsessions++;
230		}
231	}
232
233	memset(ses, 0, sizeof(*ses));
234	ses->ses_used = 1;
235
236	for (c = cri; c != NULL; c = c->cri_next) {
237		switch (c->cri_alg) {
238		case CRYPTO_AES_CBC:
239			switch (c->cri_klen) {
240			case 128:
241				cw0 = C3_CRYPT_CWLO_KEY128;
242				break;
243			case 192:
244				cw0 = C3_CRYPT_CWLO_KEY192;
245				break;
246			case 256:
247				cw0 = C3_CRYPT_CWLO_KEY256;
248				break;
249			default:
250				return (EINVAL);
251			}
252			cw0 |= C3_CRYPT_CWLO_ALG_AES |
253				C3_CRYPT_CWLO_KEYGEN_SW |
254				C3_CRYPT_CWLO_NORMAL;
255
256			cprng_fast(ses->ses_iv, sizeof(ses->ses_iv));
257			ses->ses_klen = c->cri_klen;
258			ses->ses_cw0 = cw0;
259
260			/* Build expanded keys for both directions */
261			rijndaelKeySetupEnc(ses->ses_ekey, c->cri_key,
262			    c->cri_klen);
263			rijndaelKeySetupDec(ses->ses_dkey, c->cri_key,
264			    c->cri_klen);
265			for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
266				ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
267				ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
268			}
269
270			break;
271
272		/* Use hashing implementations from the cryptosoft code. */
273		case CRYPTO_MD5_HMAC:
274			axf = &swcr_auth_hash_hmac_md5;
275			goto authcommon;
276		case CRYPTO_MD5_HMAC_96:
277			axf = &swcr_auth_hash_hmac_md5_96;
278			goto authcommon;
279		case CRYPTO_SHA1_HMAC:
280			axf = &swcr_auth_hash_hmac_sha1;
281			goto authcommon;
282		case CRYPTO_SHA1_HMAC_96:
283			axf = &swcr_auth_hash_hmac_sha1_96;
284			goto authcommon;
285		case CRYPTO_RIPEMD160_HMAC:
286			axf = &swcr_auth_hash_hmac_ripemd_160;
287			goto authcommon;
288		case CRYPTO_RIPEMD160_HMAC_96:
289			axf = &swcr_auth_hash_hmac_ripemd_160_96;
290			goto authcommon;
291		case CRYPTO_SHA2_HMAC:
292			if (cri->cri_klen == 256)
293				axf = &swcr_auth_hash_hmac_sha2_256;
294			else if (cri->cri_klen == 384)
295				axf = &swcr_auth_hash_hmac_sha2_384;
296			else if (cri->cri_klen == 512)
297				axf = &swcr_auth_hash_hmac_sha2_512;
298			else {
299				return EINVAL;
300			}
301		authcommon:
302			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
303			    M_NOWAIT|M_ZERO);
304			if (swd == NULL) {
305				via_padlock_crypto_freesession(sc, sesn);
306				return (ENOMEM);
307			}
308			ses->swd = swd;
309
310			swd->sw_ictx = malloc(axf->ctxsize,
311			    M_CRYPTO_DATA, M_NOWAIT);
312			if (swd->sw_ictx == NULL) {
313				via_padlock_crypto_freesession(sc, sesn);
314				return (ENOMEM);
315			}
316
317			swd->sw_octx = malloc(axf->ctxsize,
318			    M_CRYPTO_DATA, M_NOWAIT);
319			if (swd->sw_octx == NULL) {
320				via_padlock_crypto_freesession(sc, sesn);
321				return (ENOMEM);
322			}
323
324			for (i = 0; i < c->cri_klen / 8; i++)
325				c->cri_key[i] ^= HMAC_IPAD_VAL;
326
327			axf->Init(swd->sw_ictx);
328			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
329			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
330			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
331
332			for (i = 0; i < c->cri_klen / 8; i++)
333				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
334				    HMAC_OPAD_VAL);
335
336			axf->Init(swd->sw_octx);
337			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
338			axf->Update(swd->sw_octx, hmac_opad_buffer,
339			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
340
341			for (i = 0; i < c->cri_klen / 8; i++)
342				c->cri_key[i] ^= HMAC_OPAD_VAL;
343
344			swd->sw_axf = axf;
345			swd->sw_alg = c->cri_alg;
346
347			break;
348		default:
349			return (EINVAL);
350		}
351	}
352
353	*sidp = VIAC3_SID(0, sesn);
354	return (0);
355}
356
357int
358via_padlock_crypto_freesession(void *arg, uint64_t tid)
359{
360	struct via_padlock_softc *sc = arg;
361	struct swcr_data *swd;
362	const struct swcr_auth_hash *axf;
363	int sesn;
364	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
365
366	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
367	if (sc == NULL)
368		return (EINVAL);
369
370	sesn = VIAC3_SESSION(sid);
371	if (sesn >= sc->sc_nsessions)
372		return (EINVAL);
373
374	if (sc->sc_sessions[sesn].swd) {
375		swd = sc->sc_sessions[sesn].swd;
376		axf = swd->sw_axf;
377
378		if (swd->sw_ictx) {
379			memset(swd->sw_ictx, 0, axf->ctxsize);
380			free(swd->sw_ictx, M_CRYPTO_DATA);
381		}
382		if (swd->sw_octx) {
383			memset(swd->sw_octx, 0, axf->ctxsize);
384			free(swd->sw_octx, M_CRYPTO_DATA);
385		}
386		free(swd, M_CRYPTO_DATA);
387	}
388
389	memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
390	return (0);
391}
392
393static __inline void
394via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
395    void *iv)
396{
397	unsigned int creg0;
398
399	creg0 = rcr0();		/* Permit access to SIMD/FPU path */
400	lcr0(creg0 & ~(CR0_EM|CR0_TS));
401
402	/* Do the deed */
403	__asm __volatile("pushfl; popfl");	/* force key reload */
404	__asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
405			: "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
406			: "memory", "cc");
407
408	lcr0(creg0);
409}
410
411int
412via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
413    struct swcr_data *sw, void *buf)
414{
415	int	type;
416
417	if (crp->crp_flags & CRYPTO_F_IMBUF)
418		type = CRYPTO_BUF_MBUF;
419	else
420		type= CRYPTO_BUF_IOV;
421
422	return (swcr_authcompute(crp, crd, sw, buf, type));
423}
424
425int
426via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
427    struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf)
428{
429	uint32_t *key;
430	int err = 0;
431
432	if ((crd->crd_len % 16) != 0) {
433		err = EINVAL;
434		return (err);
435	}
436
437	sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
438	if (sc->op_buf == NULL) {
439		err = ENOMEM;
440		return (err);
441	}
442
443	if (crd->crd_flags & CRD_F_ENCRYPT) {
444		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
445		key = ses->ses_ekey;
446		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
447			memcpy(sc->op_iv, crd->crd_iv, 16);
448		else
449			memcpy(sc->op_iv, ses->ses_iv, 16);
450
451		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
452			if (crp->crp_flags & CRYPTO_F_IMBUF)
453				m_copyback((struct mbuf *)crp->crp_buf,
454				    crd->crd_inject, 16, sc->op_iv);
455			else if (crp->crp_flags & CRYPTO_F_IOV)
456				cuio_copyback((struct uio *)crp->crp_buf,
457				    crd->crd_inject, 16, sc->op_iv);
458			else
459				memcpy((char *)crp->crp_buf + crd->crd_inject,
460				    sc->op_iv, 16);
461		}
462	} else {
463		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
464		key = ses->ses_dkey;
465		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
466			memcpy(sc->op_iv, crd->crd_iv, 16);
467		else {
468			if (crp->crp_flags & CRYPTO_F_IMBUF)
469				m_copydata((struct mbuf *)crp->crp_buf,
470				    crd->crd_inject, 16, sc->op_iv);
471			else if (crp->crp_flags & CRYPTO_F_IOV)
472				cuio_copydata((struct uio *)crp->crp_buf,
473				    crd->crd_inject, 16, sc->op_iv);
474			else
475				memcpy(sc->op_iv, (char *)crp->crp_buf +
476				    crd->crd_inject, 16);
477		}
478	}
479
480	if (crp->crp_flags & CRYPTO_F_IMBUF)
481		m_copydata((struct mbuf *)crp->crp_buf,
482		    crd->crd_skip, crd->crd_len, sc->op_buf);
483	else if (crp->crp_flags & CRYPTO_F_IOV)
484		cuio_copydata((struct uio *)crp->crp_buf,
485		    crd->crd_skip, crd->crd_len, sc->op_buf);
486	else
487		memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip,
488		    crd->crd_len);
489
490	sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
491	via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
492	    crd->crd_len / 16, sc->op_iv);
493
494	if (crp->crp_flags & CRYPTO_F_IMBUF)
495		m_copyback((struct mbuf *)crp->crp_buf,
496		    crd->crd_skip, crd->crd_len, sc->op_buf);
497	else if (crp->crp_flags & CRYPTO_F_IOV)
498		cuio_copyback((struct uio *)crp->crp_buf,
499		    crd->crd_skip, crd->crd_len, sc->op_buf);
500	else
501		memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf,
502		    crd->crd_len);
503
504	/* copy out last block for use as next session IV */
505	if (crd->crd_flags & CRD_F_ENCRYPT) {
506		if (crp->crp_flags & CRYPTO_F_IMBUF)
507			m_copydata((struct mbuf *)crp->crp_buf,
508			    crd->crd_skip + crd->crd_len - 16, 16,
509			    ses->ses_iv);
510		else if (crp->crp_flags & CRYPTO_F_IOV)
511			cuio_copydata((struct uio *)crp->crp_buf,
512			    crd->crd_skip + crd->crd_len - 16, 16,
513			    ses->ses_iv);
514		else
515			memcpy(ses->ses_iv, (char *)crp->crp_buf +
516			    crd->crd_skip + crd->crd_len - 16, 16);
517	}
518
519	if (sc->op_buf != NULL) {
520		memset(sc->op_buf, 0, crd->crd_len);
521		free(sc->op_buf, M_DEVBUF);
522		sc->op_buf = NULL;
523	}
524
525	return (err);
526}
527
528int
529via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint)
530{
531	struct via_padlock_softc *sc = arg;
532	struct via_padlock_session *ses;
533	struct cryptodesc *crd;
534	int sesn, err = 0;
535
536	KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/);
537	if (crp == NULL || crp->crp_callback == NULL) {
538		err = EINVAL;
539		goto out;
540	}
541
542	sesn = VIAC3_SESSION(crp->crp_sid);
543	if (sesn >= sc->sc_nsessions) {
544		err = EINVAL;
545		goto out;
546	}
547	ses = &sc->sc_sessions[sesn];
548
549	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
550		switch (crd->crd_alg) {
551		case CRYPTO_AES_CBC:
552			if ((err = via_padlock_crypto_encdec(crp, crd, ses,
553			    sc, crp->crp_buf)) != 0)
554				goto out;
555			break;
556
557		case CRYPTO_MD5_HMAC:
558		case CRYPTO_SHA1_HMAC:
559		case CRYPTO_RIPEMD160_HMAC:
560		case CRYPTO_SHA2_HMAC:
561			if ((err = via_padlock_crypto_swauth(crp, crd,
562			    ses->swd, crp->crp_buf)) != 0)
563				goto out;
564			break;
565
566		default:
567			err = EINVAL;
568			goto out;
569		}
570	}
571out:
572	crp->crp_etype = err;
573	crypto_done(crp);
574	return (err);
575}
576
577static int
578via_padlock_match(device_t parent, cfdata_t cf, void *opaque)
579{
580	struct cpufeature_attach_args *cfaa = opaque;
581	struct cpu_info *ci = cfaa->ci;
582
583	if (strcmp(cfaa->name, "padlock") != 0)
584		return 0;
585	if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0)
586		return 0;
587	if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
588		return 0;
589	return 1;
590}
591
592static void
593via_padlock_attach(device_t parent, device_t self, void *opaque)
594{
595	struct via_padlock_softc *sc = device_private(self);
596
597	sc->sc_dev = self;
598
599	aprint_naive("\n");
600	aprint_normal(": VIA PadLock\n");
601
602	pmf_device_register(self, NULL, NULL);
603
604	config_interrupts(self, via_padlock_attach_intr);
605}
606
607static void
608via_padlock_attach_intr(device_t self)
609{
610	struct via_padlock_softc *sc = device_private(self);
611
612	aprint_normal("%s:", device_xname(self));
613	if (cpu_feature[4] & CPUID_VIA_HAS_RNG) {
614		via_c3_rnd_init(sc);
615		aprint_normal(" RNG");
616	}
617	if (cpu_feature[4] & CPUID_VIA_HAS_ACE) {
618		via_c3_ace_init(sc);
619		aprint_normal(" ACE");
620	}
621	aprint_normal("\n");
622}
623
624static int
625via_padlock_detach(device_t self, int flags)
626{
627	struct via_padlock_softc *sc = device_private(self);
628
629	if (sc->sc_rnd_attached) {
630		callout_stop(&sc->sc_rnd_co);
631		callout_destroy(&sc->sc_rnd_co);
632		rnd_detach_source(&sc->sc_rnd_source);
633		sc->sc_rnd_attached = false;
634	}
635	if (sc->sc_cid_attached) {
636		crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC);
637		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96);
638		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC);
639		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96);
640		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC);
641		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96);
642		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC);
643		crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC);
644		sc->sc_cid_attached = false;
645	}
646
647	pmf_device_deregister(self);
648
649	return 0;
650}
651
652MODULE(MODULE_CLASS_DRIVER, padlock, NULL);
653
654#ifdef _MODULE
655#include "ioconf.c"
656#endif
657
658static int
659padlock_modcmd(modcmd_t cmd, void *opaque)
660{
661	int error = 0;
662
663	switch (cmd) {
664	case MODULE_CMD_INIT:
665#ifdef _MODULE
666		error = config_init_component(cfdriver_ioconf_padlock,
667		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
668#endif
669		return error;
670	case MODULE_CMD_FINI:
671#ifdef _MODULE
672		error = config_fini_component(cfdriver_ioconf_padlock,
673		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
674#endif
675		return error;
676	default:
677		return ENOTTY;
678	}
679}
680