/* via_padlock.c revision 1.20 */
1/*	$OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $	*/
2/*	$NetBSD: via_padlock.c,v 1.20 2012/01/17 03:39:33 jakllsch Exp $ */
3
4/*-
5 * Copyright (c) 2003 Jason Wright
6 * Copyright (c) 2003, 2004 Theo de Raadt
7 * All rights reserved.
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22#include <sys/cdefs.h>
23__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.20 2012/01/17 03:39:33 jakllsch Exp $");
24
25#ifdef _KERNEL_OPT
26# include "rnd.h"
27# if NRND == 0
28#  error padlock requires rnd pseudo-devices
29# endif
30#endif
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/signalvar.h>
35#include <sys/kernel.h>
36#include <sys/device.h>
37#include <sys/module.h>
38#include <sys/rnd.h>
39#include <sys/malloc.h>
40#include <sys/mbuf.h>
41#include <sys/cpu.h>
42#include <sys/rnd.h>
43#include <sys/cprng.h>
44
45#include <x86/specialreg.h>
46
47#include <machine/cpufunc.h>
48#include <machine/cpuvar.h>
49
50#include <opencrypto/cryptodev.h>
51#include <opencrypto/cryptosoft.h>
52#include <opencrypto/xform.h>
53#include <crypto/rijndael/rijndael.h>
54
55#include <opencrypto/cryptosoft_xform.c>
56
57#include <x86/via_padlock.h>
58
/* autoconf(9) glue: match/attach/detach for the "padlock" CPU feature. */
static int	via_padlock_match(device_t, cfdata_t, void *);
static void	via_padlock_attach(device_t, device_t, void *);
static int	via_padlock_detach(device_t, int);
static void	via_padlock_attach_intr(device_t);

CFATTACH_DECL_NEW(
    padlock,
    sizeof(struct via_padlock_softc),
    via_padlock_match,
    via_padlock_attach,
    via_padlock_detach,
    NULL
);

/* opencrypto(9) driver entry points and internal helpers. */
int	via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *);
int	via_padlock_crypto_process(void *, struct cryptop *, int);
int	via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *,
	    struct swcr_data *, void *);
int	via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *,
	    struct via_padlock_session *, struct via_padlock_softc *, void *);
int	via_padlock_crypto_freesession(void *, uint64_t);
static	__inline void via_padlock_cbc(void *, void *, void *, void *, int,
	    void *);
82
/*
 * Callout handler: harvest entropy from the VIA C3 hardware RNG with
 * the "xstorerng" instruction, feed it to the rnd(4) pool, and re-arm
 * the callout at sc->sc_rnd_hz ticks.
 */
static void
via_c3_rnd(void *arg)
{
	struct via_padlock_softc *sc = arg;

	uint32_t creg0, len = VIAC3_RNG_BUFSIZ;
	uint32_t buffer[VIAC3_RNG_BUFSIZ/4 + 1]; /* CPU goes 3 bytes beyond */
	uint32_t eax, ecx, edi; /* XXX write-only, but necessary it seems */

	/*
	 * Sadly, we have to monkey with the coprocessor enable and fault
	 * registers, which are really for the FPU, in order to read
	 * from the RNG.
	 *
	 * Don't remove CR0_TS from the call below -- comments in the Linux
	 * driver indicate that the xstorerng instruction can generate
	 * spurious DNA faults though no FPU or SIMD state is changed
	 * even if such a fault is generated.
	 *
	 */
	/* Stay on this CPU, uninterrupted, while CR0 is modified. */
	kpreempt_disable();
	x86_disable_intr();
	creg0 = rcr0();
	lcr0(creg0 & ~(CR0_EM|CR0_TS));	/* Permit access to SIMD/FPU path */
	/*
	 * Collect the random data from the C3 RNG into our buffer.
	 * We turn on maximum whitening (is this actually desirable
	 * if we will feed the data to SHA1?) (%edx[0,1] = "11").
	 */
	__asm __volatile("rep xstorerng"
			 : "=a" (eax), "=c" (ecx), "=D" (edi)
			 : "d" (3), "D" (buffer), "c" (len)
			 : "memory", "cc");
	/* Put CR0 back how it was */
	lcr0(creg0);
	x86_enable_intr();
	kpreempt_enable();
	/*
	 * Credit len * NBBY bits of entropy.  NOTE(review): this is a
	 * full-entropy credit for the whitened hardware stream --
	 * confirm that matches rnd(9) crediting policy.
	 */
	rnd_add_data(&sc->sc_rnd_source, buffer, len, len * NBBY);
	callout_reset(&sc->sc_rnd_co, sc->sc_rnd_hz, via_c3_rnd, sc);
}
123
124static void
125via_c3_rnd_init(struct via_padlock_softc *sc)
126{
127	sc->sc_rnd_attached = true;
128
129	if (hz >= 100) {
130	    sc->sc_rnd_hz = 10 * hz / 100;
131	} else {
132	    sc->sc_rnd_hz = 10;
133	}
134	/* See hifn7751.c re use of RND_FLAG_NO_ESTIMATE */
135	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
136			  RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
137	callout_init(&sc->sc_rnd_co, 0);
138	/* Call once to prime the pool early and set callout. */
139	via_c3_rnd(sc);
140}
141
142static void
143via_c3_ace_init(struct via_padlock_softc *sc)
144{
145	/*
146	 * There is no reason to call into the kernel to use this
147	 * driver from userspace, because the crypto instructions can
148	 * be directly accessed there.  Setting CRYPTOCAP_F_SOFTWARE
149	 * has approximately the right semantics though the name is
150	 * confusing (however, consider that crypto via unprivileged
151	 * instructions _is_ "just software" in some sense).
152	 */
153	sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
154	if (sc->sc_cid < 0) {
155		aprint_error_dev(sc->sc_dev,
156		    "could not get a crypto driver ID\n");
157		return;
158	}
159
160	sc->sc_cid_attached = true;
161
162	/*
163	 * Ask the opencrypto subsystem to register ourselves. Although
164	 * we don't support hardware offloading for various HMAC algorithms,
165	 * we will handle them, because opencrypto prefers drivers that
166	 * support all requested algorithms.
167	 *
168	 *
169	 * XXX We should actually implement the HMAC modes this hardware
170	 * XXX can accellerate (wrap its plain SHA1/SHA2 as HMAC) and
171	 * XXX strongly consider removing those passed through to cryptosoft.
172	 * XXX As it stands, we can "steal" sessions from drivers which could
173	 * XXX better accellerate them.
174	 *
175	 * XXX Note the ordering dependency between when this (or any
176	 * XXX crypto driver) attaches and when cryptosoft does.  We are
177	 * XXX basically counting on the swcrypto pseudo-device to just
178	 * XXX happen to attach last, or _it_ will steal every session
179	 * XXX from _us_!
180	 */
181#define REGISTER(alg) \
182	crypto_register(sc->sc_cid, alg, 0, 0, \
183	    via_padlock_crypto_newsession, via_padlock_crypto_freesession, \
184	    via_padlock_crypto_process, sc);
185
186	REGISTER(CRYPTO_AES_CBC);
187	REGISTER(CRYPTO_MD5_HMAC_96);
188	REGISTER(CRYPTO_MD5_HMAC);
189	REGISTER(CRYPTO_SHA1_HMAC_96);
190	REGISTER(CRYPTO_SHA1_HMAC);
191	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
192	REGISTER(CRYPTO_RIPEMD160_HMAC);
193	REGISTER(CRYPTO_SHA2_HMAC);
194}
195
196int
197via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
198{
199	struct cryptoini *c;
200	struct via_padlock_softc *sc = arg;
201	struct via_padlock_session *ses = NULL;
202	const struct swcr_auth_hash *axf;
203	struct swcr_data *swd;
204	int sesn, i, cw0;
205
206	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
207	if (sc == NULL || sidp == NULL || cri == NULL)
208		return (EINVAL);
209
210	if (sc->sc_sessions == NULL) {
211		ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
212		    M_NOWAIT);
213		if (ses == NULL)
214			return (ENOMEM);
215		sesn = 0;
216		sc->sc_nsessions = 1;
217	} else {
218		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
219			if (sc->sc_sessions[sesn].ses_used == 0) {
220				ses = &sc->sc_sessions[sesn];
221				break;
222			}
223		}
224
225		if (ses == NULL) {
226			sesn = sc->sc_nsessions;
227			ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
228			    M_NOWAIT);
229			if (ses == NULL)
230				return (ENOMEM);
231			memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
232			memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
233			free(sc->sc_sessions, M_DEVBUF);
234			sc->sc_sessions = ses;
235			ses = &sc->sc_sessions[sesn];
236			sc->sc_nsessions++;
237		}
238	}
239
240	memset(ses, 0, sizeof(*ses));
241	ses->ses_used = 1;
242
243	for (c = cri; c != NULL; c = c->cri_next) {
244		switch (c->cri_alg) {
245		case CRYPTO_AES_CBC:
246			switch (c->cri_klen) {
247			case 128:
248				cw0 = C3_CRYPT_CWLO_KEY128;
249				break;
250			case 192:
251				cw0 = C3_CRYPT_CWLO_KEY192;
252				break;
253			case 256:
254				cw0 = C3_CRYPT_CWLO_KEY256;
255				break;
256			default:
257				return (EINVAL);
258			}
259			cw0 |= C3_CRYPT_CWLO_ALG_AES |
260				C3_CRYPT_CWLO_KEYGEN_SW |
261				C3_CRYPT_CWLO_NORMAL;
262
263			cprng_fast(ses->ses_iv, sizeof(ses->ses_iv));
264			ses->ses_klen = c->cri_klen;
265			ses->ses_cw0 = cw0;
266
267			/* Build expanded keys for both directions */
268			rijndaelKeySetupEnc(ses->ses_ekey, c->cri_key,
269			    c->cri_klen);
270			rijndaelKeySetupDec(ses->ses_dkey, c->cri_key,
271			    c->cri_klen);
272			for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
273				ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
274				ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
275			}
276
277			break;
278
279		/* Use hashing implementations from the cryptosoft code. */
280		case CRYPTO_MD5_HMAC:
281			axf = &swcr_auth_hash_hmac_md5;
282			goto authcommon;
283		case CRYPTO_MD5_HMAC_96:
284			axf = &swcr_auth_hash_hmac_md5_96;
285			goto authcommon;
286		case CRYPTO_SHA1_HMAC:
287			axf = &swcr_auth_hash_hmac_sha1;
288			goto authcommon;
289		case CRYPTO_SHA1_HMAC_96:
290			axf = &swcr_auth_hash_hmac_sha1_96;
291			goto authcommon;
292		case CRYPTO_RIPEMD160_HMAC:
293			axf = &swcr_auth_hash_hmac_ripemd_160;
294			goto authcommon;
295		case CRYPTO_RIPEMD160_HMAC_96:
296			axf = &swcr_auth_hash_hmac_ripemd_160_96;
297			goto authcommon;
298		case CRYPTO_SHA2_HMAC:
299			if (cri->cri_klen == 256)
300				axf = &swcr_auth_hash_hmac_sha2_256;
301			else if (cri->cri_klen == 384)
302				axf = &swcr_auth_hash_hmac_sha2_384;
303			else if (cri->cri_klen == 512)
304				axf = &swcr_auth_hash_hmac_sha2_512;
305			else {
306				return EINVAL;
307			}
308		authcommon:
309			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
310			    M_NOWAIT|M_ZERO);
311			if (swd == NULL) {
312				via_padlock_crypto_freesession(sc, sesn);
313				return (ENOMEM);
314			}
315			ses->swd = swd;
316
317			swd->sw_ictx = malloc(axf->ctxsize,
318			    M_CRYPTO_DATA, M_NOWAIT);
319			if (swd->sw_ictx == NULL) {
320				via_padlock_crypto_freesession(sc, sesn);
321				return (ENOMEM);
322			}
323
324			swd->sw_octx = malloc(axf->ctxsize,
325			    M_CRYPTO_DATA, M_NOWAIT);
326			if (swd->sw_octx == NULL) {
327				via_padlock_crypto_freesession(sc, sesn);
328				return (ENOMEM);
329			}
330
331			for (i = 0; i < c->cri_klen / 8; i++)
332				c->cri_key[i] ^= HMAC_IPAD_VAL;
333
334			axf->Init(swd->sw_ictx);
335			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
336			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
337			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
338
339			for (i = 0; i < c->cri_klen / 8; i++)
340				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
341				    HMAC_OPAD_VAL);
342
343			axf->Init(swd->sw_octx);
344			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
345			axf->Update(swd->sw_octx, hmac_opad_buffer,
346			    HMAC_BLOCK_LEN - (c->cri_klen / 8));
347
348			for (i = 0; i < c->cri_klen / 8; i++)
349				c->cri_key[i] ^= HMAC_OPAD_VAL;
350
351			swd->sw_axf = axf;
352			swd->sw_alg = c->cri_alg;
353
354			break;
355		default:
356			return (EINVAL);
357		}
358	}
359
360	*sidp = VIAC3_SID(0, sesn);
361	return (0);
362}
363
364int
365via_padlock_crypto_freesession(void *arg, uint64_t tid)
366{
367	struct via_padlock_softc *sc = arg;
368	struct swcr_data *swd;
369	const struct swcr_auth_hash *axf;
370	int sesn;
371	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
372
373	KASSERT(sc != NULL /*, ("via_padlock_crypto_freesession: null softc")*/);
374	if (sc == NULL)
375		return (EINVAL);
376
377	sesn = VIAC3_SESSION(sid);
378	if (sesn >= sc->sc_nsessions)
379		return (EINVAL);
380
381	if (sc->sc_sessions[sesn].swd) {
382		swd = sc->sc_sessions[sesn].swd;
383		axf = swd->sw_axf;
384
385		if (swd->sw_ictx) {
386			memset(swd->sw_ictx, 0, axf->ctxsize);
387			free(swd->sw_ictx, M_CRYPTO_DATA);
388		}
389		if (swd->sw_octx) {
390			memset(swd->sw_octx, 0, axf->ctxsize);
391			free(swd->sw_octx, M_CRYPTO_DATA);
392		}
393		free(swd, M_CRYPTO_DATA);
394	}
395
396	memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
397	return (0);
398}
399
/*
 * Run the PadLock "rep xcrypt-cbc" instruction over `rep` 16-byte
 * blocks from src to dst (may alias for in-place operation), using
 * the control word at cw, the expanded key at key and the IV at iv.
 *
 * NOTE(review): unlike via_c3_rnd(), this twiddles CR0 without
 * disabling preemption or interrupts -- presumably safe because only
 * CR0_EM/CR0_TS are cleared and restored, but confirm against the
 * kernel's lazy-FPU handling.
 */
static __inline void
via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
    void *iv)
{
	unsigned int creg0;

	creg0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(creg0 & ~(CR0_EM|CR0_TS));

	/* Do the deed */
	__asm __volatile("pushfl; popfl");	/* force key reload */
	__asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
			: "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
			: "memory", "cc");

	/* Restore CR0 exactly as we found it. */
	lcr0(creg0);
}
417
418int
419via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
420    struct swcr_data *sw, void *buf)
421{
422	int	type;
423
424	if (crp->crp_flags & CRYPTO_F_IMBUF)
425		type = CRYPTO_BUF_MBUF;
426	else
427		type= CRYPTO_BUF_IOV;
428
429	return (swcr_authcompute(crp, crd, sw, buf, type));
430}
431
432int
433via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
434    struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf)
435{
436	uint32_t *key;
437	int err = 0;
438
439	if ((crd->crd_len % 16) != 0) {
440		err = EINVAL;
441		return (err);
442	}
443
444	sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
445	if (sc->op_buf == NULL) {
446		err = ENOMEM;
447		return (err);
448	}
449
450	if (crd->crd_flags & CRD_F_ENCRYPT) {
451		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
452		key = ses->ses_ekey;
453		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
454			memcpy(sc->op_iv, crd->crd_iv, 16);
455		else
456			memcpy(sc->op_iv, ses->ses_iv, 16);
457
458		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
459			if (crp->crp_flags & CRYPTO_F_IMBUF)
460				m_copyback((struct mbuf *)crp->crp_buf,
461				    crd->crd_inject, 16, sc->op_iv);
462			else if (crp->crp_flags & CRYPTO_F_IOV)
463				cuio_copyback((struct uio *)crp->crp_buf,
464				    crd->crd_inject, 16, sc->op_iv);
465			else
466				memcpy((char *)crp->crp_buf + crd->crd_inject,
467				    sc->op_iv, 16);
468		}
469	} else {
470		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
471		key = ses->ses_dkey;
472		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
473			memcpy(sc->op_iv, crd->crd_iv, 16);
474		else {
475			if (crp->crp_flags & CRYPTO_F_IMBUF)
476				m_copydata((struct mbuf *)crp->crp_buf,
477				    crd->crd_inject, 16, sc->op_iv);
478			else if (crp->crp_flags & CRYPTO_F_IOV)
479				cuio_copydata((struct uio *)crp->crp_buf,
480				    crd->crd_inject, 16, sc->op_iv);
481			else
482				memcpy(sc->op_iv, (char *)crp->crp_buf +
483				    crd->crd_inject, 16);
484		}
485	}
486
487	if (crp->crp_flags & CRYPTO_F_IMBUF)
488		m_copydata((struct mbuf *)crp->crp_buf,
489		    crd->crd_skip, crd->crd_len, sc->op_buf);
490	else if (crp->crp_flags & CRYPTO_F_IOV)
491		cuio_copydata((struct uio *)crp->crp_buf,
492		    crd->crd_skip, crd->crd_len, sc->op_buf);
493	else
494		memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip,
495		    crd->crd_len);
496
497	sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
498	via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
499	    crd->crd_len / 16, sc->op_iv);
500
501	if (crp->crp_flags & CRYPTO_F_IMBUF)
502		m_copyback((struct mbuf *)crp->crp_buf,
503		    crd->crd_skip, crd->crd_len, sc->op_buf);
504	else if (crp->crp_flags & CRYPTO_F_IOV)
505		cuio_copyback((struct uio *)crp->crp_buf,
506		    crd->crd_skip, crd->crd_len, sc->op_buf);
507	else
508		memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf,
509		    crd->crd_len);
510
511	/* copy out last block for use as next session IV */
512	if (crd->crd_flags & CRD_F_ENCRYPT) {
513		if (crp->crp_flags & CRYPTO_F_IMBUF)
514			m_copydata((struct mbuf *)crp->crp_buf,
515			    crd->crd_skip + crd->crd_len - 16, 16,
516			    ses->ses_iv);
517		else if (crp->crp_flags & CRYPTO_F_IOV)
518			cuio_copydata((struct uio *)crp->crp_buf,
519			    crd->crd_skip + crd->crd_len - 16, 16,
520			    ses->ses_iv);
521		else
522			memcpy(ses->ses_iv, (char *)crp->crp_buf +
523			    crd->crd_skip + crd->crd_len - 16, 16);
524	}
525
526	if (sc->op_buf != NULL) {
527		memset(sc->op_buf, 0, crd->crd_len);
528		free(sc->op_buf, M_DEVBUF);
529		sc->op_buf = NULL;
530	}
531
532	return (err);
533}
534
535int
536via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint)
537{
538	struct via_padlock_softc *sc = arg;
539	struct via_padlock_session *ses;
540	struct cryptodesc *crd;
541	int sesn, err = 0;
542
543	KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/);
544	if (crp == NULL || crp->crp_callback == NULL) {
545		err = EINVAL;
546		goto out;
547	}
548
549	sesn = VIAC3_SESSION(crp->crp_sid);
550	if (sesn >= sc->sc_nsessions) {
551		err = EINVAL;
552		goto out;
553	}
554	ses = &sc->sc_sessions[sesn];
555
556	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
557		switch (crd->crd_alg) {
558		case CRYPTO_AES_CBC:
559			if ((err = via_padlock_crypto_encdec(crp, crd, ses,
560			    sc, crp->crp_buf)) != 0)
561				goto out;
562			break;
563
564		case CRYPTO_MD5_HMAC:
565		case CRYPTO_SHA1_HMAC:
566		case CRYPTO_RIPEMD160_HMAC:
567		case CRYPTO_SHA2_HMAC:
568			if ((err = via_padlock_crypto_swauth(crp, crd,
569			    ses->swd, crp->crp_buf)) != 0)
570				goto out;
571			break;
572
573		default:
574			err = EINVAL;
575			goto out;
576		}
577	}
578out:
579	crp->crp_etype = err;
580	crypto_done(crp);
581	return (err);
582}
583
584static int
585via_padlock_match(device_t parent, cfdata_t cf, void *opaque)
586{
587	struct cpufeature_attach_args *cfaa = opaque;
588	struct cpu_info *ci = cfaa->ci;
589
590	if (strcmp(cfaa->name, "padlock") != 0)
591		return 0;
592	if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0)
593		return 0;
594	if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
595		return 0;
596	return 1;
597}
598
599static void
600via_padlock_attach(device_t parent, device_t self, void *opaque)
601{
602	struct via_padlock_softc *sc = device_private(self);
603
604	sc->sc_dev = self;
605
606	aprint_naive("\n");
607	aprint_normal(": VIA PadLock\n");
608
609	pmf_device_register(self, NULL, NULL);
610
611	config_interrupts(self, via_padlock_attach_intr);
612}
613
614static void
615via_padlock_attach_intr(device_t self)
616{
617	struct via_padlock_softc *sc = device_private(self);
618
619	aprint_normal("%s:", device_xname(self));
620	if (cpu_feature[4] & CPUID_VIA_HAS_RNG) {
621		via_c3_rnd_init(sc);
622		aprint_normal(" RNG");
623	}
624	if (cpu_feature[4] & CPUID_VIA_HAS_ACE) {
625		via_c3_ace_init(sc);
626		aprint_normal(" ACE");
627	}
628	aprint_normal("\n");
629}
630
631static int
632via_padlock_detach(device_t self, int flags)
633{
634	struct via_padlock_softc *sc = device_private(self);
635
636	if (sc->sc_rnd_attached) {
637		callout_stop(&sc->sc_rnd_co);
638		callout_destroy(&sc->sc_rnd_co);
639		rnd_detach_source(&sc->sc_rnd_source);
640		sc->sc_rnd_attached = false;
641	}
642	if (sc->sc_cid_attached) {
643		crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC);
644		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96);
645		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC);
646		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96);
647		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC);
648		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96);
649		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC);
650		crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC);
651		sc->sc_cid_attached = false;
652	}
653
654	pmf_device_deregister(self);
655
656	return 0;
657}
658
659MODULE(MODULE_CLASS_DRIVER, padlock, NULL);
660
661#ifdef _MODULE
662#include "ioconf.c"
663#endif
664
665static int
666padlock_modcmd(modcmd_t cmd, void *opaque)
667{
668	int error = 0;
669
670	switch (cmd) {
671	case MODULE_CMD_INIT:
672#ifdef _MODULE
673		error = config_init_component(cfdriver_ioconf_padlock,
674		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
675#endif
676		return error;
677	case MODULE_CMD_FINI:
678#ifdef _MODULE
679		error = config_fini_component(cfdriver_ioconf_padlock,
680		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
681#endif
682		return error;
683	default:
684		return ENOTTY;
685	}
686}
687