/* t4_crypto.c, revision 318429 */
1318429Sjhb/*-
2318429Sjhb * Copyright (c) 2017 Chelsio Communications, Inc.
3318429Sjhb * All rights reserved.
4318429Sjhb * Written by: John Baldwin <jhb@FreeBSD.org>
5318429Sjhb *
6318429Sjhb * Redistribution and use in source and binary forms, with or without
7318429Sjhb * modification, are permitted provided that the following conditions
8318429Sjhb * are met:
9318429Sjhb * 1. Redistributions of source code must retain the above copyright
10318429Sjhb *    notice, this list of conditions and the following disclaimer.
11318429Sjhb * 2. Redistributions in binary form must reproduce the above copyright
12318429Sjhb *    notice, this list of conditions and the following disclaimer in the
13318429Sjhb *    documentation and/or other materials provided with the distribution.
14318429Sjhb *
15318429Sjhb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16318429Sjhb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17318429Sjhb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18318429Sjhb * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19318429Sjhb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20318429Sjhb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21318429Sjhb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22318429Sjhb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23318429Sjhb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24318429Sjhb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25318429Sjhb * SUCH DAMAGE.
26318429Sjhb */
27318429Sjhb
28318429Sjhb#include <sys/cdefs.h>
29318429Sjhb__FBSDID("$FreeBSD: head/sys/dev/cxgbe/crypto/t4_crypto.c 318429 2017-05-17 22:13:07Z jhb $");
30318429Sjhb
31318429Sjhb#include <sys/types.h>
32318429Sjhb#include <sys/bus.h>
33318429Sjhb#include <sys/lock.h>
34318429Sjhb#include <sys/malloc.h>
35318429Sjhb#include <sys/mutex.h>
36318429Sjhb#include <sys/module.h>
37318429Sjhb#include <sys/sglist.h>
38318429Sjhb
39318429Sjhb#include <opencrypto/cryptodev.h>
40318429Sjhb#include <opencrypto/xform.h>
41318429Sjhb
42318429Sjhb#include "cryptodev_if.h"
43318429Sjhb
44318429Sjhb#include "common/common.h"
45318429Sjhb#include "crypto/t4_crypto.h"
46318429Sjhb
47318429Sjhb/*
48318429Sjhb * Requests consist of:
49318429Sjhb *
50318429Sjhb * +-------------------------------+
51318429Sjhb * | struct fw_crypto_lookaside_wr |
52318429Sjhb * +-------------------------------+
53318429Sjhb * | struct ulp_txpkt              |
54318429Sjhb * +-------------------------------+
55318429Sjhb * | struct ulptx_idata            |
56318429Sjhb * +-------------------------------+
57318429Sjhb * | struct cpl_tx_sec_pdu         |
58318429Sjhb * +-------------------------------+
59318429Sjhb * | struct cpl_tls_tx_scmd_fmt    |
60318429Sjhb * +-------------------------------+
61318429Sjhb * | key context header            |
62318429Sjhb * +-------------------------------+
63318429Sjhb * | AES key                       |  ----- For requests with AES
64318429Sjhb * +-------------------------------+ -
65318429Sjhb * | IPAD (16-byte aligned)        |  \
66318429Sjhb * +-------------------------------+  +---- For requests with HMAC
67318429Sjhb * | OPAD (16-byte aligned)        |  /
68318429Sjhb * +-------------------------------+ -
69318429Sjhb * | GMAC H                        |  ----- For AES-GCM
70318429Sjhb * +-------------------------------+ -
71318429Sjhb * | struct cpl_rx_phys_dsgl       |  \
72318429Sjhb * +-------------------------------+  +---- Destination buffer for
73318429Sjhb * | PHYS_DSGL entries             |  /     non-hash-only requests
74318429Sjhb * +-------------------------------+ -
75318429Sjhb * | 16 dummy bytes                |  ----- Only for hash-only requests
76318429Sjhb * +-------------------------------+
77318429Sjhb * | IV                            |  ----- If immediate IV
78318429Sjhb * +-------------------------------+
79318429Sjhb * | Payload                       |  ----- If immediate Payload
80318429Sjhb * +-------------------------------+ -
81318429Sjhb * | struct ulptx_sgl              |  \
82318429Sjhb * +-------------------------------+  +---- If payload via SGL
83318429Sjhb * | SGL entries                   |  /
84318429Sjhb * +-------------------------------+ -
85318429Sjhb *
86318429Sjhb * Note that the key context must be padded to ensure 16-byte alignment.
87318429Sjhb * For HMAC requests, the key consists of the partial hash of the IPAD
88318429Sjhb * followed by the partial hash of the OPAD.
89318429Sjhb *
90318429Sjhb * Replies consist of:
91318429Sjhb *
92318429Sjhb * +-------------------------------+
93318429Sjhb * | struct cpl_fw6_pld            |
94318429Sjhb * +-------------------------------+
95318429Sjhb * | hash digest                   |  ----- For HMAC request with
96318429Sjhb * +-------------------------------+        'hash_size' set in work request
97318429Sjhb *
98318429Sjhb * A 32-bit big-endian error status word is supplied in the last 4
99318429Sjhb * bytes of data[0] in the CPL_FW6_PLD message.  bit 0 indicates a
100318429Sjhb * "MAC" error and bit 1 indicates a "PAD" error.
101318429Sjhb *
102318429Sjhb * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
103318429Sjhb * in the request is returned in data[1] of the CPL_FW6_PLD message.
104318429Sjhb *
105318429Sjhb * For block cipher replies, the updated IV is supplied in data[2] and
106318429Sjhb * data[3] of the CPL_FW6_PLD message.
107318429Sjhb *
108318429Sjhb * For hash replies where the work request set 'hash_size' to request
109318429Sjhb * a copy of the hash in the reply, the hash digest is supplied
110318429Sjhb * immediately following the CPL_FW6_PLD message.
111318429Sjhb */
112318429Sjhb
113318429Sjhb/*
114318429Sjhb * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32
115318429Sjhb * SG entries.
116318429Sjhb */
117318429Sjhb#define	MAX_RX_PHYS_DSGL_SGE	32
118318429Sjhb#define	DSGL_SGE_MAXLEN		65535
119318429Sjhb
120318429Sjhbstatic MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
121318429Sjhb
/*
 * Per-session HMAC/hash state.  The IPAD and OPAD partial hashes are
 * precomputed at session creation and copied into the key context of
 * each work request.
 */
struct ccr_session_hmac {
	struct auth_hash *auth_hash;	/* software transform descriptor */
	int hash_len;			/* digest bytes copied back to the request */
	unsigned int partial_digest_len; /* bytes of each precomputed pad hash */
	unsigned int auth_mode;		/* SCMD_AUTH_MODE value for this hash */
	unsigned int mk_size;		/* KEY_CONTEXT_MK_SIZE value */
	char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; /* partial hash of IPAD */
	char opad[CHCR_HASH_MAX_BLOCK_SIZE_128]; /* partial hash of OPAD */
};
131318429Sjhb
/* Per-session state for AES-GCM (GMAC hash subkey H). */
struct ccr_session_gmac {
	int hash_len;			/* tag length in bytes */
	char ghash_h[GMAC_BLOCK_LEN];	/* GMAC H placed in the key context */
};
136318429Sjhb
/*
 * Per-session block-cipher state.  Both encryption and decryption key
 * schedules are stored so either direction can be selected per request.
 */
struct ccr_session_blkcipher {
	unsigned int cipher_mode;	/* SCMD_CIPH_MODE value */
	unsigned int key_len;		/* length of enckey/deckey in bytes */
	unsigned int iv_len;		/* IV length for this cipher */
	__be32 key_ctx_hdr;		/* prebuilt key context header (big-endian) */
	char enckey[CHCR_AES_MAX_KEY_LEN]; /* encryption key */
	char deckey[CHCR_AES_MAX_KEY_LEN]; /* decryption key */
};
145318429Sjhb
/*
 * A single OCF session.  'mode' selects which members of the union
 * (and whether 'blkcipher') are valid.
 */
struct ccr_session {
	bool active;			/* slot is in use */
	int pending;			/* requests in flight for this session */
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	union {
		struct ccr_session_hmac hmac;	/* HMAC and AUTHENC modes */
		struct ccr_session_gmac gmac;	/* GCM mode */
	};
	struct ccr_session_blkcipher blkcipher;	/* all cipher-bearing modes */
};
156318429Sjhb
/* Per-device driver state. */
struct ccr_softc {
	struct adapter *adapter;	/* backpointer to the cxgbe adapter */
	device_t dev;
	uint32_t cid;			/* OCF crypto driver id */
	int tx_channel_id;		/* channel used for RXCHID/CHANNELID */
	struct ccr_session *sessions;	/* growable array of session slots */
	int nsessions;			/* size of 'sessions' */
	struct mtx lock;
	bool detaching;			/* set while detach is in progress */
	struct sge_wrq *txq;		/* queue work requests are sent on */
	struct sge_rxq *rxq;		/* queue replies arrive on */

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_crp' contains an sglist describing the entire buffer
	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
	 * the data the engine should DMA as input via ULPTX_SGL.
	 * 'sg_dsgl' is used to describe the destination that cipher
	 * text and a tag should be written to.
	 */
	struct sglist *sg_crp;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/* Statistics. */
	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hmac;
	uint64_t stats_authenc_encrypt;
	uint64_t stats_authenc_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_wr_nomem;	/* alloc_wrqe() failures */
	uint64_t stats_inflight;	/* requests currently outstanding */
	uint64_t stats_mac_error;	/* "MAC" error bit set in replies */
	uint64_t stats_pad_error;	/* "PAD" error bit set in replies */
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
};
197318429Sjhb
198318429Sjhb/*
199318429Sjhb * Crypto requests involve two kind of scatter/gather lists.
200318429Sjhb *
201318429Sjhb * Non-hash-only requests require a PHYS_DSGL that describes the
202318429Sjhb * location to store the results of the encryption or decryption
203318429Sjhb * operation.  This SGL uses a different format (PHYS_DSGL) and should
204318429Sjhb * exclude the crd_skip bytes at the start of the data as well as
205318429Sjhb * any AAD or IV.  For authenticated encryption requests it should
206318429Sjhb * cover include the destination of the hash or tag.
207318429Sjhb *
208318429Sjhb * The input payload may either be supplied inline as immediate data,
209318429Sjhb * or via a standard ULP_TX SGL.  This SGL should include AAD,
210318429Sjhb * ciphertext, and the hash or tag for authenticated decryption
211318429Sjhb * requests.
212318429Sjhb *
213318429Sjhb * These scatter/gather lists can describe different subsets of the
214318429Sjhb * buffer described by the crypto operation.  ccr_populate_sglist()
215318429Sjhb * generates a scatter/gather list that covers the entire crypto
216318429Sjhb * operation buffer that is then used to construct the other
217318429Sjhb * scatter/gather lists.
218318429Sjhb */
219318429Sjhbstatic int
220318429Sjhbccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
221318429Sjhb{
222318429Sjhb	int error;
223318429Sjhb
224318429Sjhb	sglist_reset(sg);
225318429Sjhb	if (crp->crp_flags & CRYPTO_F_IMBUF)
226318429Sjhb		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
227318429Sjhb	else if (crp->crp_flags & CRYPTO_F_IOV)
228318429Sjhb		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
229318429Sjhb	else
230318429Sjhb		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
231318429Sjhb	return (error);
232318429Sjhb}
233318429Sjhb
234318429Sjhb/*
235318429Sjhb * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
236318429Sjhb * segments.
237318429Sjhb */
238318429Sjhbstatic int
239318429Sjhbccr_count_sgl(struct sglist *sg, int maxsegsize)
240318429Sjhb{
241318429Sjhb	int i, nsegs;
242318429Sjhb
243318429Sjhb	nsegs = 0;
244318429Sjhb	for (i = 0; i < sg->sg_nseg; i++)
245318429Sjhb		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
246318429Sjhb	return (nsegs);
247318429Sjhb}
248318429Sjhb
249318429Sjhb/* These functions deal with PHYS_DSGL for the reply buffer. */
250318429Sjhbstatic inline int
251318429Sjhbccr_phys_dsgl_len(int nsegs)
252318429Sjhb{
253318429Sjhb	int len;
254318429Sjhb
255318429Sjhb	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
256318429Sjhb	if ((nsegs % 8) != 0) {
257318429Sjhb		len += sizeof(uint16_t) * 8;
258318429Sjhb		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
259318429Sjhb	}
260318429Sjhb	return (len);
261318429Sjhb}
262318429Sjhb
/*
 * Write a CPL_RX_PHYS_DSGL header followed by PHYS_DSGL entries
 * describing 'sc->sg_dsgl' into 'dst'.  This tells the engine where
 * to DMA the results of the operation.  'nsegs' must be the count
 * previously obtained from ccr_count_sgl(sc->sg_dsgl,
 * DSGL_SGE_MAXLEN).
 */
static void
ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	/* Direct the completion to this driver's receive queue. */
	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			/* Lengths are 16-bit; split oversized segments. */
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			/* Each phys_sge_pairs holds 8 addr/len slots. */
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}
309318429Sjhb
310318429Sjhb/* These functions deal with the ULPTX_SGL for input payload. */
311318429Sjhbstatic inline int
312318429Sjhbccr_ulptx_sgl_len(int nsegs)
313318429Sjhb{
314318429Sjhb	u_int n;
315318429Sjhb
316318429Sjhb	nsegs--; /* first segment is part of ulptx_sgl */
317318429Sjhb	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
318318429Sjhb	return (roundup2(n, 16));
319318429Sjhb}
320318429Sjhb
/*
 * Write a ULPTX_SGL describing 'sc->sg_ulptx' into 'dst'.  This is
 * the input-payload scatter/gather list the engine DMAs from.
 * 'nsegs' must equal sc->sg_ulptx->sg_nseg.
 */
static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	/* The first segment is stored in the ulptx_sgl header itself. */
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	/* Remaining segments go into the trailing sge pairs, 2 per pair. */
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}

}
345318429Sjhb
346318429Sjhbstatic bool
347318429Sjhbccr_use_imm_data(u_int transhdr_len, u_int input_len)
348318429Sjhb{
349318429Sjhb
350318429Sjhb	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
351318429Sjhb		return (false);
352318429Sjhb	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
353318429Sjhb	    SGE_MAX_WR_LEN)
354318429Sjhb		return (false);
355318429Sjhb	return (true);
356318429Sjhb}
357318429Sjhb
/*
 * Fill in the portions of a work request common to all operations:
 * the FW_CRYPTO_LOOKASIDE_WR header, the ULP_TXPKT command, and the
 * ULPTX_IDATA immediate-data sub-command that wraps everything up to
 * (but not including) any trailing ULPTX_SGL.  'crp' is stashed in
 * the 64-bit cookie so the reply handler can find the request.
 */
static void
ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
    u_int wr_len, uint32_t sid, u_int imm_len, u_int sgl_len, u_int hash_size,
    u_int iv_loc, struct cryptop *crp)
{
	u_int cctx_size;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = htobe32(sid);
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(iv_loc) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	/* Echoed back in data[1] of the CPL_FW6_PLD reply. */
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1));
	/* Immediate data covers everything after sec_cpl except the SGL. */
	crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) -
	    sgl_len);
}
401318429Sjhb
/*
 * Build and submit a work request for an HMAC (hash-only) operation.
 * The digest is requested in the reply via 'hash_size' and is copied
 * into the request buffer by ccr_hmac_done().  Returns 0 once the
 * work request is queued, or an errno on failure.
 */
static int
ccr_hmac(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	struct cryptodesc *crd;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len;

	axf = s->hmac.auth_hash;

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size * 2;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

	/* Send the payload inline if it fits, otherwise via ULPTX_SGL. */
	crd = crp->crp_desc;
	if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
	    hash_size_in_response, IV_NOP, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crd->crd_len);

	/* AUTHSTOP of 0 means "hash to the end of the payload". */
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_MAC_ONLY(1));

	/* Key context: precomputed IPAD hash, then OPAD hash, aligned. */
	memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
	    s->hmac.partial_digest_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	/* Hash-only requests carry DUMMY_BYTES in place of a PHYS_DSGL. */
	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
	if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
502318429Sjhb
503318429Sjhbstatic int
504318429Sjhbccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
505318429Sjhb    const struct cpl_fw6_pld *cpl, int error)
506318429Sjhb{
507318429Sjhb	struct cryptodesc *crd;
508318429Sjhb
509318429Sjhb	crd = crp->crp_desc;
510318429Sjhb	if (error == 0) {
511318429Sjhb		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
512318429Sjhb		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
513318429Sjhb	}
514318429Sjhb
515318429Sjhb	return (error);
516318429Sjhb}
517318429Sjhb
/*
 * Build and submit a work request for a plain block-cipher operation
 * (AES-CBC, AES-ICM, or AES-XTS).  The IV is either supplied inline
 * in the work request (IV_IMMEDIATE) or fetched by the engine from
 * the request buffer via the input SGL (IV_DSGL).  Returns 0 once
 * the work request is queued, or an errno on failure.
 */
static int
ccr_blkcipher(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct cryptodesc *crd;
	char *dst;
	u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	crd = crp->crp_desc;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);
	/* CBC requires a whole number of cipher blocks. */
	if (crd->crd_alg == CRYPTO_AES_CBC &&
	    (crd->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Determine the operation direction and where the IV comes from. */
	iv_loc = IV_NOP;
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		op_type = CHCR_ENCRYPT_OP;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		iv_loc = IV_IMMEDIATE;
		/* Store the generated IV in the buffer unless told not to. */
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		op_type = CHCR_DECRYPT_OP;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
			iv_loc = IV_IMMEDIATE;
		} else
			iv_loc = IV_DSGL;
	}

	/* Destination SGL: where the engine writes the result. */
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
	    crd->crd_len);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/* Input payload: immediate data if it fits, otherwise ULPTX_SGL. */
	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
	    s->blkcipher.iv_len)) {
		imm_len = crd->crd_len;
		if (iv_loc == IV_DSGL) {
			/* Inline the IV as well rather than via the SGL. */
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
			iv_loc = IV_IMMEDIATE;
		}
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (iv_loc == IV_DSGL) {
			/* The IV is DMAed from the request buffer. */
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crd->crd_inject, s->blkcipher.iv_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (iv_loc == IV_IMMEDIATE)
		wr_len += s->blkcipher.iv_len;
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0,
	    iv_loc, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);

	/* Offsets are 1-based; cipher text starts right after the IV. */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	/* Select the key material the engine needs for this direction. */
	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crd->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		/* Counter mode always uses the encryption schedule. */
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		/* XTS: tweak key half first, then the data key half. */
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	/* Lay out PHYS_DSGL, optional immediate IV, then the payload. */
	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	if (iv_loc == IV_IMMEDIATE) {
		memcpy(dst, iv, s->blkcipher.iv_len);
		dst += s->blkcipher.iv_len;
	}
	if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}
692318429Sjhb
/*
 * Completion handler for block-cipher requests.  Nothing to copy
 * back: the engine wrote the result directly via the PHYS_DSGL.
 */
static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}
704318429Sjhb
705318429Sjhb/*
706318429Sjhb * 'hashsize' is the length of a full digest.  'authsize' is the
707318429Sjhb * requested digest length for this operation which may be less
708318429Sjhb * than 'hashsize'.
709318429Sjhb */
710318429Sjhbstatic int
711318429Sjhbccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
712318429Sjhb{
713318429Sjhb
714318429Sjhb	if (authsize == 10)
715318429Sjhb		return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366);
716318429Sjhb	if (authsize == 12)
717318429Sjhb		return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT);
718318429Sjhb	if (authsize == hashsize / 2)
719318429Sjhb		return (CHCR_SCMD_HMAC_CTRL_DIV2);
720318429Sjhb	return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC);
721318429Sjhb}
722318429Sjhb
723318429Sjhbstatic int
724318429Sjhbccr_authenc(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
725318429Sjhb    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
726318429Sjhb{
727318429Sjhb	char iv[CHCR_MAX_CRYPTO_IV_LEN];
728318429Sjhb	struct chcr_wr *crwr;
729318429Sjhb	struct wrqe *wr;
730318429Sjhb	struct auth_hash *axf;
731318429Sjhb	char *dst;
732318429Sjhb	u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len;
733318429Sjhb	u_int hash_size_in_response, imm_len, iopad_size;
734318429Sjhb	u_int aad_start, aad_len, aad_stop;
735318429Sjhb	u_int auth_start, auth_stop, auth_insert;
736318429Sjhb	u_int cipher_start, cipher_stop;
737318429Sjhb	u_int hmac_ctrl, input_len;
738318429Sjhb	int dsgl_nsegs, dsgl_len;
739318429Sjhb	int sgl_nsegs, sgl_len;
740318429Sjhb	int error;
741318429Sjhb
742318429Sjhb	if (s->blkcipher.key_len == 0)
743318429Sjhb		return (EINVAL);
744318429Sjhb	if (crde->crd_alg == CRYPTO_AES_CBC &&
745318429Sjhb	    (crde->crd_len % AES_BLOCK_LEN) != 0)
746318429Sjhb		return (EINVAL);
747318429Sjhb
748318429Sjhb	/*
749318429Sjhb	 * AAD is only permitted before the cipher/plain text, not
750318429Sjhb	 * after.
751318429Sjhb	 */
752318429Sjhb	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
753318429Sjhb		return (EINVAL);
754318429Sjhb
755318429Sjhb	axf = s->hmac.auth_hash;
756318429Sjhb	hash_size_in_response = s->hmac.hash_len;
757318429Sjhb
758318429Sjhb	/*
759318429Sjhb	 * The IV is always stored at the start of the buffer even
760318429Sjhb	 * though it may be duplicated in the payload.  The crypto
761318429Sjhb	 * engine doesn't work properly if the IV offset points inside
762318429Sjhb	 * of the AAD region, so a second copy is always required.
763318429Sjhb	 */
764318429Sjhb	iv_loc = IV_IMMEDIATE;
765318429Sjhb	if (crde->crd_flags & CRD_F_ENCRYPT) {
766318429Sjhb		op_type = CHCR_ENCRYPT_OP;
767318429Sjhb		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
768318429Sjhb			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
769318429Sjhb		else
770318429Sjhb			arc4rand(iv, s->blkcipher.iv_len, 0);
771318429Sjhb		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
772318429Sjhb			crypto_copyback(crp->crp_flags, crp->crp_buf,
773318429Sjhb			    crde->crd_inject, s->blkcipher.iv_len, iv);
774318429Sjhb	} else {
775318429Sjhb		op_type = CHCR_DECRYPT_OP;
776318429Sjhb		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
777318429Sjhb			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
778318429Sjhb		else
779318429Sjhb			crypto_copydata(crp->crp_flags, crp->crp_buf,
780318429Sjhb			    crde->crd_inject, s->blkcipher.iv_len, iv);
781318429Sjhb	}
782318429Sjhb
783318429Sjhb	/*
784318429Sjhb	 * The output buffer consists of the cipher text followed by
785318429Sjhb	 * the hash when encrypting.  For decryption it only contains
786318429Sjhb	 * the plain text.
787318429Sjhb	 */
788318429Sjhb	sglist_reset(sc->sg_dsgl);
789318429Sjhb	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
790318429Sjhb	    crde->crd_len);
791318429Sjhb	if (error)
792318429Sjhb		return (error);
793318429Sjhb	if (op_type == CHCR_ENCRYPT_OP) {
794318429Sjhb		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
795318429Sjhb		    crda->crd_inject, hash_size_in_response);
796318429Sjhb		if (error)
797318429Sjhb			return (error);
798318429Sjhb	}
799318429Sjhb	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
800318429Sjhb	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
801318429Sjhb		return (EFBIG);
802318429Sjhb	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
803318429Sjhb
804318429Sjhb	/* PADs must be 128-bit aligned. */
805318429Sjhb	iopad_size = roundup2(s->hmac.partial_digest_len, 16);
806318429Sjhb
807318429Sjhb	/*
808318429Sjhb	 * The 'key' part of the key context consists of the key followed
809318429Sjhb	 * by the IPAD and OPAD.
810318429Sjhb	 */
811318429Sjhb	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
812318429Sjhb	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
813318429Sjhb
814318429Sjhb	/*
815318429Sjhb	 * The input buffer consists of the IV, any AAD, and then the
816318429Sjhb	 * cipher/plain text.  For decryption requests the hash is
817318429Sjhb	 * appended after the cipher text.
818318429Sjhb	 */
819318429Sjhb	if (crda->crd_skip < crde->crd_skip) {
820318429Sjhb		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
821318429Sjhb			aad_len = (crde->crd_skip - crda->crd_skip);
822318429Sjhb		else
823318429Sjhb			aad_len = crda->crd_len;
824318429Sjhb	} else
825318429Sjhb		aad_len = 0;
826318429Sjhb	input_len = aad_len + crde->crd_len;
827318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
828318429Sjhb		input_len += hash_size_in_response;
829318429Sjhb	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
830318429Sjhb		imm_len = input_len;
831318429Sjhb		sgl_nsegs = 0;
832318429Sjhb		sgl_len = 0;
833318429Sjhb	} else {
834318429Sjhb		imm_len = 0;
835318429Sjhb		sglist_reset(sc->sg_ulptx);
836318429Sjhb		if (aad_len != 0) {
837318429Sjhb			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
838318429Sjhb			    crda->crd_skip, aad_len);
839318429Sjhb			if (error)
840318429Sjhb				return (error);
841318429Sjhb		}
842318429Sjhb		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
843318429Sjhb		    crde->crd_skip, crde->crd_len);
844318429Sjhb		if (error)
845318429Sjhb			return (error);
846318429Sjhb		if (op_type == CHCR_DECRYPT_OP) {
847318429Sjhb			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
848318429Sjhb			    crda->crd_inject, hash_size_in_response);
849318429Sjhb			if (error)
850318429Sjhb				return (error);
851318429Sjhb		}
852318429Sjhb		sgl_nsegs = sc->sg_ulptx->sg_nseg;
853318429Sjhb		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
854318429Sjhb	}
855318429Sjhb
856318429Sjhb	/*
857318429Sjhb	 * Any auth-only data before the cipher region is marked as AAD.
858318429Sjhb	 * Auth-data that overlaps with the cipher region is placed in
859318429Sjhb	 * the auth section.
860318429Sjhb	 */
861318429Sjhb	if (aad_len != 0) {
862318429Sjhb		aad_start = s->blkcipher.iv_len + 1;
863318429Sjhb		aad_stop = aad_start + aad_len - 1;
864318429Sjhb	} else {
865318429Sjhb		aad_start = 0;
866318429Sjhb		aad_stop = 0;
867318429Sjhb	}
868318429Sjhb	cipher_start = s->blkcipher.iv_len + aad_len + 1;
869318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
870318429Sjhb		cipher_stop = hash_size_in_response;
871318429Sjhb	else
872318429Sjhb		cipher_stop = 0;
873318429Sjhb	if (aad_len == crda->crd_len) {
874318429Sjhb		auth_start = 0;
875318429Sjhb		auth_stop = 0;
876318429Sjhb	} else {
877318429Sjhb		if (aad_len != 0)
878318429Sjhb			auth_start = cipher_start;
879318429Sjhb		else
880318429Sjhb			auth_start = s->blkcipher.iv_len + crda->crd_skip -
881318429Sjhb			    crde->crd_skip + 1;
882318429Sjhb		auth_stop = (crde->crd_skip + crde->crd_len) -
883318429Sjhb		    (crda->crd_skip + crda->crd_len) + cipher_stop;
884318429Sjhb	}
885318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
886318429Sjhb		auth_insert = hash_size_in_response;
887318429Sjhb	else
888318429Sjhb		auth_insert = 0;
889318429Sjhb
890318429Sjhb	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
891318429Sjhb	if (iv_loc == IV_IMMEDIATE)
892318429Sjhb		wr_len += s->blkcipher.iv_len;
893318429Sjhb	wr = alloc_wrqe(wr_len, sc->txq);
894318429Sjhb	if (wr == NULL) {
895318429Sjhb		sc->stats_wr_nomem++;
896318429Sjhb		return (ENOMEM);
897318429Sjhb	}
898318429Sjhb	crwr = wrtod(wr);
899318429Sjhb	memset(crwr, 0, wr_len);
900318429Sjhb
901318429Sjhb	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
902318429Sjhb	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, iv_loc,
903318429Sjhb	    crp);
904318429Sjhb
905318429Sjhb	/* XXX: Hardcodes SGE loopback channel of 0. */
906318429Sjhb	crwr->sec_cpl.op_ivinsrtofst = htobe32(
907318429Sjhb	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
908318429Sjhb	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
909318429Sjhb	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
910318429Sjhb	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
911318429Sjhb	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
912318429Sjhb
913318429Sjhb	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);
914318429Sjhb
915318429Sjhb	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
916318429Sjhb	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
917318429Sjhb	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
918318429Sjhb	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
919318429Sjhb	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
920318429Sjhb	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
921318429Sjhb	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
922318429Sjhb	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
923318429Sjhb	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
924318429Sjhb	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
925318429Sjhb
926318429Sjhb	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
927318429Sjhb	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
928318429Sjhb	crwr->sec_cpl.seqno_numivs = htobe32(
929318429Sjhb	    V_SCMD_SEQ_NO_CTRL(0) |
930318429Sjhb	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
931318429Sjhb	    V_SCMD_ENC_DEC_CTRL(op_type) |
932318429Sjhb	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
933318429Sjhb	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
934318429Sjhb	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
935318429Sjhb	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
936318429Sjhb	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
937318429Sjhb	    V_SCMD_NUM_IVS(0));
938318429Sjhb	crwr->sec_cpl.ivgen_hdrlen = htobe32(
939318429Sjhb	    V_SCMD_IV_GEN_CTRL(0) |
940318429Sjhb	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
941318429Sjhb	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
942318429Sjhb
943318429Sjhb	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
944318429Sjhb	switch (crde->crd_alg) {
945318429Sjhb	case CRYPTO_AES_CBC:
946318429Sjhb		if (crde->crd_flags & CRD_F_ENCRYPT)
947318429Sjhb			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
948318429Sjhb			    s->blkcipher.key_len);
949318429Sjhb		else
950318429Sjhb			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
951318429Sjhb			    s->blkcipher.key_len);
952318429Sjhb		break;
953318429Sjhb	case CRYPTO_AES_ICM:
954318429Sjhb		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
955318429Sjhb		    s->blkcipher.key_len);
956318429Sjhb		break;
957318429Sjhb	case CRYPTO_AES_XTS:
958318429Sjhb		key_half = s->blkcipher.key_len / 2;
959318429Sjhb		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
960318429Sjhb		    key_half);
961318429Sjhb		if (crde->crd_flags & CRD_F_ENCRYPT)
962318429Sjhb			memcpy(crwr->key_ctx.key + key_half,
963318429Sjhb			    s->blkcipher.enckey, key_half);
964318429Sjhb		else
965318429Sjhb			memcpy(crwr->key_ctx.key + key_half,
966318429Sjhb			    s->blkcipher.deckey, key_half);
967318429Sjhb		break;
968318429Sjhb	}
969318429Sjhb
970318429Sjhb	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
971318429Sjhb	memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
972318429Sjhb	memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);
973318429Sjhb
974318429Sjhb	dst = (char *)(crwr + 1) + kctx_len;
975318429Sjhb	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
976318429Sjhb	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
977318429Sjhb	if (iv_loc == IV_IMMEDIATE) {
978318429Sjhb		memcpy(dst, iv, s->blkcipher.iv_len);
979318429Sjhb		dst += s->blkcipher.iv_len;
980318429Sjhb	}
981318429Sjhb	if (imm_len != 0) {
982318429Sjhb		if (aad_len != 0) {
983318429Sjhb			crypto_copydata(crp->crp_flags, crp->crp_buf,
984318429Sjhb			    crda->crd_skip, aad_len, dst);
985318429Sjhb			dst += aad_len;
986318429Sjhb		}
987318429Sjhb		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
988318429Sjhb		    crde->crd_len, dst);
989318429Sjhb		dst += crde->crd_len;
990318429Sjhb		if (op_type == CHCR_DECRYPT_OP)
991318429Sjhb			crypto_copydata(crp->crp_flags, crp->crp_buf,
992318429Sjhb			    crda->crd_inject, hash_size_in_response, dst);
993318429Sjhb	} else
994318429Sjhb		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
995318429Sjhb
996318429Sjhb	/* XXX: TODO backpressure */
997318429Sjhb	t4_wrq_tx(sc->adapter, wr);
998318429Sjhb
999318429Sjhb	return (0);
1000318429Sjhb}
1001318429Sjhb
1002318429Sjhbstatic int
1003318429Sjhbccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
1004318429Sjhb    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1005318429Sjhb{
1006318429Sjhb	struct cryptodesc *crd;
1007318429Sjhb
1008318429Sjhb	/*
1009318429Sjhb	 * The updated IV to permit chained requests is at
1010318429Sjhb	 * cpl->data[2], but OCF doesn't permit chained requests.
1011318429Sjhb	 *
1012318429Sjhb	 * For a decryption request, the hardware may do a verification
1013318429Sjhb	 * of the HMAC which will fail if the existing HMAC isn't in the
1014318429Sjhb	 * buffer.  If that happens, clear the error and copy the HMAC
1015318429Sjhb	 * from the CPL reply into the buffer.
1016318429Sjhb	 *
1017318429Sjhb	 * For encryption requests, crd should be the cipher request
1018318429Sjhb	 * which will have CRD_F_ENCRYPT set.  For decryption
1019318429Sjhb	 * requests, crp_desc will be the HMAC request which should
1020318429Sjhb	 * not have this flag set.
1021318429Sjhb	 */
1022318429Sjhb	crd = crp->crp_desc;
1023318429Sjhb	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
1024318429Sjhb	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
1025318429Sjhb		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
1026318429Sjhb		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
1027318429Sjhb		error = 0;
1028318429Sjhb	}
1029318429Sjhb	return (error);
1030318429Sjhb}
1031318429Sjhb
1032318429Sjhbstatic int
1033318429Sjhbccr_gcm(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
1034318429Sjhb    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
1035318429Sjhb{
1036318429Sjhb	char iv[CHCR_MAX_CRYPTO_IV_LEN];
1037318429Sjhb	struct chcr_wr *crwr;
1038318429Sjhb	struct wrqe *wr;
1039318429Sjhb	char *dst;
1040318429Sjhb	u_int iv_len, iv_loc, kctx_len, op_type, transhdr_len, wr_len;
1041318429Sjhb	u_int hash_size_in_response, imm_len;
1042318429Sjhb	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1043318429Sjhb	u_int hmac_ctrl, input_len;
1044318429Sjhb	int dsgl_nsegs, dsgl_len;
1045318429Sjhb	int sgl_nsegs, sgl_len;
1046318429Sjhb	int error;
1047318429Sjhb
1048318429Sjhb	if (s->blkcipher.key_len == 0)
1049318429Sjhb		return (EINVAL);
1050318429Sjhb
1051318429Sjhb	/*
1052318429Sjhb	 * AAD is only permitted before the cipher/plain text, not
1053318429Sjhb	 * after.
1054318429Sjhb	 */
1055318429Sjhb	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
1056318429Sjhb		return (EINVAL);
1057318429Sjhb
1058318429Sjhb	hash_size_in_response = s->gmac.hash_len;
1059318429Sjhb
1060318429Sjhb	/*
1061318429Sjhb	 * The IV is always stored at the start of the buffer even
1062318429Sjhb	 * though it may be duplicated in the payload.  The crypto
1063318429Sjhb	 * engine doesn't work properly if the IV offset points inside
1064318429Sjhb	 * of the AAD region, so a second copy is always required.
1065318429Sjhb	 *
1066318429Sjhb	 * The IV for GCM is further complicated in that IPSec
1067318429Sjhb	 * provides a full 16-byte IV (including the counter), whereas
1068318429Sjhb	 * the /dev/crypto interface sometimes provides a full 16-byte
1069318429Sjhb	 * IV (if no IV is provided in the ioctl) and sometimes a
1070318429Sjhb	 * 12-byte IV (if the IV was explicit).  For now the driver
1071318429Sjhb	 * always assumes a 12-byte IV and initializes the low 4 byte
1072318429Sjhb	 * counter to 1.
1073318429Sjhb	 */
1074318429Sjhb	iv_loc = IV_IMMEDIATE;
1075318429Sjhb	if (crde->crd_flags & CRD_F_ENCRYPT) {
1076318429Sjhb		op_type = CHCR_ENCRYPT_OP;
1077318429Sjhb		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1078318429Sjhb			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1079318429Sjhb		else
1080318429Sjhb			arc4rand(iv, s->blkcipher.iv_len, 0);
1081318429Sjhb		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
1082318429Sjhb			crypto_copyback(crp->crp_flags, crp->crp_buf,
1083318429Sjhb			    crde->crd_inject, s->blkcipher.iv_len, iv);
1084318429Sjhb	} else {
1085318429Sjhb		op_type = CHCR_DECRYPT_OP;
1086318429Sjhb		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
1087318429Sjhb			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
1088318429Sjhb		else
1089318429Sjhb			crypto_copydata(crp->crp_flags, crp->crp_buf,
1090318429Sjhb			    crde->crd_inject, s->blkcipher.iv_len, iv);
1091318429Sjhb	}
1092318429Sjhb
1093318429Sjhb	/*
1094318429Sjhb	 * If the input IV is 12 bytes, append an explicit counter of
1095318429Sjhb	 * 1.
1096318429Sjhb	 */
1097318429Sjhb	if (s->blkcipher.iv_len == 12) {
1098318429Sjhb		*(uint32_t *)&iv[12] = htobe32(1);
1099318429Sjhb		iv_len = AES_BLOCK_LEN;
1100318429Sjhb	} else
1101318429Sjhb		iv_len = s->blkcipher.iv_len;
1102318429Sjhb
1103318429Sjhb	/*
1104318429Sjhb	 * The output buffer consists of the cipher text followed by
1105318429Sjhb	 * the tag when encrypting.  For decryption it only contains
1106318429Sjhb	 * the plain text.
1107318429Sjhb	 */
1108318429Sjhb	sglist_reset(sc->sg_dsgl);
1109318429Sjhb	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
1110318429Sjhb	    crde->crd_len);
1111318429Sjhb	if (error)
1112318429Sjhb		return (error);
1113318429Sjhb	if (op_type == CHCR_ENCRYPT_OP) {
1114318429Sjhb		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
1115318429Sjhb		    crda->crd_inject, hash_size_in_response);
1116318429Sjhb		if (error)
1117318429Sjhb			return (error);
1118318429Sjhb	}
1119318429Sjhb	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
1120318429Sjhb	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1121318429Sjhb		return (EFBIG);
1122318429Sjhb	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1123318429Sjhb
1124318429Sjhb	/*
1125318429Sjhb	 * The 'key' part of the key context consists of the key followed
1126318429Sjhb	 * by the Galois hash key.
1127318429Sjhb	 */
1128318429Sjhb	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
1129318429Sjhb	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1130318429Sjhb
1131318429Sjhb	/*
1132318429Sjhb	 * The input buffer consists of the IV, any AAD, and then the
1133318429Sjhb	 * cipher/plain text.  For decryption requests the hash is
1134318429Sjhb	 * appended after the cipher text.
1135318429Sjhb	 */
1136318429Sjhb	input_len = crda->crd_len + crde->crd_len;
1137318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
1138318429Sjhb		input_len += hash_size_in_response;
1139318429Sjhb	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1140318429Sjhb		imm_len = input_len;
1141318429Sjhb		sgl_nsegs = 0;
1142318429Sjhb		sgl_len = 0;
1143318429Sjhb	} else {
1144318429Sjhb		imm_len = 0;
1145318429Sjhb		sglist_reset(sc->sg_ulptx);
1146318429Sjhb		if (crda->crd_len != 0) {
1147318429Sjhb			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1148318429Sjhb			    crda->crd_skip, crda->crd_len);
1149318429Sjhb			if (error)
1150318429Sjhb				return (error);
1151318429Sjhb		}
1152318429Sjhb		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1153318429Sjhb		    crde->crd_skip, crde->crd_len);
1154318429Sjhb		if (error)
1155318429Sjhb			return (error);
1156318429Sjhb		if (op_type == CHCR_DECRYPT_OP) {
1157318429Sjhb			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1158318429Sjhb			    crda->crd_inject, hash_size_in_response);
1159318429Sjhb			if (error)
1160318429Sjhb				return (error);
1161318429Sjhb		}
1162318429Sjhb		sgl_nsegs = sc->sg_ulptx->sg_nseg;
1163318429Sjhb		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1164318429Sjhb	}
1165318429Sjhb
1166318429Sjhb	if (crda->crd_len != 0) {
1167318429Sjhb		aad_start = iv_len + 1;
1168318429Sjhb		aad_stop = aad_start + crda->crd_len - 1;
1169318429Sjhb	} else {
1170318429Sjhb		aad_start = 0;
1171318429Sjhb		aad_stop = 0;
1172318429Sjhb	}
1173318429Sjhb	cipher_start = iv_len + crda->crd_len + 1;
1174318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
1175318429Sjhb		cipher_stop = hash_size_in_response;
1176318429Sjhb	else
1177318429Sjhb		cipher_stop = 0;
1178318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
1179318429Sjhb		auth_insert = hash_size_in_response;
1180318429Sjhb	else
1181318429Sjhb		auth_insert = 0;
1182318429Sjhb
1183318429Sjhb	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
1184318429Sjhb	if (iv_loc == IV_IMMEDIATE)
1185318429Sjhb		wr_len += iv_len;
1186318429Sjhb	wr = alloc_wrqe(wr_len, sc->txq);
1187318429Sjhb	if (wr == NULL) {
1188318429Sjhb		sc->stats_wr_nomem++;
1189318429Sjhb		return (ENOMEM);
1190318429Sjhb	}
1191318429Sjhb	crwr = wrtod(wr);
1192318429Sjhb	memset(crwr, 0, wr_len);
1193318429Sjhb
1194318429Sjhb	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
1195318429Sjhb	    0, iv_loc, crp);
1196318429Sjhb
1197318429Sjhb	/* XXX: Hardcodes SGE loopback channel of 0. */
1198318429Sjhb	crwr->sec_cpl.op_ivinsrtofst = htobe32(
1199318429Sjhb	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1200318429Sjhb	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
1201318429Sjhb	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1202318429Sjhb	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1203318429Sjhb	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1204318429Sjhb
1205318429Sjhb	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1206318429Sjhb
1207318429Sjhb	/*
1208318429Sjhb	 * NB: cipherstop is explicitly set to 0.  On encrypt it
1209318429Sjhb	 * should normally be set to 0 anyway (as the encrypt crd ends
1210318429Sjhb	 * at the end of the input).  However, for decrypt the cipher
1211318429Sjhb	 * ends before the tag in the AUTHENC case (and authstop is
1212318429Sjhb	 * set to stop before the tag), but for GCM the cipher still
1213318429Sjhb	 * runs to the end of the buffer.  Not sure if this is
1214318429Sjhb	 * intentional or a firmware quirk, but it is required for
1215318429Sjhb	 * working tag validation with GCM decryption.
1216318429Sjhb	 */
1217318429Sjhb	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1218318429Sjhb	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1219318429Sjhb	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1220318429Sjhb	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1221318429Sjhb	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1222318429Sjhb	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1223318429Sjhb	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1224318429Sjhb	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1225318429Sjhb	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1226318429Sjhb	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1227318429Sjhb
1228318429Sjhb	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1229318429Sjhb	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
1230318429Sjhb	crwr->sec_cpl.seqno_numivs = htobe32(
1231318429Sjhb	    V_SCMD_SEQ_NO_CTRL(0) |
1232318429Sjhb	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
1233318429Sjhb	    V_SCMD_ENC_DEC_CTRL(op_type) |
1234318429Sjhb	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1235318429Sjhb	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
1236318429Sjhb	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) |
1237318429Sjhb	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
1238318429Sjhb	    V_SCMD_IV_SIZE(iv_len / 2) |
1239318429Sjhb	    V_SCMD_NUM_IVS(0));
1240318429Sjhb	crwr->sec_cpl.ivgen_hdrlen = htobe32(
1241318429Sjhb	    V_SCMD_IV_GEN_CTRL(0) |
1242318429Sjhb	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1243318429Sjhb	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
1244318429Sjhb
1245318429Sjhb	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
1246318429Sjhb	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
1247318429Sjhb	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
1248318429Sjhb	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
1249318429Sjhb
1250318429Sjhb	dst = (char *)(crwr + 1) + kctx_len;
1251318429Sjhb	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
1252318429Sjhb	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1253318429Sjhb	if (iv_loc == IV_IMMEDIATE) {
1254318429Sjhb		memcpy(dst, iv, iv_len);
1255318429Sjhb		dst += iv_len;
1256318429Sjhb	}
1257318429Sjhb	if (imm_len != 0) {
1258318429Sjhb		if (crda->crd_len != 0) {
1259318429Sjhb			crypto_copydata(crp->crp_flags, crp->crp_buf,
1260318429Sjhb			    crda->crd_skip, crda->crd_len, dst);
1261318429Sjhb			dst += crda->crd_len;
1262318429Sjhb		}
1263318429Sjhb		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
1264318429Sjhb		    crde->crd_len, dst);
1265318429Sjhb		dst += crde->crd_len;
1266318429Sjhb		if (op_type == CHCR_DECRYPT_OP)
1267318429Sjhb			crypto_copydata(crp->crp_flags, crp->crp_buf,
1268318429Sjhb			    crda->crd_inject, hash_size_in_response, dst);
1269318429Sjhb	} else
1270318429Sjhb		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
1271318429Sjhb
1272318429Sjhb	/* XXX: TODO backpressure */
1273318429Sjhb	t4_wrq_tx(sc->adapter, wr);
1274318429Sjhb
1275318429Sjhb	return (0);
1276318429Sjhb}
1277318429Sjhb
1278318429Sjhbstatic int
1279318429Sjhbccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
1280318429Sjhb    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1281318429Sjhb{
1282318429Sjhb
1283318429Sjhb	/*
1284318429Sjhb	 * The updated IV to permit chained requests is at
1285318429Sjhb	 * cpl->data[2], but OCF doesn't permit chained requests.
1286318429Sjhb	 *
1287318429Sjhb	 * Note that the hardware should always verify the GMAC hash.
1288318429Sjhb	 */
1289318429Sjhb	return (error);
1290318429Sjhb}
1291318429Sjhb
1292318429Sjhbstatic void
1293318429Sjhbccr_identify(driver_t *driver, device_t parent)
1294318429Sjhb{
1295318429Sjhb	struct adapter *sc;
1296318429Sjhb
1297318429Sjhb	sc = device_get_softc(parent);
1298318429Sjhb	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
1299318429Sjhb	    device_find_child(parent, "ccr", -1) == NULL)
1300318429Sjhb		device_add_child(parent, "ccr", -1);
1301318429Sjhb}
1302318429Sjhb
1303318429Sjhbstatic int
1304318429Sjhbccr_probe(device_t dev)
1305318429Sjhb{
1306318429Sjhb
1307318429Sjhb	device_set_desc(dev, "Chelsio Crypto Accelerator");
1308318429Sjhb	return (BUS_PROBE_DEFAULT);
1309318429Sjhb}
1310318429Sjhb
1311318429Sjhbstatic void
1312318429Sjhbccr_sysctls(struct ccr_softc *sc)
1313318429Sjhb{
1314318429Sjhb	struct sysctl_ctx_list *ctx;
1315318429Sjhb	struct sysctl_oid *oid;
1316318429Sjhb	struct sysctl_oid_list *children;
1317318429Sjhb
1318318429Sjhb	ctx = device_get_sysctl_ctx(sc->dev);
1319318429Sjhb
1320318429Sjhb	/*
1321318429Sjhb	 * dev.ccr.X.
1322318429Sjhb	 */
1323318429Sjhb	oid = device_get_sysctl_tree(sc->dev);
1324318429Sjhb	children = SYSCTL_CHILDREN(oid);
1325318429Sjhb
1326318429Sjhb	/*
1327318429Sjhb	 * dev.ccr.X.stats.
1328318429Sjhb	 */
1329318429Sjhb	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
1330318429Sjhb	    NULL, "statistics");
1331318429Sjhb	children = SYSCTL_CHILDREN(oid);
1332318429Sjhb
1333318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
1334318429Sjhb	    &sc->stats_hmac, 0, "HMAC requests submitted");
1335318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
1336318429Sjhb	    &sc->stats_blkcipher_encrypt, 0,
1337318429Sjhb	    "Cipher encryption requests submitted");
1338318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
1339318429Sjhb	    &sc->stats_blkcipher_decrypt, 0,
1340318429Sjhb	    "Cipher decryption requests submitted");
1341318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
1342318429Sjhb	    &sc->stats_authenc_encrypt, 0,
1343318429Sjhb	    "Combined AES+HMAC encryption requests submitted");
1344318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
1345318429Sjhb	    &sc->stats_authenc_decrypt, 0,
1346318429Sjhb	    "Combined AES+HMAC decryption requests submitted");
1347318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
1348318429Sjhb	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
1349318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
1350318429Sjhb	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
1351318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
1352318429Sjhb	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
1353318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
1354318429Sjhb	    &sc->stats_inflight, 0, "Requests currently pending");
1355318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
1356318429Sjhb	    &sc->stats_mac_error, 0, "MAC errors");
1357318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
1358318429Sjhb	    &sc->stats_pad_error, 0, "Padding errors");
1359318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
1360318429Sjhb	    &sc->stats_pad_error, 0, "Requests with invalid session ID");
1361318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
1362318429Sjhb	    &sc->stats_pad_error, 0, "Requests for which DMA mapping failed");
1363318429Sjhb	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
1364318429Sjhb	    &sc->stats_pad_error, 0, "Requests failed during queueing");
1365318429Sjhb}
1366318429Sjhb
1367318429Sjhbstatic int
1368318429Sjhbccr_attach(device_t dev)
1369318429Sjhb{
1370318429Sjhb	struct ccr_softc *sc;
1371318429Sjhb	int32_t cid;
1372318429Sjhb
1373318429Sjhb	/*
1374318429Sjhb	 * TODO: Crypto requests will panic if the parent device isn't
1375318429Sjhb	 * initialized so that the queues are up and running.  Need to
1376318429Sjhb	 * figure out how to handle that correctly, maybe just reject
1377318429Sjhb	 * requests if the adapter isn't fully initialized?
1378318429Sjhb	 */
1379318429Sjhb	sc = device_get_softc(dev);
1380318429Sjhb	sc->dev = dev;
1381318429Sjhb	sc->adapter = device_get_softc(device_get_parent(dev));
1382318429Sjhb	sc->txq = &sc->adapter->sge.ctrlq[0];
1383318429Sjhb	sc->rxq = &sc->adapter->sge.rxq[0];
1384318429Sjhb	cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
1385318429Sjhb	if (cid < 0) {
1386318429Sjhb		device_printf(dev, "could not get crypto driver id\n");
1387318429Sjhb		return (ENXIO);
1388318429Sjhb	}
1389318429Sjhb	sc->cid = cid;
1390318429Sjhb	sc->adapter->ccr_softc = sc;
1391318429Sjhb
1392318429Sjhb	/* XXX: TODO? */
1393318429Sjhb	sc->tx_channel_id = 0;
1394318429Sjhb
1395318429Sjhb	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
1396318429Sjhb	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
1397318429Sjhb	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
1398318429Sjhb	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
1399318429Sjhb	ccr_sysctls(sc);
1400318429Sjhb
1401318429Sjhb	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
1402318429Sjhb	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
1403318429Sjhb	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
1404318429Sjhb	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
1405318429Sjhb	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
1406318429Sjhb	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
1407318429Sjhb	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
1408318429Sjhb	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
1409318429Sjhb	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
1410318429Sjhb	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
1411318429Sjhb	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
1412318429Sjhb	return (0);
1413318429Sjhb}
1414318429Sjhb
1415318429Sjhbstatic int
1416318429Sjhbccr_detach(device_t dev)
1417318429Sjhb{
1418318429Sjhb	struct ccr_softc *sc;
1419318429Sjhb	int i;
1420318429Sjhb
1421318429Sjhb	sc = device_get_softc(dev);
1422318429Sjhb
1423318429Sjhb	mtx_lock(&sc->lock);
1424318429Sjhb	for (i = 0; i < sc->nsessions; i++) {
1425318429Sjhb		if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
1426318429Sjhb			mtx_unlock(&sc->lock);
1427318429Sjhb			return (EBUSY);
1428318429Sjhb		}
1429318429Sjhb	}
1430318429Sjhb	sc->detaching = true;
1431318429Sjhb	mtx_unlock(&sc->lock);
1432318429Sjhb
1433318429Sjhb	crypto_unregister_all(sc->cid);
1434318429Sjhb	free(sc->sessions, M_CCR);
1435318429Sjhb	mtx_destroy(&sc->lock);
1436318429Sjhb	sglist_free(sc->sg_dsgl);
1437318429Sjhb	sglist_free(sc->sg_ulptx);
1438318429Sjhb	sglist_free(sc->sg_crp);
1439318429Sjhb	sc->adapter->ccr_softc = NULL;
1440318429Sjhb	return (0);
1441318429Sjhb}
1442318429Sjhb
/*
 * Copy the partial hash state out of a software auth context into
 * 'dst' in big-endian form, for use as precomputed IPAD/OPAD state in
 * a hardware key context.
 */
static void
ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
{
	uint32_t *u32;
	uint64_t *u64;
	u_int i;

	u32 = (uint32_t *)dst;
	u64 = (uint64_t *)dst;
	switch (cri_alg) {
	case CRYPTO_SHA1_HMAC:
		for (i = 0; i < SHA1_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		/*
		 * Intentionally SHA2_512_HASH_LEN: SHA-384 uses the
		 * same 512-bit internal state as SHA-512, so the full
		 * state is copied, not the truncated 384-bit output.
		 */
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
		break;
	}
}
1471318429Sjhb
1472318429Sjhbstatic void
1473318429Sjhbccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
1474318429Sjhb    int klen)
1475318429Sjhb{
1476318429Sjhb	union authctx auth_ctx;
1477318429Sjhb	struct auth_hash *axf;
1478318429Sjhb	u_int i;
1479318429Sjhb
1480318429Sjhb	/*
1481318429Sjhb	 * If the key is larger than the block size, use the digest of
1482318429Sjhb	 * the key as the key instead.
1483318429Sjhb	 */
1484318429Sjhb	axf = s->hmac.auth_hash;
1485318429Sjhb	klen /= 8;
1486318429Sjhb	if (klen > axf->blocksize) {
1487318429Sjhb		axf->Init(&auth_ctx);
1488318429Sjhb		axf->Update(&auth_ctx, key, klen);
1489318429Sjhb		axf->Final(s->hmac.ipad, &auth_ctx);
1490318429Sjhb		klen = axf->hashsize;
1491318429Sjhb	} else
1492318429Sjhb		memcpy(s->hmac.ipad, key, klen);
1493318429Sjhb
1494318429Sjhb	memset(s->hmac.ipad + klen, 0, axf->blocksize);
1495318429Sjhb	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);
1496318429Sjhb
1497318429Sjhb	for (i = 0; i < axf->blocksize; i++) {
1498318429Sjhb		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
1499318429Sjhb		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
1500318429Sjhb	}
1501318429Sjhb
1502318429Sjhb	/*
1503318429Sjhb	 * Hash the raw ipad and opad and store the partial result in
1504318429Sjhb	 * the same buffer.
1505318429Sjhb	 */
1506318429Sjhb	axf->Init(&auth_ctx);
1507318429Sjhb	axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
1508318429Sjhb	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);
1509318429Sjhb
1510318429Sjhb	axf->Init(&auth_ctx);
1511318429Sjhb	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
1512318429Sjhb	ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
1513318429Sjhb}
1514318429Sjhb
1515318429Sjhb/*
1516318429Sjhb * Borrowed from AES_GMAC_Setkey().
1517318429Sjhb */
1518318429Sjhbstatic void
1519318429Sjhbccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
1520318429Sjhb{
1521318429Sjhb	static char zeroes[GMAC_BLOCK_LEN];
1522318429Sjhb	uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
1523318429Sjhb	int rounds;
1524318429Sjhb
1525318429Sjhb	rounds = rijndaelKeySetupEnc(keysched, key, klen);
1526318429Sjhb	rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
1527318429Sjhb}
1528318429Sjhb
1529318429Sjhbstatic int
1530318429Sjhbccr_aes_check_keylen(int alg, int klen)
1531318429Sjhb{
1532318429Sjhb
1533318429Sjhb	switch (klen) {
1534318429Sjhb	case 128:
1535318429Sjhb	case 192:
1536318429Sjhb		if (alg == CRYPTO_AES_XTS)
1537318429Sjhb			return (EINVAL);
1538318429Sjhb		break;
1539318429Sjhb	case 256:
1540318429Sjhb		break;
1541318429Sjhb	case 512:
1542318429Sjhb		if (alg != CRYPTO_AES_XTS)
1543318429Sjhb			return (EINVAL);
1544318429Sjhb		break;
1545318429Sjhb	default:
1546318429Sjhb		return (EINVAL);
1547318429Sjhb	}
1548318429Sjhb	return (0);
1549318429Sjhb}
1550318429Sjhb
/*
 * Borrowed from cesa_prep_aes_key().  We should perhaps have a public
 * function to generate this instead.
 *
 * NB: The crypto engine wants the words in the decryption key in reverse
 * order.
 */
static void
ccr_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
{
	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
	uint32_t *dkey;
	int i;

	rijndaelKeySetupEnc(ek, enc_key, kbits);
	dkey = dec_key;
	/* Point one past the end of the output and store words backwards. */
	dkey += (kbits / 8) / 4;

	switch (kbits) {
	case 128:
		/* Final round key of the 10-round schedule. */
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 10 + i]);
		break;
	case 192:
		/* Upper half of round key 11, then all of round key 12. */
		for (i = 0; i < 2; i++)
			*--dkey = htobe32(ek[4 * 11 + 2 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 12 + i]);
		break;
	case 256:
		/* Round keys 13 and 14 of the 14-round schedule. */
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 13 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 14 + i]);
		break;
	}
	/* All stores above should land exactly back at dec_key. */
	MPASS(dkey == dec_key);
}
1589318429Sjhb
/*
 * Program the session's hardware key context for an AES cipher.
 * 'klen' is the key length in bits as supplied by the framework; for
 * AES-XTS it covers both halves of the double-width key.  Assumes the
 * length was already validated by ccr_aes_check_keylen().
 */
static void
ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
{
	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
	unsigned int opad_present;

	/* Each half of an XTS key is klen / 2 bits wide. */
	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;
	switch (kbits) {
	case 128:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		break;
	case 192:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		break;
	case 256:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
	switch (alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		/*
		 * CBC and XTS decryption need the inverse key schedule;
		 * the counter modes (ICM/GCM) always run the forward
		 * cipher, so no deckey is generated for them.
		 */
		ccr_aes_getdeckey(s->blkcipher.deckey, key, kbits);
		break;
	}

	/* Key context payload: cipher key plus any MAC state. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	switch (s->mode) {
	case AUTHENC:
		/* Partial IPAD/OPAD digests follow the cipher key. */
		mk_size = s->hmac.mk_size;
		opad_present = 1;
		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
		kctx_len += iopad_size * 2;
		break;
	case GCM:
		/* The GHASH subkey H follows the cipher key. */
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		opad_present = 0;
		kctx_len += GMAC_BLOCK_LEN;
		break;
	default:
		mk_size = CHCR_KEYCTX_NO_KEY;
		opad_present = 0;
		break;
	}
	/* The hardware measures the key context in 16-byte flits. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}
1648318429Sjhb
/*
 * Create a new crypto session.  Walks the cryptoini list accepting at
 * most one hash and one cipher transform, validates the combination,
 * allocates (or reuses) a slot in the softc's session array, and
 * initializes the per-mode session state.  Returns the session id via
 * '*sidp'.
 */
static int
ccr_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
	unsigned int partial_digest_len;
	int error, i, sess;
	bool gcm_hash;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP;
	iv_len = 0;
	mk_size = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			/* Only a single hash transform per session. */
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				/* SHA-384 carries SHA-512-sized state. */
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			/* Only a single cipher transform per session. */
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccr_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	/* GHASH is only valid paired with AES-GCM, and vice versa. */
	if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	/* Prefer reusing an idle slot over growing the array. */
	sess = -1;
	for (i = 0; i < sc->nsessions; i++) {
		if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
			sess = i;
			break;
		}
	}
	if (sess == -1) {
		/*
		 * Grow the session array by one.  M_NOWAIT since the
		 * softc lock is held across the allocation.
		 */
		s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCR,
		    M_NOWAIT | M_ZERO);
		if (s == NULL) {
			mtx_unlock(&sc->lock);
			return (ENOMEM);
		}
		if (sc->sessions != NULL)
			memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
		sess = sc->nsessions;
		free(sc->sessions, M_CCR);
		sc->sessions = s;
		sc->nsessions++;
	}

	/*
	 * NOTE(review): a reused slot is not zeroed here; this relies
	 * on all fields relevant to the new mode being overwritten
	 * below — confirm no stale state can leak across modes.
	 */
	s = &sc->sessions[sess];

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		/* cri_mlen == 0 requests the full-length tag. */
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		/* A key may instead be supplied per-request later. */
		if (cipher->cri_key != NULL)
			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	*sidp = sess;
	return (0);
}
1834318429Sjhb
1835318429Sjhbstatic int
1836318429Sjhbccr_freesession(device_t dev, uint64_t tid)
1837318429Sjhb{
1838318429Sjhb	struct ccr_softc *sc;
1839318429Sjhb	uint32_t sid;
1840318429Sjhb	int error;
1841318429Sjhb
1842318429Sjhb	sc = device_get_softc(dev);
1843318429Sjhb	sid = CRYPTO_SESID2LID(tid);
1844318429Sjhb	mtx_lock(&sc->lock);
1845318429Sjhb	if (sid >= sc->nsessions || !sc->sessions[sid].active)
1846318429Sjhb		error = EINVAL;
1847318429Sjhb	else {
1848318429Sjhb		if (sc->sessions[sid].pending != 0)
1849318429Sjhb			device_printf(dev,
1850318429Sjhb			    "session %d freed with %d pending requests\n", sid,
1851318429Sjhb			    sc->sessions[sid].pending);
1852318429Sjhb		sc->sessions[sid].active = false;
1853318429Sjhb		error = 0;
1854318429Sjhb	}
1855318429Sjhb	mtx_unlock(&sc->lock);
1856318429Sjhb	return (error);
1857318429Sjhb}
1858318429Sjhb
1859318429Sjhbstatic int
1860318429Sjhbccr_process(device_t dev, struct cryptop *crp, int hint)
1861318429Sjhb{
1862318429Sjhb	struct ccr_softc *sc;
1863318429Sjhb	struct ccr_session *s;
1864318429Sjhb	struct cryptodesc *crd, *crda, *crde;
1865318429Sjhb	uint32_t sid;
1866318429Sjhb	int error;
1867318429Sjhb
1868318429Sjhb	if (crp == NULL)
1869318429Sjhb		return (EINVAL);
1870318429Sjhb
1871318429Sjhb	crd = crp->crp_desc;
1872318429Sjhb	sid = CRYPTO_SESID2LID(crp->crp_sid);
1873318429Sjhb	sc = device_get_softc(dev);
1874318429Sjhb	mtx_lock(&sc->lock);
1875318429Sjhb	if (sid >= sc->nsessions || !sc->sessions[sid].active) {
1876318429Sjhb		sc->stats_bad_session++;
1877318429Sjhb		error = EINVAL;
1878318429Sjhb		goto out;
1879318429Sjhb	}
1880318429Sjhb
1881318429Sjhb	error = ccr_populate_sglist(sc->sg_crp, crp);
1882318429Sjhb	if (error) {
1883318429Sjhb		sc->stats_sglist_error++;
1884318429Sjhb		goto out;
1885318429Sjhb	}
1886318429Sjhb
1887318429Sjhb	s = &sc->sessions[sid];
1888318429Sjhb	switch (s->mode) {
1889318429Sjhb	case HMAC:
1890318429Sjhb		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
1891318429Sjhb			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
1892318429Sjhb			    crd->crd_klen);
1893318429Sjhb		error = ccr_hmac(sc, sid, s, crp);
1894318429Sjhb		if (error == 0)
1895318429Sjhb			sc->stats_hmac++;
1896318429Sjhb		break;
1897318429Sjhb	case BLKCIPHER:
1898318429Sjhb		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
1899318429Sjhb			error = ccr_aes_check_keylen(crd->crd_alg,
1900318429Sjhb			    crd->crd_klen);
1901318429Sjhb			if (error)
1902318429Sjhb				break;
1903318429Sjhb			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
1904318429Sjhb			    crd->crd_klen);
1905318429Sjhb		}
1906318429Sjhb		error = ccr_blkcipher(sc, sid, s, crp);
1907318429Sjhb		if (error == 0) {
1908318429Sjhb			if (crd->crd_flags & CRD_F_ENCRYPT)
1909318429Sjhb				sc->stats_blkcipher_encrypt++;
1910318429Sjhb			else
1911318429Sjhb				sc->stats_blkcipher_decrypt++;
1912318429Sjhb		}
1913318429Sjhb		break;
1914318429Sjhb	case AUTHENC:
1915318429Sjhb		error = 0;
1916318429Sjhb		switch (crd->crd_alg) {
1917318429Sjhb		case CRYPTO_AES_CBC:
1918318429Sjhb		case CRYPTO_AES_ICM:
1919318429Sjhb		case CRYPTO_AES_XTS:
1920318429Sjhb			/* Only encrypt-then-authenticate supported. */
1921318429Sjhb			crde = crd;
1922318429Sjhb			crda = crd->crd_next;
1923318429Sjhb			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
1924318429Sjhb				error = EINVAL;
1925318429Sjhb				break;
1926318429Sjhb			}
1927318429Sjhb			break;
1928318429Sjhb		default:
1929318429Sjhb			crda = crd;
1930318429Sjhb			crde = crd->crd_next;
1931318429Sjhb			if (crde->crd_flags & CRD_F_ENCRYPT) {
1932318429Sjhb				error = EINVAL;
1933318429Sjhb				break;
1934318429Sjhb			}
1935318429Sjhb			break;
1936318429Sjhb		}
1937318429Sjhb		if (error)
1938318429Sjhb			break;
1939318429Sjhb		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
1940318429Sjhb			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
1941318429Sjhb			    crda->crd_klen);
1942318429Sjhb		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
1943318429Sjhb			error = ccr_aes_check_keylen(crde->crd_alg,
1944318429Sjhb			    crde->crd_klen);
1945318429Sjhb			if (error)
1946318429Sjhb				break;
1947318429Sjhb			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
1948318429Sjhb			    crde->crd_klen);
1949318429Sjhb		}
1950318429Sjhb		error = ccr_authenc(sc, sid, s, crp, crda, crde);
1951318429Sjhb		if (error == 0) {
1952318429Sjhb			if (crde->crd_flags & CRD_F_ENCRYPT)
1953318429Sjhb				sc->stats_authenc_encrypt++;
1954318429Sjhb			else
1955318429Sjhb				sc->stats_authenc_decrypt++;
1956318429Sjhb		}
1957318429Sjhb		break;
1958318429Sjhb	case GCM:
1959318429Sjhb		error = 0;
1960318429Sjhb		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
1961318429Sjhb			crde = crd;
1962318429Sjhb			crda = crd->crd_next;
1963318429Sjhb		} else {
1964318429Sjhb			crda = crd;
1965318429Sjhb			crde = crd->crd_next;
1966318429Sjhb		}
1967318429Sjhb		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
1968318429Sjhb			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
1969318429Sjhb		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
1970318429Sjhb			error = ccr_aes_check_keylen(crde->crd_alg,
1971318429Sjhb			    crde->crd_klen);
1972318429Sjhb			if (error)
1973318429Sjhb				break;
1974318429Sjhb			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
1975318429Sjhb			    crde->crd_klen);
1976318429Sjhb		}
1977318429Sjhb		error = ccr_gcm(sc, sid, s, crp, crda, crde);
1978318429Sjhb		if (error == 0) {
1979318429Sjhb			if (crde->crd_flags & CRD_F_ENCRYPT)
1980318429Sjhb				sc->stats_gcm_encrypt++;
1981318429Sjhb			else
1982318429Sjhb				sc->stats_gcm_decrypt++;
1983318429Sjhb		}
1984318429Sjhb		break;
1985318429Sjhb	}
1986318429Sjhb
1987318429Sjhb	if (error == 0) {
1988318429Sjhb		s->pending++;
1989318429Sjhb		sc->stats_inflight++;
1990318429Sjhb	} else
1991318429Sjhb		sc->stats_process_error++;
1992318429Sjhb
1993318429Sjhbout:
1994318429Sjhb	mtx_unlock(&sc->lock);
1995318429Sjhb
1996318429Sjhb	if (error) {
1997318429Sjhb		crp->crp_etype = error;
1998318429Sjhb		crypto_done(crp);
1999318429Sjhb	}
2000318429Sjhb
2001318429Sjhb	return (0);
2002318429Sjhb}
2003318429Sjhb
/*
 * Completion handler for CPL_FW6_PLD messages from the firmware.
 * data[0] carries the request status and data[1] the cryptop pointer
 * that was stashed in the work request.  Invokes the per-mode done
 * callback and then completes the request.
 */
static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc = iq->adapter->ccr_softc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint32_t sid, status;
	int error;

	/* The CPL either follows the RSS header or sits in an mbuf. */
	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

	/* Recover the original request and its session. */
	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	mtx_lock(&sc->lock);
	MPASS(sid < sc->nsessions);
	s = &sc->sessions[sid];
	s->pending--;
	sc->stats_inflight--;

	/* Per-mode post-processing (e.g. copy out digests, verify). */
	switch (s->mode) {
	case HMAC:
		error = ccr_hmac_done(sc, s, crp, cpl, error);
		break;
	case BLKCIPHER:
		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
		break;
	case AUTHENC:
		error = ccr_authenc_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			sc->stats_mac_error++;
		if (CHK_PAD_ERR_BIT(status))
			sc->stats_pad_error++;
	}
	mtx_unlock(&sc->lock);
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}
2061318429Sjhb
2062318429Sjhbstatic int
2063318429Sjhbccr_modevent(module_t mod, int cmd, void *arg)
2064318429Sjhb{
2065318429Sjhb
2066318429Sjhb	switch (cmd) {
2067318429Sjhb	case MOD_LOAD:
2068318429Sjhb		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
2069318429Sjhb		return (0);
2070318429Sjhb	case MOD_UNLOAD:
2071318429Sjhb		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
2072318429Sjhb		return (0);
2073318429Sjhb	default:
2074318429Sjhb		return (EOPNOTSUPP);
2075318429Sjhb	}
2076318429Sjhb}
2077318429Sjhb
/* Newbus device interface plus the cryptodev framework methods. */
static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify,	ccr_identify),
	DEVMETHOD(device_probe,		ccr_probe),
	DEVMETHOD(device_attach,	ccr_attach),
	DEVMETHOD(device_detach,	ccr_detach),

	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process,	ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

static devclass_t ccr_devclass;

/* ccr attaches as a child of the T6 nexus (t6nex) bus driver. */
DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
2103