/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/crypto/t4_crypto.c 354098 2019-10-25 21:14:43Z jhb $");

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "common/common.h"
#include "crypto/t4_crypto.h"

/*
 * Requests consist of:
 *
 * +-------------------------------+
 * | struct fw_crypto_lookaside_wr |
 * +-------------------------------+
 * | struct ulp_txpkt              |
 * +-------------------------------+
 * | struct ulptx_idata            |
 * +-------------------------------+
 * | struct cpl_tx_sec_pdu         |
 * +-------------------------------+
 * | struct cpl_tls_tx_scmd_fmt    |
 * +-------------------------------+
 * | key context header            |
 * +-------------------------------+
 * | AES key                       |  ----- For requests with AES
 * +-------------------------------+ -
 * | IPAD (16-byte aligned)        |  \
 * +-------------------------------+  +---- For requests with HMAC
 * | OPAD (16-byte aligned)        |  /
 * +-------------------------------+ -
 * | GMAC H                        |  ----- For AES-GCM
 * +-------------------------------+ -
 * | struct cpl_rx_phys_dsgl       |  \
 * +-------------------------------+  +---- Destination buffer for
 * | PHYS_DSGL entries             |  /     non-hash-only requests
 * +-------------------------------+ -
 * | 16 dummy bytes                |  ----- Only for hash-only requests
 * +-------------------------------+
 * | IV                            |  ----- If immediate IV
 * +-------------------------------+
 * | Payload                       |  ----- If immediate Payload
 * +-------------------------------+ -
 * | struct ulptx_sgl              |  \
 * +-------------------------------+  +---- If payload via SGL
 * | SGL entries                   |  /
 * +-------------------------------+ -
 *
 * Note that the key context must be padded to ensure 16-byte alignment.
 * For HMAC requests, the key consists of the partial hash of the IPAD
 * followed by the partial hash of the OPAD.
 *
 * Replies consist of:
 *
 * +-------------------------------+
 * | struct cpl_fw6_pld            |
 * +-------------------------------+
 * | hash digest                   |  ----- For HMAC request with
 * +-------------------------------+        'hash_size' set in work request
 *
 * A 32-bit big-endian error status word is supplied in the last 4
 * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
 * "MAC" error and bit 1 indicates a "PAD" error.
 *
 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
 * in the request is returned in data[1] of the CPL_FW6_PLD message.
 *
 * For block cipher replies, the updated IV is supplied in data[2] and
 * data[3] of the CPL_FW6_PLD message.
 *
 * For hash replies where the work request set 'hash_size' to request
 * a copy of the hash in the reply, the hash digest is supplied
 * immediately following the CPL_FW6_PLD message.
 */

/*
 * The crypto engine supports a maximum AAD size of 511 bytes.
 */
#define	MAX_AAD_LEN		511

/*
 * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
 * entries.  While the CPL includes a 16-bit length field, the T6 can
 * sometimes hang if an error occurs while processing a request with a
 * single DSGL entry larger than 2k.
 */
#define	MAX_RX_PHYS_DSGL_SGE	32
#define	DSGL_SGE_MAXLEN		2048

/*
 * The adapter only supports requests with a total input or output
 * length of 64k-1 or smaller.  Larger requests either hang or
 * complete with incorrect results.
 */
#define	MAX_REQUEST_SIZE	65535

static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");

struct ccr_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
	char opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};

struct ccr_session_gmac {
	int hash_len;
	char ghash_h[GMAC_BLOCK_LEN];
};

struct ccr_session_blkcipher {
	unsigned int cipher_mode;
	unsigned int key_len;
	unsigned int iv_len;
	__be32 key_ctx_hdr;
	char enckey[CHCR_AES_MAX_KEY_LEN];
	char deckey[CHCR_AES_MAX_KEY_LEN];
};

struct ccr_session {
	bool active;
	int pending;
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	union {
		struct ccr_session_hmac hmac;
		struct ccr_session_gmac gmac;
	};
	struct ccr_session_blkcipher blkcipher;
};

struct ccr_softc {
	struct adapter *adapter;
	device_t dev;
	uint32_t cid;
	int tx_channel_id;
	struct ccr_session *sessions;
	int nsessions;
	struct mtx lock;
	bool detaching;
	struct sge_wrq *txq;
	struct sge_rxq *rxq;

	/*
	 * Pre-allocate S/G lists used when preparing a work request.
	 * 'sg_crp' contains an sglist describing the entire buffer
	 * for a 'struct cryptop'.  'sg_ulptx' is used to describe
	 * the data the engine should DMA as input via ULPTX_SGL.
	 * 'sg_dsgl' is used to describe the destination that cipher
	 * text and a tag should be written to.
	 */
	struct sglist *sg_crp;
	struct sglist *sg_ulptx;
	struct sglist *sg_dsgl;

	/*
	 * Pre-allocate a dummy output buffer for the IV and AAD for
	 * AEAD requests.
	 */
	char *iv_aad_buf;
	struct sglist *sg_iv_aad;

	/* Statistics. */
	uint64_t stats_blkcipher_encrypt;
	uint64_t stats_blkcipher_decrypt;
	uint64_t stats_hmac;
	uint64_t stats_authenc_encrypt;
	uint64_t stats_authenc_decrypt;
	uint64_t stats_gcm_encrypt;
	uint64_t stats_gcm_decrypt;
	uint64_t stats_wr_nomem;
	uint64_t stats_inflight;
	uint64_t stats_mac_error;
	uint64_t stats_pad_error;
	uint64_t stats_bad_session;
	uint64_t stats_sglist_error;
	uint64_t stats_process_error;
	uint64_t stats_sw_fallback;
};

/*
 * Crypto requests involve two kinds of scatter/gather lists.
 *
 * Non-hash-only requests require a PHYS_DSGL that describes the
 * location to store the results of the encryption or decryption
 * operation.  This SGL uses a different format (PHYS_DSGL) and should
 * exclude the crd_skip bytes at the start of the data as well as
 * any AAD or IV.  For authenticated encryption requests it should
 * also include the destination of the hash or tag.
 *
 * The input payload may either be supplied inline as immediate data,
 * or via a standard ULP_TX SGL.  This SGL should include AAD,
 * ciphertext, and the hash or tag for authenticated decryption
 * requests.
 *
 * These scatter/gather lists can describe different subsets of the
 * buffer described by the crypto operation.  ccr_populate_sglist()
 * generates a scatter/gather list that covers the entire crypto
 * operation buffer that is then used to construct the other
 * scatter/gather lists.
 */
static int
ccr_populate_sglist(struct sglist *sg, struct cryptop *crp)
{
	int error;

	sglist_reset(sg);
	if (crp->crp_flags & CRYPTO_F_IMBUF)
		error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
	else
		error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
	return (error);
}

/*
 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
 * segments.
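 * For example, assuming a 2048-byte limit, a single 5000-byte segment
 * counts as howmany(5000, 2048) == 3 segments.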
 */
static int
ccr_count_sgl(struct sglist *sg, int maxsegsize)
{
	int i, nsegs;

	nsegs = 0;
	for (i = 0; i < sg->sg_nseg; i++)
		nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
	return (nsegs);
}

/* These functions deal with PHYS_DSGL for the reply buffer. */
static inline int
ccr_phys_dsgl_len(int nsegs)
{
	int len;

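	/*
	 * Each 'struct phys_sge_pairs' packs eight DSGL entries as
	 * eight 16-bit lengths followed by eight 64-bit addresses.  A
	 * trailing partial group still carries all eight length words
	 * but only enough address words for its entries, rounded up
	 * to a pair.  For example, 10 segments occupy one full
	 * 80-byte group plus 16 bytes of lengths and 16 bytes of
	 * addresses, or 112 bytes total.
	 */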
	len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
	if ((nsegs % 8) != 0) {
		len += sizeof(uint16_t) * 8;
		len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
	}
	return (len);
}

static void
ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct sglist *sg;
	struct cpl_rx_phys_dsgl *cpl;
	struct phys_sge_pairs *sgl;
	vm_paddr_t paddr;
	size_t seglen;
	u_int i, j;

	sg = sc->sg_dsgl;
	cpl = dst;
	cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
	    V_CPL_RX_PHYS_DSGL_ISRDMA(0));
	cpl->pcirlxorder_to_noofsgentr = htobe32(
	    V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
	    V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
	    V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
	    V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
	cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id);
	cpl->rss_hdr_int.hash_val = 0;
	sgl = (struct phys_sge_pairs *)(cpl + 1);
	j = 0;
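	/*
	 * Walk the destination sglist, splitting each segment larger
	 * than DSGL_SGE_MAXLEN into multiple DSGL entries and filling
	 * the eight slots of each 'struct phys_sge_pairs' before
	 * advancing to the next one.
	 */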
	for (i = 0; i < sg->sg_nseg; i++) {
		seglen = sg->sg_segs[i].ss_len;
		paddr = sg->sg_segs[i].ss_paddr;
		do {
			sgl->addr[j] = htobe64(paddr);
			if (seglen > DSGL_SGE_MAXLEN) {
				sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
				paddr += DSGL_SGE_MAXLEN;
				seglen -= DSGL_SGE_MAXLEN;
			} else {
				sgl->len[j] = htobe16(seglen);
				seglen = 0;
			}
			j++;
			if (j == 8) {
				sgl++;
				j = 0;
			}
		} while (seglen != 0);
	}
	MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
}

/* These functions deal with the ULPTX_SGL for input payload. */
static inline int
ccr_ulptx_sgl_len(int nsegs)
{
	u_int n;

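	/*
	 * The first segment lives in 'struct ulptx_sgl' itself (16
	 * bytes); each remaining pair of segments shares a 24-byte
	 * 'struct ulptx_sge_pair', and an odd trailing segment costs
	 * one extra 8-byte word.  For example, 4 segments total need
	 * 16 + 8 * ((3 * 3) / 2 + 1) == 56 bytes, rounded up to 64.
	 */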
	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	return (roundup2(n, 16));
}

static void
ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
{
	struct ulptx_sgl *usgl;
	struct sglist *sg;
	struct sglist_seg *ss;
	int i;

	sg = sc->sg_ulptx;
	MPASS(nsegs == sg->sg_nseg);
	ss = &sg->sg_segs[0];
	usgl = dst;
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(ss->ss_len);
	usgl->addr0 = htobe64(ss->ss_paddr);
	ss++;
	for (i = 0; i < sg->sg_nseg - 1; i++) {
		usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
		usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
		ss++;
	}
}

static bool
ccr_use_imm_data(u_int transhdr_len, u_int input_len)
{

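	/*
	 * Immediate data is copied into the work request itself, so
	 * the payload must fit under the immediate-data limit and,
	 * together with the headers, within a single work request.
	 */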
	if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
		return (false);
	if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
	    SGE_MAX_WR_LEN)
		return (false);
	return (true);
}

static void
ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len,
    u_int wr_len, uint32_t sid, u_int imm_len, u_int sgl_len, u_int hash_size,
    struct cryptop *crp)
{
	u_int cctx_size;

	cctx_size = sizeof(struct _key_ctx) + kctx_len;
	crwr->wreq.op_to_cctx_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
	crwr->wreq.len16_pkd = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
	crwr->wreq.session_id = htobe32(sid);
	crwr->wreq.rx_chid_to_rx_q_id = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) |
	    V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
	    V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id));
	crwr->wreq.key_addr = 0;
	crwr->wreq.pld_size_hash_size = htobe32(
	    V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
	crwr->wreq.cookie = htobe64((uintptr_t)crp);

	crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
	    V_ULP_TXPKT_DATAMODIFY(0) |
	    V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(sc->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1));
	crwr->ulptx.len = htobe32(
	    ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));

	crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
	    V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1));
	crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) -
	    sgl_len);
}

static int
ccr_hmac(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp)
{
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	struct cryptodesc *crd;
	char *dst;
	u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
	u_int imm_len, iopad_size;
	int error, sgl_nsegs, sgl_len;

	crd = crp->crp_desc;

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	axf = s->hmac.auth_hash;

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the context includes the aligned IPAD and
	 * OPAD.
	 */
	kctx_len = iopad_size * 2;
	hash_size_in_response = axf->hashsize;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);

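	/*
	 * The engine doesn't accept requests with no input data, so
	 * a zero-length payload is replaced with a single
	 * hand-constructed block of hash padding (built further
	 * below).
	 */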
	if (crd->crd_len == 0) {
		imm_len = axf->blocksize;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
	    hash_size_in_response, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(0));

	crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
	    crd->crd_len);

	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_LAST_FRAG(0) |
	    V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));

	memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad,
	    s->hmac.partial_digest_len);

	/* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) |
	    V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
	    V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));

	dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
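	/*
	 * For a zero-length payload, hand-construct the final block
	 * of padding: a 0x80 byte followed by the message length in
	 * bits.  The length counts the already-hashed IPAD block, so
	 * it is the hash block size expressed in bits.
	 */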
	if (crd->crd_len == 0) {
		dst[0] = 0x80;
		*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
		    htobe64(axf->blocksize << 3);
	} else if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
    const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	crd = crp->crp_desc;
	if (error == 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
	}

	return (error);
}

static int
ccr_blkcipher(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct cryptodesc *crd;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int imm_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	crd = crp->crp_desc;

	if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
		return (EINVAL);
	if (crd->crd_alg == CRYPTO_AES_CBC &&
	    (crd->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/* Reject requests with too large of an input buffer. */
	if (crd->crd_len > MAX_REQUEST_SIZE)
		return (EFBIG);

	if (crd->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
	    crd->crd_len);
	if (error)
		return (error);
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* The 'key' must be 128-bit aligned. */
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	if (ccr_use_imm_data(transhdr_len, crd->crd_len +
	    s->blkcipher.iv_len)) {
		imm_len = crd->crd_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crd->crd_skip, crd->crd_len);
		if (error)
			return (error);
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, s->blkcipher.iv_len, iv);
	}

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0,
	    crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) |
	    V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crd->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
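	/*
	 * For XTS the second half of the OCF-supplied key (the tweak
	 * key) is placed first in the key context, followed by the
	 * half used on the data, taken from the encrypt or decrypt
	 * schedule as appropriate.
	 */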
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crd->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, s->blkcipher.iv_len);
	dst += s->blkcipher.iv_len;
	if (imm_len != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
		    crd->crd_len, dst);
	else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 */
	return (error);
}

/*
 * 'hashsize' is the length of a full digest.  'authsize' is the
 * requested digest length for this operation which may be less
 * than 'hashsize'.
 */
static int
ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
{

	if (authsize == 10)
		return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366);
	if (authsize == 12)
		return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT);
	if (authsize == hashsize / 2)
		return (CHCR_SCMD_HMAC_CTRL_DIV2);
	return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC);
}

static int
ccr_authenc(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	struct auth_hash *axf;
	char *dst;
	u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len, iopad_size;
	u_int aad_start, aad_len, aad_stop;
	u_int auth_start, auth_stop, auth_insert;
	u_int cipher_start, cipher_stop;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	/*
	 * If there is a need in the future, requests with an empty
	 * payload could be supported as HMAC-only requests.
	 */
	if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
		return (EINVAL);
	if (crde->crd_alg == CRYPTO_AES_CBC &&
	    (crde->crd_len % AES_BLOCK_LEN) != 0)
		return (EINVAL);

	/*
	 * Compute the length of the AAD (data covered by the
	 * authentication descriptor but not the encryption
	 * descriptor).  To simplify the logic, AAD is only permitted
	 * before the cipher/plain text, not after.  This is true of
	 * all currently-generated requests.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EINVAL);
	if (crda->crd_skip < crde->crd_skip) {
		if (crda->crd_skip + crda->crd_len > crde->crd_skip)
			aad_len = (crde->crd_skip - crda->crd_skip);
		else
			aad_len = crda->crd_len;
	} else
		aad_len = 0;
	if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN)
		return (EINVAL);
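	/*
	 * For example, a request with crda->crd_skip == 0,
	 * crde->crd_skip == 16, and crda->crd_len spanning the whole
	 * buffer yields aad_len == 16: the 16 auth-only bytes in
	 * front of the cipher region.
	 */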

	axf = s->hmac.auth_hash;
	hash_size_in_response = s->hmac.hash_len;
	if (crde->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The output buffer consists of the cipher text followed by
	 * the hash when encrypting.  For decryption it only contains
	 * the plain text.
	 *
	 * Due to a firmware bug, the output buffer must include a
	 * dummy output buffer for the IV and AAD prior to the real
	 * output buffer.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (s->blkcipher.iv_len + aad_len + crde->crd_len +
		    hash_size_in_response > MAX_REQUEST_SIZE)
			return (EFBIG);
	} else {
		if (s->blkcipher.iv_len + aad_len + crde->crd_len >
		    MAX_REQUEST_SIZE)
			return (EFBIG);
	}
	sglist_reset(sc->sg_dsgl);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
	    s->blkcipher.iv_len + aad_len);
	if (error)
		return (error);
	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
	    crde->crd_len);
	if (error)
		return (error);
	if (op_type == CHCR_ENCRYPT_OP) {
		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
		    crda->crd_inject, hash_size_in_response);
		if (error)
			return (error);
	}
	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
		return (EFBIG);
	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);

	/* PADs must be 128-bit aligned. */
	iopad_size = roundup2(s->hmac.partial_digest_len, 16);

	/*
	 * The 'key' part of the key context consists of the key followed
	 * by the IPAD and OPAD.
	 */
	kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);

	/*
	 * The input buffer consists of the IV, any AAD, and then the
	 * cipher/plain text.  For decryption requests the hash is
	 * appended after the cipher text.
	 *
	 * The IV is always stored at the start of the input buffer
	 * even though it may be duplicated in the payload.  The
	 * crypto engine doesn't work properly if the IV offset points
	 * inside of the AAD region, so a second copy is always
	 * required.
	 */
	input_len = aad_len + crde->crd_len;

	/*
	 * The firmware hangs if sent a request whose input is only
	 * slightly smaller than MAX_REQUEST_SIZE.  In particular, the
	 * firmware appears to require 512 - 16 bytes of spare room
	 * along with the size of the hash even if the hash isn't
	 * included in the input buffer.
	 */
	if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
	    MAX_REQUEST_SIZE)
		return (EFBIG);
	if (op_type == CHCR_DECRYPT_OP)
		input_len += hash_size_in_response;
	if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
		imm_len = input_len;
		sgl_nsegs = 0;
		sgl_len = 0;
	} else {
		imm_len = 0;
		sglist_reset(sc->sg_ulptx);
		if (aad_len != 0) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_skip, aad_len);
			if (error)
				return (error);
		}
		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
		    crde->crd_skip, crde->crd_len);
		if (error)
			return (error);
		if (op_type == CHCR_DECRYPT_OP) {
			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
			    crda->crd_inject, hash_size_in_response);
			if (error)
				return (error);
		}
		sgl_nsegs = sc->sg_ulptx->sg_nseg;
		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
	}

	/*
	 * Any auth-only data before the cipher region is marked as AAD.
	 * Auth-data that overlaps with the cipher region is placed in
	 * the auth section.
	 */
	if (aad_len != 0) {
		aad_start = s->blkcipher.iv_len + 1;
		aad_stop = aad_start + aad_len - 1;
	} else {
		aad_start = 0;
		aad_stop = 0;
	}
	cipher_start = s->blkcipher.iv_len + aad_len + 1;
	if (op_type == CHCR_DECRYPT_OP)
		cipher_stop = hash_size_in_response;
	else
		cipher_stop = 0;
	if (aad_len == crda->crd_len) {
		auth_start = 0;
		auth_stop = 0;
	} else {
		if (aad_len != 0)
			auth_start = cipher_start;
		else
			auth_start = s->blkcipher.iv_len + crda->crd_skip -
			    crde->crd_skip + 1;
		auth_stop = (crde->crd_skip + crde->crd_len) -
		    (crda->crd_skip + crda->crd_len) + cipher_stop;
	}
	if (op_type == CHCR_DECRYPT_OP)
		auth_insert = hash_size_in_response;
	else
		auth_insert = 0;

	wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
	    roundup2(imm_len, 16) + sgl_len;
	if (wr_len > SGE_MAX_WR_LEN)
		return (EFBIG);
	wr = alloc_wrqe(wr_len, sc->txq);
	if (wr == NULL) {
		sc->stats_wr_nomem++;
		return (ENOMEM);
	}
	crwr = wrtod(wr);
	memset(crwr, 0, wr_len);

	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
	    op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

	crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);

	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
	    V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	switch (crde->crd_alg) {
	case CRYPTO_AES_CBC:
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
			    s->blkcipher.key_len);
		else
			memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
			    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_ICM:
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
		    s->blkcipher.key_len);
		break;
	case CRYPTO_AES_XTS:
		key_half = s->blkcipher.key_len / 2;
		memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
		    key_half);
		if (crde->crd_flags & CRD_F_ENCRYPT)
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.enckey, key_half);
		else
			memcpy(crwr->key_ctx.key + key_half,
			    s->blkcipher.deckey, key_half);
		break;
	}

	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len);
	memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, s->blkcipher.iv_len);
	dst += s->blkcipher.iv_len;
	if (imm_len != 0) {
		if (aad_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, aad_len, dst);
			dst += aad_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
	struct cryptodesc *crd;

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * For a decryption request, the hardware may do a verification
	 * of the HMAC which will fail if the existing HMAC isn't in the
	 * buffer.  If that happens, clear the error and copy the HMAC
	 * from the CPL reply into the buffer.
	 *
	 * For encryption requests, crd should be the cipher request
	 * which will have CRD_F_ENCRYPT set.  For decryption
	 * requests, crp_desc will be the HMAC request which should
	 * not have this flag set.
	 */
	crd = crp->crp_desc;
	if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
	    !(crd->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
		    s->hmac.hash_len, (c_caddr_t)(cpl + 1));
		error = 0;
	}
	return (error);
}

static int
ccr_gcm(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s,
    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
{
	char iv[CHCR_MAX_CRYPTO_IV_LEN];
	struct chcr_wr *crwr;
	struct wrqe *wr;
	char *dst;
	u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
	u_int hash_size_in_response, imm_len;
	u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
	u_int hmac_ctrl, input_len;
	int dsgl_nsegs, dsgl_len;
	int sgl_nsegs, sgl_len;
	int error;

	if (s->blkcipher.key_len == 0)
		return (EINVAL);

	/*
	 * The crypto engine doesn't handle GCM requests with an empty
	 * payload, so handle those in software instead.
	 */
	if (crde->crd_len == 0)
		return (EMSGSIZE);

	/*
	 * AAD is only permitted before the cipher/plain text, not
	 * after.
	 */
	if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
		return (EMSGSIZE);

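	/*
	 * The AAD offsets in the work request are computed past the
	 * 16-byte IV block at the front of the payload, so the IV
	 * counts against the engine's 511-byte AAD limit as well.
	 */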
	if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN)
		return (EMSGSIZE);

	hash_size_in_response = s->gmac.hash_len;
	if (crde->crd_flags & CRD_F_ENCRYPT)
		op_type = CHCR_ENCRYPT_OP;
	else
		op_type = CHCR_DECRYPT_OP;

	/*
	 * The IV handling for GCM in OCF is a bit more complicated in
	 * that IPSec provides a full 16-byte IV (including the
	 * counter), whereas the /dev/crypto interface sometimes
	 * provides a full 16-byte IV (if no IV is provided in the
	 * ioctl) and sometimes a 12-byte IV (if the IV was explicit).
	 *
	 * When provided a 12-byte IV, assume the IV is really 16 bytes
	 * with a counter in the last 4 bytes initialized to 1.
	 *
	 * While iv_len is checked below, the value is currently
	 * always set to 12 when creating a GCM session in this driver
	 * due to limitations in OCF (there is no way to know what the
	 * driver always assumes a 12-byte IV for now.
1167318429Sjhb	 */
1168345040Sjhb	if (s->blkcipher.iv_len == 12)
1169318429Sjhb		iv_len = AES_BLOCK_LEN;
1170345040Sjhb	else
1171318429Sjhb		iv_len = s->blkcipher.iv_len;
1172318429Sjhb
1173318429Sjhb	/*
1174318429Sjhb	 * The output buffer consists of the cipher text followed by
1175318429Sjhb	 * the tag when encrypting.  For decryption it only contains
1176318429Sjhb	 * the plain text.
1177345040Sjhb	 *
1178345040Sjhb	 * Due to a firmware bug, the output buffer must include a
1179345040Sjhb	 * dummy output buffer for the IV and AAD prior to the real
1180345040Sjhb	 * output buffer.
1181318429Sjhb	 */
1182345040Sjhb	if (op_type == CHCR_ENCRYPT_OP) {
1183345040Sjhb		if (iv_len + crda->crd_len + crde->crd_len +
1184345040Sjhb		    hash_size_in_response > MAX_REQUEST_SIZE)
1185345040Sjhb			return (EFBIG);
1186345040Sjhb	} else {
1187345040Sjhb		if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE)
1188345040Sjhb			return (EFBIG);
1189345040Sjhb	}
1190318429Sjhb	sglist_reset(sc->sg_dsgl);
1191345040Sjhb	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1192345040Sjhb	    crda->crd_len);
1193345040Sjhb	if (error)
1194345040Sjhb		return (error);
1195318429Sjhb	error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
1196318429Sjhb	    crde->crd_len);
1197318429Sjhb	if (error)
1198318429Sjhb		return (error);
1199318429Sjhb	if (op_type == CHCR_ENCRYPT_OP) {
1200318429Sjhb		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
1201318429Sjhb		    crda->crd_inject, hash_size_in_response);
1202318429Sjhb		if (error)
1203318429Sjhb			return (error);
1204318429Sjhb	}
1205318429Sjhb	dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
1206318429Sjhb	if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1207318429Sjhb		return (EFBIG);
1208318429Sjhb	dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1209318429Sjhb
1210318429Sjhb	/*
1211318429Sjhb	 * The 'key' part of the key context consists of the key followed
1212318429Sjhb	 * by the Galois hash key.
1213318429Sjhb	 */
1214318429Sjhb	kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN;
1215318429Sjhb	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1216318429Sjhb
1217318429Sjhb	/*
1218318429Sjhb	 * The input buffer consists of the IV, any AAD, and then the
1219318429Sjhb	 * cipher/plain text.  For decryption requests the hash is
1220318429Sjhb	 * appended after the cipher text.
1221345040Sjhb	 *
1222345040Sjhb	 * The IV is always stored at the start of the input buffer
1223345040Sjhb	 * even though it may be duplicated in the payload.  The
1224345040Sjhb	 * crypto engine doesn't work properly if the IV offset points
1225345040Sjhb	 * inside of the AAD region, so a second copy is always
1226345040Sjhb	 * required.
1227318429Sjhb	 */
1228318429Sjhb	input_len = crda->crd_len + crde->crd_len;
1229318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
1230318429Sjhb		input_len += hash_size_in_response;
1231345040Sjhb	if (input_len > MAX_REQUEST_SIZE)
1232345040Sjhb		return (EFBIG);
1233318429Sjhb	if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1234318429Sjhb		imm_len = input_len;
1235318429Sjhb		sgl_nsegs = 0;
1236318429Sjhb		sgl_len = 0;
1237318429Sjhb	} else {
1238318429Sjhb		imm_len = 0;
1239318429Sjhb		sglist_reset(sc->sg_ulptx);
1240318429Sjhb		if (crda->crd_len != 0) {
1241318429Sjhb			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1242318429Sjhb			    crda->crd_skip, crda->crd_len);
1243318429Sjhb			if (error)
1244318429Sjhb				return (error);
1245318429Sjhb		}
1246318429Sjhb		error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1247318429Sjhb		    crde->crd_skip, crde->crd_len);
1248318429Sjhb		if (error)
1249318429Sjhb			return (error);
1250318429Sjhb		if (op_type == CHCR_DECRYPT_OP) {
1251318429Sjhb			error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
1252318429Sjhb			    crda->crd_inject, hash_size_in_response);
1253318429Sjhb			if (error)
1254318429Sjhb				return (error);
1255318429Sjhb		}
1256318429Sjhb		sgl_nsegs = sc->sg_ulptx->sg_nseg;
1257318429Sjhb		sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1258318429Sjhb	}
1259318429Sjhb
1260318429Sjhb	if (crda->crd_len != 0) {
1261318429Sjhb		aad_start = iv_len + 1;
1262318429Sjhb		aad_stop = aad_start + crda->crd_len - 1;
1263318429Sjhb	} else {
1264318429Sjhb		aad_start = 0;
1265318429Sjhb		aad_stop = 0;
1266318429Sjhb	}
1267318429Sjhb	cipher_start = iv_len + crda->crd_len + 1;
1268318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
1269318429Sjhb		cipher_stop = hash_size_in_response;
1270318429Sjhb	else
1271318429Sjhb		cipher_stop = 0;
1272318429Sjhb	if (op_type == CHCR_DECRYPT_OP)
1273318429Sjhb		auth_insert = hash_size_in_response;
1274318429Sjhb	else
1275318429Sjhb		auth_insert = 0;
1276318429Sjhb
1277345040Sjhb	wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1278345040Sjhb	    sgl_len;
1279345040Sjhb	if (wr_len > SGE_MAX_WR_LEN)
1280345040Sjhb		return (EFBIG);
1281318429Sjhb	wr = alloc_wrqe(wr_len, sc->txq);
1282318429Sjhb	if (wr == NULL) {
1283318429Sjhb		sc->stats_wr_nomem++;
1284318429Sjhb		return (ENOMEM);
1285318429Sjhb	}
1286318429Sjhb	crwr = wrtod(wr);
1287318429Sjhb	memset(crwr, 0, wr_len);
1288318429Sjhb
	/*
	 * Read the existing IV from the request or generate a random
	 * one if none is provided.  Optionally copy the generated IV
	 * into the output buffer if requested.
	 *
	 * If the input IV is 12 bytes, append an explicit 4-byte
	 * big-endian block counter with an initial value of 1.
	 */
	if (op_type == CHCR_ENCRYPT_OP) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			arc4rand(iv, s->blkcipher.iv_len, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, s->blkcipher.iv_len, iv);
	}
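	/*
	 * Per NIST SP 800-38D, a 96-bit IV is extended to the initial
	 * counter block J0 = IV || 0^31 || 1; the 4-byte big-endian
	 * counter appended below supplies the trailing one.
	 */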
	if (s->blkcipher.iv_len == 12)
		*(uint32_t *)&iv[12] = htobe32(1);

	ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len,
	    0, crp);

	/* XXX: Hardcodes SGE loopback channel of 0. */
	crwr->sec_cpl.op_ivinsrtofst = htobe32(
	    V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
	    V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) |
	    V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
	    V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
	    V_CPL_TX_SEC_PDU_IVINSRTOFST(1));

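	/*
	 * The payload length covers the IV, AAD, and cipher/plain
	 * text, plus the tag for decryption requests.
	 */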
	crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);

	/*
	 * NB: cipherstop is explicitly set to 0.  On encrypt it
	 * should normally be set to 0 anyway (as the encrypt crd ends
	 * at the end of the input).  However, for decrypt the cipher
	 * ends before the tag in the AUTHENC case (and authstop is
	 * set to stop before the tag), but for GCM the cipher still
	 * runs to the end of the buffer.  Not sure if this is
	 * intentional or a firmware quirk, but it is required for
	 * working tag validation with GCM decryption.
	 */
	crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
	    V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
	    V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
	    V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
	crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
	    V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
	    V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
	    V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
	    V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));

	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
	hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
	crwr->sec_cpl.seqno_numivs = htobe32(
	    V_SCMD_SEQ_NO_CTRL(0) |
	    V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) |
	    V_SCMD_ENC_DEC_CTRL(op_type) |
	    V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
	    V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
	    V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) |
	    V_SCMD_HMAC_CTRL(hmac_ctrl) |
	    V_SCMD_IV_SIZE(iv_len / 2) |
	    V_SCMD_NUM_IVS(0));
	crwr->sec_cpl.ivgen_hdrlen = htobe32(
	    V_SCMD_IV_GEN_CTRL(0) |
	    V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
	    V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));

	crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
	memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len);
	dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16);
	memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);

	dst = (char *)(crwr + 1) + kctx_len;
	ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
	dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
	memcpy(dst, iv, iv_len);
	dst += iv_len;
	if (imm_len != 0) {
		if (crda->crd_len != 0) {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_skip, crda->crd_len, dst);
			dst += crda->crd_len;
		}
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
		    crde->crd_len, dst);
		dst += crde->crd_len;
		if (op_type == CHCR_DECRYPT_OP)
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crda->crd_inject, hash_size_in_response, dst);
	} else
		ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);

	/* XXX: TODO backpressure */
	t4_wrq_tx(sc->adapter, wr);

	return (0);
}

static int
ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
    struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{

	/*
	 * The updated IV to permit chained requests is at
	 * cpl->data[2], but OCF doesn't permit chained requests.
	 *
	 * Note that the hardware should always verify the GMAC hash.
	 */
	return (error);
}

/*
 * Handle a GCM request that is not supported by the crypto engine by
 * performing the operation in software.  Derived from swcr_authenc().
 */
static void
ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde)
{
	struct auth_hash *axf;
	struct enc_xform *exf;
	void *auth_ctx;
	uint8_t *kschedule;
	char block[GMAC_BLOCK_LEN];
	char digest[GMAC_DIGEST_LEN];
	char iv[AES_BLOCK_LEN];
	int error, i, len;

	auth_ctx = NULL;
	kschedule = NULL;

	/* Initialize the MAC. */
	switch (s->blkcipher.key_len) {
	case 16:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 24:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 32:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		error = EINVAL;
		goto out;
	}
	auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT);
	if (auth_ctx == NULL) {
		error = ENOMEM;
		goto out;
	}
	axf->Init(auth_ctx);
	axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len);

	/* Initialize the cipher. */
	exf = &enc_xform_aes_nist_gcm;
	error = exf->setkey(&kschedule, s->blkcipher.enckey,
	    s->blkcipher.key_len);
	if (error)
		goto out;

	/*
	 * This assumes a 12-byte IV from the crp.  See longer comment
	 * above in ccr_gcm() for more details.
	 */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			arc4rand(iv, 12, 0);
		if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	} else {
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, 12);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crde->crd_inject, 12, iv);
	}
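	/* As in ccr_gcm(), append the counter to form J0 for the 96-bit IV. */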
	*(uint32_t *)&iv[12] = htobe32(1);

	axf->Reinit(auth_ctx, iv, sizeof(iv));

	/* MAC the AAD. */
	for (i = 0; i < crda->crd_len; i += sizeof(block)) {
		len = imin(crda->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		axf->Update(auth_ctx, block, sizeof(block));
	}

	exf->reinit(kschedule, iv);

	/* Encrypt and MAC the payload; on decrypt, just MAC it. */
	for (i = 0; i < crde->crd_len; i += sizeof(block)) {
		len = imin(crde->crd_len - i, sizeof(block));
		crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
		    i, len, block);
		bzero(block + len, sizeof(block) - len);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, block);
			axf->Update(auth_ctx, block, len);
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crde->crd_skip + i, len, block);
		} else {
			axf->Update(auth_ctx, block, len);
		}
	}

	/*
	 * Length block: GCM ends the GHASH input with the AAD and
	 * cipher text lengths as two 64-bit big-endian bit counts.
	 * Only the low 32 bits of each count are non-zero here; the
	 * upper halves stay zero from the bzero below.
	 */
	bzero(block, sizeof(block));
	((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
	((uint32_t *)block)[3] = htobe32(crde->crd_len * 8);
	axf->Update(auth_ctx, block, sizeof(block));

	/* Finalize MAC. */
	axf->Final(digest, auth_ctx);

	/* Inject or validate tag. */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest), digest);
		error = 0;
	} else {
		char digest2[GMAC_DIGEST_LEN];

		crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
		    sizeof(digest2), digest2);
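		/* Compare tags in constant time. */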
		if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
			error = 0;

			/* Tag matches, decrypt data. */
			for (i = 0; i < crde->crd_len; i += sizeof(block)) {
				len = imin(crde->crd_len - i, sizeof(block));
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
				bzero(block + len, sizeof(block) - len);
				exf->decrypt(kschedule, block);
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    crde->crd_skip + i, len, block);
			}
		} else
			error = EBADMSG;
	}

	exf->zerokey(&kschedule);
out:
	if (auth_ctx != NULL) {
		memset(auth_ctx, 0, axf->ctxsize);
		free(auth_ctx, M_CCR);
	}
	crp->crp_etype = error;
	crypto_done(crp);
}

static void
ccr_identify(driver_t *driver, device_t parent)
{
	struct adapter *sc;

	sc = device_get_softc(parent);
	if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
	    device_find_child(parent, "ccr", -1) == NULL)
		device_add_child(parent, "ccr", -1);
}

static int
ccr_probe(device_t dev)
{

	device_set_desc(dev, "Chelsio Crypto Accelerator");
	return (BUS_PROBE_DEFAULT);
}

static void
ccr_sysctls(struct ccr_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.ccr.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * dev.ccr.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

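	/*
	 * Each of the counters below can be read from userland, e.g.
	 * "sysctl dev.ccr.0.stats" dumps them for the first instance.
	 */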
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
	    &sc->stats_hmac, 0, "HMAC requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_encrypt, 0,
	    "Cipher encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
	    &sc->stats_blkcipher_decrypt, 0,
	    "Cipher decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
	    &sc->stats_authenc_encrypt, 0,
	    "Combined AES+HMAC encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
	    &sc->stats_authenc_decrypt, 0,
	    "Combined AES+HMAC decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
	    &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
	    &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
	    &sc->stats_wr_nomem, 0, "Work request memory allocation failures");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
	    &sc->stats_inflight, 0, "Requests currently pending");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
	    &sc->stats_mac_error, 0, "MAC errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
	    &sc->stats_pad_error, 0, "Padding errors");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
	    &sc->stats_bad_session, 0, "Requests with invalid session ID");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
	    &sc->stats_sglist_error, 0,
	    "Requests for which DMA mapping failed");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
	    &sc->stats_process_error, 0, "Requests failed during queueing");
	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
	    &sc->stats_sw_fallback, 0,
	    "Requests processed by falling back to software");
}

static int
ccr_attach(device_t dev)
{
	struct ccr_softc *sc;
	int32_t cid;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->adapter = device_get_softc(device_get_parent(dev));
	sc->txq = &sc->adapter->sge.ctrlq[0];
	sc->rxq = &sc->adapter->sge.rxq[0];
	cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENXIO);
	}
	sc->cid = cid;
	sc->adapter->ccr_softc = sc;

	/* XXX: TODO? */
	sc->tx_channel_id = 0;

	mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
	sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
	sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
	sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
	ccr_sysctls(sc);

	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
	crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
	crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
	crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
	return (0);
}

static int
ccr_detach(device_t dev)
{
	struct ccr_softc *sc;
	int i;

	sc = device_get_softc(dev);

	mtx_lock(&sc->lock);
	for (i = 0; i < sc->nsessions; i++) {
		if (sc->sessions[i].active || sc->sessions[i].pending != 0) {
			mtx_unlock(&sc->lock);
			return (EBUSY);
		}
	}
	sc->detaching = true;
	mtx_unlock(&sc->lock);

	crypto_unregister_all(sc->cid);
	free(sc->sessions, M_CCR);
	mtx_destroy(&sc->lock);
	sglist_free(sc->sg_iv_aad);
	free(sc->iv_aad_buf, M_CCR);
	sglist_free(sc->sg_dsgl);
	sglist_free(sc->sg_ulptx);
	sglist_free(sc->sg_crp);
	sc->adapter->ccr_softc = NULL;
	return (0);
}

static void
ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx)
{
	uint32_t *u32;
	uint64_t *u64;
	u_int i;

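	/* The hardware expects the hash midstate words in big-endian order. */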
	u32 = (uint32_t *)dst;
	u64 = (uint64_t *)dst;
	switch (cri_alg) {
	case CRYPTO_SHA1_HMAC:
		for (i = 0; i < SHA1_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		for (i = 0; i < SHA2_256_HASH_LEN / 4; i++)
			u32[i] = htobe32(auth_ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		for (i = 0; i < SHA2_512_HASH_LEN / 8; i++)
			u64[i] = htobe64(auth_ctx->sha512ctx.state[i]);
		break;
	}
}

static void
ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key,
    int klen)
{
	union authctx auth_ctx;
	struct auth_hash *axf;
	u_int i;

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	axf = s->hmac.auth_hash;
	klen /= 8;
	if (klen > axf->blocksize) {
		axf->Init(&auth_ctx);
		axf->Update(&auth_ctx, key, klen);
		axf->Final(s->hmac.ipad, &auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(s->hmac.ipad, key, klen);

	memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
	memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

	for (i = 0; i < axf->blocksize; i++) {
		s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
		s->hmac.opad[i] ^= HMAC_OPAD_VAL;
	}

	/*
	 * Per RFC 2104, HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 * Hash the raw ipad and opad and store the partial results in
	 * the same buffers; the hardware resumes from these midstates
	 * for each request.
	 */
	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx);

	axf->Init(&auth_ctx);
	axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
	ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx);
}

/*
 * Borrowed from AES_GMAC_Setkey().
 */
static void
ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen)
{
	static char zeroes[GMAC_BLOCK_LEN];
	uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
	int rounds;

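	/* The GHASH subkey H is the AES encryption of the all-zero block. */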
	rounds = rijndaelKeySetupEnc(keysched, key, klen);
	rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h);
}

static int
ccr_aes_check_keylen(int alg, int klen)
{

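	/*
	 * klen is in bits.  AES-XTS keys are double-width since they
	 * include a second, tweak key, hence the 256 and 512 cases.
	 */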
	switch (klen) {
	case 128:
	case 192:
		if (alg == CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	case 256:
		break;
	case 512:
		if (alg != CRYPTO_AES_XTS)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static void
ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
{
	unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
	unsigned int opad_present;

	if (alg == CRYPTO_AES_XTS)
		kbits = klen / 2;
	else
		kbits = klen;
	switch (kbits) {
	case 128:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		break;
	case 192:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		break;
	case 256:
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		break;
	default:
		panic("should not get here");
	}

	s->blkcipher.key_len = klen / 8;
	memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
	switch (alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
		break;
	}

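	/*
	 * Size the key context: the AES key rounded up to 16 bytes,
	 * followed by either the HMAC ipad/opad partial hashes
	 * (AUTHENC) or the GHASH key (GCM).
	 */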
	kctx_len = roundup2(s->blkcipher.key_len, 16);
	switch (s->mode) {
	case AUTHENC:
		mk_size = s->hmac.mk_size;
		opad_present = 1;
		iopad_size = roundup2(s->hmac.partial_digest_len, 16);
		kctx_len += iopad_size * 2;
		break;
	case GCM:
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		opad_present = 0;
		kctx_len += GMAC_BLOCK_LEN;
		break;
	default:
		mk_size = CHCR_KEYCTX_NO_KEY;
		opad_present = 0;
		break;
	}
	kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
	s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
	    V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
	    V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
	    V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
	    V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}

static int
ccr_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct auth_hash *auth_hash;
	struct cryptoini *c, *hash, *cipher;
	unsigned int auth_mode, cipher_mode, iv_len, mk_size;
	unsigned int partial_digest_len;
	int error, i, sess;
	bool gcm_hash;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	gcm_hash = false;
	cipher = NULL;
	hash = NULL;
	auth_hash = NULL;
	auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP;
	iv_len = 0;
	mk_size = 0;
	partial_digest_len = 0;
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (hash)
				return (EINVAL);
			hash = c;
			switch (c->cri_alg) {
			case CRYPTO_SHA1_HMAC:
				auth_hash = &auth_hash_hmac_sha1;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
				partial_digest_len = SHA1_HASH_LEN;
				break;
			case CRYPTO_SHA2_256_HMAC:
				auth_hash = &auth_hash_hmac_sha2_256;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
				partial_digest_len = SHA2_256_HASH_LEN;
				break;
			case CRYPTO_SHA2_384_HMAC:
				auth_hash = &auth_hash_hmac_sha2_384;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_SHA2_512_HMAC:
				auth_hash = &auth_hash_hmac_sha2_512;
				auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
				partial_digest_len = SHA2_512_HASH_LEN;
				break;
			case CRYPTO_AES_128_NIST_GMAC:
			case CRYPTO_AES_192_NIST_GMAC:
			case CRYPTO_AES_256_NIST_GMAC:
				gcm_hash = true;
				auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
				mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
				break;
			}
			break;
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_XTS:
			if (cipher)
				return (EINVAL);
			cipher = c;
			switch (c->cri_alg) {
			case CRYPTO_AES_CBC:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_ICM:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
				iv_len = AES_BLOCK_LEN;
				break;
			case CRYPTO_AES_NIST_GCM_16:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM;
				iv_len = AES_GCM_IV_LEN;
				break;
			case CRYPTO_AES_XTS:
				cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
				iv_len = AES_BLOCK_LEN;
				break;
			}
			if (c->cri_key != NULL) {
				error = ccr_aes_check_keylen(c->cri_alg,
				    c->cri_klen);
				if (error)
					return (error);
			}
			break;
		default:
			return (EINVAL);
		}
	}
	if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM))
		return (EINVAL);
	if (hash == NULL && cipher == NULL)
		return (EINVAL);
	if (hash != NULL && hash->cri_key == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);

	/*
	 * XXX: Don't create a session if the queues aren't
	 * initialized.  This is racy as the rxq can be destroyed by
	 * the associated VI detaching.  Eventually ccr should use
	 * dedicated queues.
	 */
	if (sc->rxq->iq.adapter == NULL || sc->txq->adapter == NULL)
		return (ENXIO);

	mtx_lock(&sc->lock);
	if (sc->detaching) {
		mtx_unlock(&sc->lock);
		return (ENXIO);
	}
	sess = -1;
	for (i = 0; i < sc->nsessions; i++) {
		if (!sc->sessions[i].active && sc->sessions[i].pending == 0) {
			sess = i;
			break;
		}
	}
	if (sess == -1) {
		s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCR,
		    M_NOWAIT | M_ZERO);
		if (s == NULL) {
			mtx_unlock(&sc->lock);
			return (ENOMEM);
		}
		if (sc->sessions != NULL)
			memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions);
		sess = sc->nsessions;
		free(sc->sessions, M_CCR);
		sc->sessions = s;
		sc->nsessions++;
	}

	s = &sc->sessions[sess];

	if (gcm_hash)
		s->mode = GCM;
	else if (hash != NULL && cipher != NULL)
		s->mode = AUTHENC;
	else if (hash != NULL)
		s->mode = HMAC;
	else {
		MPASS(cipher != NULL);
		s->mode = BLKCIPHER;
	}
	if (gcm_hash) {
		if (hash->cri_mlen == 0)
			s->gmac.hash_len = AES_GMAC_HASH_LEN;
		else
			s->gmac.hash_len = hash->cri_mlen;
		ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen);
	} else if (hash != NULL) {
		s->hmac.auth_hash = auth_hash;
		s->hmac.auth_mode = auth_mode;
		s->hmac.mk_size = mk_size;
		s->hmac.partial_digest_len = partial_digest_len;
		if (hash->cri_mlen == 0)
			s->hmac.hash_len = auth_hash->hashsize;
		else
			s->hmac.hash_len = hash->cri_mlen;
		ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
		    hash->cri_klen);
	}
	if (cipher != NULL) {
		s->blkcipher.cipher_mode = cipher_mode;
		s->blkcipher.iv_len = iv_len;
		if (cipher->cri_key != NULL)
			ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
			    cipher->cri_klen);
	}

	s->active = true;
	mtx_unlock(&sc->lock);

	*sidp = sess;
	return (0);
}

static int
ccr_freesession(device_t dev, uint64_t tid)
{
	struct ccr_softc *sc;
	uint32_t sid;
	int error;

	sc = device_get_softc(dev);
	sid = CRYPTO_SESID2LID(tid);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active)
		error = EINVAL;
	else {
		if (sc->sessions[sid].pending != 0)
			device_printf(dev,
			    "session %d freed with %d pending requests\n", sid,
			    sc->sessions[sid].pending);
		sc->sessions[sid].active = false;
		error = 0;
	}
	mtx_unlock(&sc->lock);
	return (error);
}

static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ccr_softc *sc;
	struct ccr_session *s;
	struct cryptodesc *crd, *crda, *crde;
	uint32_t sid;
	int error;

	if (crp == NULL)
		return (EINVAL);

	crd = crp->crp_desc;
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	sc = device_get_softc(dev);
	mtx_lock(&sc->lock);
	if (sid >= sc->nsessions || !sc->sessions[sid].active) {
		sc->stats_bad_session++;
		error = EINVAL;
		goto out;
	}

	error = ccr_populate_sglist(sc->sg_crp, crp);
	if (error) {
		sc->stats_sglist_error++;
		goto out;
	}

	s = &sc->sessions[sid];
	switch (s->mode) {
	case HMAC:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		error = ccr_hmac(sc, sid, s, crp);
		if (error == 0)
			sc->stats_hmac++;
		break;
	case BLKCIPHER:
		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crd->crd_alg,
			    crd->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
			    crd->crd_klen);
		}
		error = ccr_blkcipher(sc, sid, s, crp);
		if (error == 0) {
			if (crd->crd_flags & CRD_F_ENCRYPT)
				sc->stats_blkcipher_encrypt++;
			else
				sc->stats_blkcipher_decrypt++;
		}
		break;
	case AUTHENC:
		error = 0;
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			/* Only encrypt-then-authenticate supported. */
			crde = crd;
			crda = crd->crd_next;
			if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
				error = EINVAL;
				break;
			}
			break;
		default:
			crda = crd;
			crde = crd->crd_next;
			if (crde->crd_flags & CRD_F_ENCRYPT) {
				error = EINVAL;
				break;
			}
			break;
		}
		if (error)
			break;
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
			    crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
		error = ccr_authenc(sc, sid, s, crp, crda, crde);
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_authenc_encrypt++;
			else
				sc->stats_authenc_decrypt++;
		}
		break;
	case GCM:
		error = 0;
		if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
			crde = crd;
			crda = crd->crd_next;
		} else {
			crda = crd;
			crde = crd->crd_next;
		}
		if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
			ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen);
		if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
			error = ccr_aes_check_keylen(crde->crd_alg,
			    crde->crd_klen);
			if (error)
				break;
			ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
			    crde->crd_klen);
		}
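		/*
		 * The crypto engine does not handle GCM requests with
		 * an empty payload, so perform those in software.
		 */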
		if (crde->crd_len == 0) {
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		error = ccr_gcm(sc, sid, s, crp, crda, crde);
		if (error == EMSGSIZE) {
			sc->stats_sw_fallback++;
			mtx_unlock(&sc->lock);
			ccr_gcm_soft(s, crp, crda, crde);
			return (0);
		}
		if (error == 0) {
			if (crde->crd_flags & CRD_F_ENCRYPT)
				sc->stats_gcm_encrypt++;
			else
				sc->stats_gcm_decrypt++;
		}
		break;
	}

	if (error == 0) {
		s->pending++;
		sc->stats_inflight++;
	} else
		sc->stats_process_error++;

out:
	mtx_unlock(&sc->lock);

	if (error) {
		crp->crp_etype = error;
		crypto_done(crp);
	}

	return (0);
}

static int
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct ccr_softc *sc = iq->adapter->ccr_softc;
	struct ccr_session *s;
	const struct cpl_fw6_pld *cpl;
	struct cryptop *crp;
	uint32_t sid, status;
	int error;

	if (m != NULL)
		cpl = mtod(m, const void *);
	else
		cpl = (const void *)(rss + 1);

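	/*
	 * The driver passed the cryptop pointer down with the work
	 * request, and the firmware echoes it back in data[1].
	 */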
	crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
	sid = CRYPTO_SESID2LID(crp->crp_sid);
	status = be64toh(cpl->data[0]);
	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
		error = EBADMSG;
	else
		error = 0;

	mtx_lock(&sc->lock);
	MPASS(sid < sc->nsessions);
	s = &sc->sessions[sid];
	s->pending--;
	sc->stats_inflight--;

	switch (s->mode) {
	case HMAC:
		error = ccr_hmac_done(sc, s, crp, cpl, error);
		break;
	case BLKCIPHER:
		error = ccr_blkcipher_done(sc, s, crp, cpl, error);
		break;
	case AUTHENC:
		error = ccr_authenc_done(sc, s, crp, cpl, error);
		break;
	case GCM:
		error = ccr_gcm_done(sc, s, crp, cpl, error);
		break;
	}

	if (error == EBADMSG) {
		if (CHK_MAC_ERR_BIT(status))
			sc->stats_mac_error++;
		if (CHK_PAD_ERR_BIT(status))
			sc->stats_pad_error++;
	}
	mtx_unlock(&sc->lock);
	crp->crp_etype = error;
	crypto_done(crp);
	m_freem(m);
	return (0);
}

static int
ccr_modevent(module_t mod, int cmd, void *arg)
{

	switch (cmd) {
	case MOD_LOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
		return (0);
	case MOD_UNLOAD:
		t4_register_cpl_handler(CPL_FW6_PLD, NULL);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static device_method_t ccr_methods[] = {
	DEVMETHOD(device_identify,	ccr_identify),
	DEVMETHOD(device_probe,		ccr_probe),
	DEVMETHOD(device_attach,	ccr_attach),
	DEVMETHOD(device_detach,	ccr_detach),

	DEVMETHOD(cryptodev_newsession,	ccr_newsession),
	DEVMETHOD(cryptodev_freesession, ccr_freesession),
	DEVMETHOD(cryptodev_process,	ccr_process),

	DEVMETHOD_END
};

static driver_t ccr_driver = {
	"ccr",
	ccr_methods,
	sizeof(struct ccr_softc)
};

static devclass_t ccr_devclass;

DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);