/* SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause */
/*	$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <opencrypto/xform.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw15reg.h"
#include "qatvar.h"
#include "qat_hw15var.h"

static int	qat_adm_ring_init_ring_table(struct qat_softc *);
static void	qat_adm_ring_build_slice_mask(uint16_t *, uint32_t, uint32_t);
static void	qat_adm_ring_build_shram_mask(uint64_t *, uint32_t, uint32_t);
static int	qat_adm_ring_build_ring_table(struct qat_softc *, uint32_t);
static int	qat_adm_ring_build_init_msg(struct qat_softc *,
		    struct fw_init_req *, enum fw_init_cmd_id, uint32_t,
		    struct qat_accel_init_cb *);
static int	qat_adm_ring_send_init_msg_sync(struct qat_softc *,
		    enum fw_init_cmd_id, uint32_t);
static int	qat_adm_ring_send_init_msg(struct qat_softc *,
		    enum fw_init_cmd_id);
static int	qat_adm_ring_intr(struct qat_softc *, void *, void *);

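/*
 * Helpers to populate the fixed-layout fields of QAT firmware request
 * messages (ARCH_IF header, common LA header, mid section, request
 * parameter pointer and footer).
 */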
void
qat_msg_req_type_populate(struct arch_if_req_hdr *msg, enum arch_if_req type,
    uint32_t rxring)
{

	memset(msg, 0, sizeof(struct arch_if_req_hdr));
	msg->flags = ARCH_IF_FLAGS_VALID_FLAG |
	    ARCH_IF_FLAGS_RESP_RING_TYPE_ET | ARCH_IF_FLAGS_RESP_TYPE_S;
	msg->req_type = type;
	msg->resp_pipe_id = rxring;
}

void
qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *msg, bus_addr_t desc_paddr,
    uint8_t hdrsz, uint8_t hwblksz, uint16_t comn_req_flags, uint32_t flow_id)
{
	struct fw_comn_req_hdr *hdr = &msg->comn_hdr;

	hdr->comn_req_flags = comn_req_flags;
	hdr->content_desc_params_sz = hwblksz;
	hdr->content_desc_hdr_sz = hdrsz;
	hdr->content_desc_addr = desc_paddr;
	msg->flow_id = flow_id;
}

void
qat_msg_service_cmd_populate(struct fw_la_bulk_req *msg, enum fw_la_cmd_id cmdid,
    uint16_t cmd_flags)
{
	msg->comn_la_req.la_cmd_id = cmdid;
	msg->comn_la_req.u.la_flags = cmd_flags;
}

void
qat_msg_cmn_mid_populate(struct fw_comn_req_mid *msg, void *cookie,
    uint64_t src, uint64_t dst)
{

	msg->opaque_data = (uint64_t)(uintptr_t)cookie;
	msg->src_data_addr = src;
	if (dst == 0)
		msg->dest_data_addr = src;
	else
		msg->dest_data_addr = dst;
}

void
qat_msg_req_params_populate(struct fw_la_bulk_req *msg,
    bus_addr_t req_params_paddr, uint8_t req_params_sz)
{
	msg->req_params_addr = req_params_paddr;
	msg->comn_la_req.u1.req_params_blk_sz = req_params_sz / 8;
}

void
qat_msg_cmn_footer_populate(union fw_comn_req_ftr *msg, uint64_t next_addr)
{
	msg->next_request_addr = next_addr;
}

void
qat_msg_params_populate(struct fw_la_bulk_req *msg,
    struct qat_crypto_desc *desc, uint8_t req_params_sz,
    uint16_t service_cmd_flags, uint16_t comn_req_flags)
{
	qat_msg_cmn_hdr_populate(msg, desc->qcd_desc_paddr,
	    desc->qcd_hdr_sz, desc->qcd_hw_blk_sz, comn_req_flags, 0);
	qat_msg_service_cmd_populate(msg, desc->qcd_cmd_id, service_cmd_flags);
	qat_msg_cmn_mid_populate(&msg->comn_mid, NULL, 0, 0);
	qat_msg_req_params_populate(msg, 0, req_params_sz);
	qat_msg_cmn_footer_populate(&msg->comn_ftr, 0);
}

static int
qat_adm_ring_init_ring_table(struct qat_softc *sc)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;

	if (sc->sc_ae_num == 1) {
		qadr->qadr_cya_ring_tbl =
		    &qadr->qadr_master_ring_tbl[0];
		qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
	} else if (sc->sc_ae_num == 2 || sc->sc_ae_num == 4) {
		qadr->qadr_cya_ring_tbl =
		    &qadr->qadr_master_ring_tbl[0];
		qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
		qadr->qadr_cyb_ring_tbl =
		    &qadr->qadr_master_ring_tbl[1];
		qadr->qadr_srv_mask[1] = QAT_SERVICE_CRYPTO_B;
	}

	return 0;
}

int
qat_adm_ring_init(struct qat_softc *sc)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	int error, i, j;

	error = qat_alloc_dmamem(sc, &qadr->qadr_dma, 1, PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	qadr->qadr_master_ring_tbl = qadr->qadr_dma.qdm_dma_vaddr;

	MPASS(sc->sc_ae_num *
	    sizeof(struct fw_init_ring_table) <= PAGE_SIZE);

	/* Initialize the Master Ring Table */
	for (i = 0; i < sc->sc_ae_num; i++) {
		struct fw_init_ring_table *firt =
		    &qadr->qadr_master_ring_tbl[i];

		for (j = 0; j < INIT_RING_TABLE_SZ; j++) {
			struct fw_init_ring_params *firp =
			    &firt->firt_bulk_rings[j];

			firp->firp_reserved = 0;
			firp->firp_curr_weight = QAT_DEFAULT_RING_WEIGHT;
			firp->firp_init_weight = QAT_DEFAULT_RING_WEIGHT;
			firp->firp_ring_pvl = QAT_DEFAULT_PVL;
		}
		memset(firt->firt_ring_mask, 0, sizeof(firt->firt_ring_mask));
	}

	error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_TX,
	    ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_req_size,
	    NULL, NULL, "admin_tx", &qadr->qadr_admin_tx);
	if (error)
		return error;

	error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_RX,
	    ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_resp_size,
	    qat_adm_ring_intr, qadr, "admin_rx", &qadr->qadr_admin_rx);
	if (error)
		return error;

	/*
	 * Finally set up the service indices into the Master Ring Table
	 * and convenient ring table pointers for each service enabled.
	 * Only the Admin rings are initialized.
	 */
	error = qat_adm_ring_init_ring_table(sc);
	if (error)
		return error;

	/*
	 * Calculate the number of active AEs per QAT
	 * needed for Shram partitioning.
	 */
	for (i = 0; i < sc->sc_ae_num; i++) {
		if (qadr->qadr_srv_mask[i])
			qadr->qadr_active_aes_per_accel++;
	}

	return 0;
}

static void
qat_adm_ring_build_slice_mask(uint16_t *slice_mask, uint32_t srv_mask,
    uint32_t init_shram)
{
	uint16_t shram = 0, comn_req = 0;

	if (init_shram)
		shram = COMN_REQ_SHRAM_INIT_REQUIRED;

	if (srv_mask & QAT_SERVICE_CRYPTO_A)
		comn_req |= COMN_REQ_CY0_ONLY(shram);
	if (srv_mask & QAT_SERVICE_CRYPTO_B)
		comn_req |= COMN_REQ_CY1_ONLY(shram);

	*slice_mask = comn_req;
}

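/*
 * Partition the 64-bit shared-RAM (SHRAM) mask among the active AEs of an
 * accelerator: a single AE owns the whole region, two AEs split it in half,
 * and three AEs split it into roughly equal thirds.
 */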
static void
qat_adm_ring_build_shram_mask(uint64_t *shram_mask, uint32_t active_aes,
    uint32_t ae)
{
	*shram_mask = 0;

	if (active_aes == 1) {
		*shram_mask = ~(*shram_mask);
	} else if (active_aes == 2) {
		if (ae == 1)
			*shram_mask = ((~(*shram_mask)) & 0xffffffff);
		else
			*shram_mask = ((~(*shram_mask)) & 0xffffffff00000000ull);
	} else if (active_aes == 3) {
		if (ae == 0)
			*shram_mask = ((~(*shram_mask)) & 0x7fffff);
		else if (ae == 1)
			*shram_mask = ((~(*shram_mask)) & 0x3fffff800000ull);
		else
			*shram_mask = ((~(*shram_mask)) & 0xffffc00000000000ull);
	} else {
		panic("Only three services are supported in current version");
	}
}

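/*
 * Mark the symmetric crypto TX ring in the ring table of the service that
 * owns this AE and give it a high arbitration weight.
 */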
static int
qat_adm_ring_build_ring_table(struct qat_softc *sc, uint32_t ae)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	struct fw_init_ring_table *tbl;
	struct fw_init_ring_params *param;
	uint8_t srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae];

	if ((srv_mask & QAT_SERVICE_CRYPTO_A)) {
		tbl = qadr->qadr_cya_ring_tbl;
	} else if ((srv_mask & QAT_SERVICE_CRYPTO_B)) {
		tbl = qadr->qadr_cyb_ring_tbl;
	} else {
		device_printf(sc->sc_dev,
		    "Invalid execution engine %d\n", ae);
		return EINVAL;
	}

	param = &tbl->firt_bulk_rings[sc->sc_hw.qhw_ring_sym_tx];
	param->firp_curr_weight = QAT_HI_PRIO_RING_WEIGHT;
	param->firp_init_weight = QAT_HI_PRIO_RING_WEIGHT;
	FW_INIT_RING_MASK_SET(tbl, sc->sc_hw.qhw_ring_sym_tx);

	return 0;
}

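/*
 * Build a FW_INIT admin request for one AE: SET_AE_INFO carries the slice
 * and SHRAM masks, SET_RING_INFO points the firmware at that AE's ring
 * table.  The callback pointer is stashed in the opaque data field so the
 * response handler can find it.
 */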
static int
qat_adm_ring_build_init_msg(struct qat_softc *sc,
    struct fw_init_req *initmsg, enum fw_init_cmd_id cmd, uint32_t ae,
    struct qat_accel_init_cb *cb)
{
	struct fw_init_set_ae_info_hdr *aehdr;
	struct fw_init_set_ae_info *aeinfo;
	struct fw_init_set_ring_info_hdr *ringhdr;
	struct fw_init_set_ring_info *ringinfo;
	int init_shram = 0, tgt_id, cluster_id;
	uint32_t srv_mask;

	srv_mask = sc->sc_admin_rings.qadr_srv_mask[
	    ae % sc->sc_ae_num];

	memset(initmsg, 0, sizeof(struct fw_init_req));

	qat_msg_req_type_populate(&initmsg->comn_hdr.arch_if,
	    ARCH_IF_REQ_QAT_FW_INIT,
	    sc->sc_admin_rings.qadr_admin_rx->qr_ring_id);

	qat_msg_cmn_mid_populate(&initmsg->comn_mid, cb, 0, 0);

	switch (cmd) {
	case FW_INIT_CMD_SET_AE_INFO:
		if (ae % sc->sc_ae_num == 0)
			init_shram = 1;
		if (ae >= sc->sc_ae_num) {
			tgt_id = 1;
			cluster_id = 1;
		} else {
			cluster_id = 0;
			if (sc->sc_ae_mask)
				tgt_id = 0;
			else
				tgt_id = 1;
		}
		aehdr = &initmsg->u.set_ae_info;
		aeinfo = &initmsg->u1.set_ae_info;

		aehdr->init_cmd_id = cmd;
		/* XXX this does not support a sparse ae_mask */
		aehdr->init_trgt_id = ae;
		aehdr->init_ring_cluster_id = cluster_id;
		aehdr->init_qat_id = tgt_id;

		qat_adm_ring_build_slice_mask(&aehdr->init_slice_mask, srv_mask,
		    init_shram);

		qat_adm_ring_build_shram_mask(&aeinfo->init_shram_mask,
		    sc->sc_admin_rings.qadr_active_aes_per_accel,
		    ae % sc->sc_ae_num);

		break;
	case FW_INIT_CMD_SET_RING_INFO:
		ringhdr = &initmsg->u.set_ring_info;
		ringinfo = &initmsg->u1.set_ring_info;

		ringhdr->init_cmd_id = cmd;
		/* XXX this does not support a sparse ae_mask */
		ringhdr->init_trgt_id = ae;

		/* XXX */
		qat_adm_ring_build_ring_table(sc,
		    ae % sc->sc_ae_num);

		ringhdr->init_ring_tbl_sz = sizeof(struct fw_init_ring_table);

		ringinfo->init_ring_table_ptr =
		    sc->sc_admin_rings.qadr_dma.qdm_dma_seg.ds_addr +
		    ((ae % sc->sc_ae_num) *
		    sizeof(struct fw_init_ring_table));

		break;
	default:
		return ENOTSUP;
	}

	return 0;
}

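/*
 * Send a single FW_INIT request to one AE and sleep until the admin RX
 * handler wakes us with the firmware's status, or the request times out.
 */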
static int
qat_adm_ring_send_init_msg_sync(struct qat_softc *sc,
    enum fw_init_cmd_id cmd, uint32_t ae)
{
	struct fw_init_req initmsg;
	struct qat_accel_init_cb cb;
	int error;

	error = qat_adm_ring_build_init_msg(sc, &initmsg, cmd, ae, &cb);
	if (error)
		return error;

	error = qat_etr_put_msg(sc, sc->sc_admin_rings.qadr_admin_tx,
	    (uint32_t *)&initmsg);
	if (error)
		return error;

	error = tsleep(&cb, PZERO, "qat_init", hz * 3 / 2);
	if (error) {
		device_printf(sc->sc_dev,
		    "Timed out waiting for firmware initialization: %d\n",
		    error);
		return error;
	}
	if (cb.qaic_status) {
		device_printf(sc->sc_dev, "Failed to initialize firmware\n");
		return EIO;
	}

	return error;
}

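/*
 * Send an init command to every AE that needs it: AE and ring info go to
 * each AE with an assigned service, TRNG commands only to AEs providing
 * crypto service A.
 */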
static int
qat_adm_ring_send_init_msg(struct qat_softc *sc,
    enum fw_init_cmd_id cmd)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	uint32_t error, ae;

	for (ae = 0; ae < sc->sc_ae_num; ae++) {
		uint8_t srv_mask = qadr->qadr_srv_mask[ae];
		switch (cmd) {
		case FW_INIT_CMD_SET_AE_INFO:
		case FW_INIT_CMD_SET_RING_INFO:
			if (!srv_mask)
				continue;
			break;
		case FW_INIT_CMD_TRNG_ENABLE:
		case FW_INIT_CMD_TRNG_DISABLE:
			if (!(srv_mask & QAT_SERVICE_CRYPTO_A))
				continue;
			break;
		default:
			return ENOTSUP;
		}

		error = qat_adm_ring_send_init_msg_sync(sc, cmd, ae);
		if (error)
			return error;
	}

	return 0;
}

int
qat_adm_ring_send_init(struct qat_softc *sc)
{
	int error;

	error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_AE_INFO);
	if (error)
		return error;

	error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_RING_INFO);
	if (error)
		return error;

	return 0;
}

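/*
 * Admin RX ring handler.  FW_INIT responses carry the sender's callback in
 * the opaque data field; record the status there and wake the thread
 * sleeping in qat_adm_ring_send_init_msg_sync().
 */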
static int
qat_adm_ring_intr(struct qat_softc *sc, void *arg, void *msg)
{
	struct arch_if_resp_hdr *resp;
	struct fw_init_resp *init_resp;
	struct qat_accel_init_cb *init_cb;
	int handled = 0;

	resp = (struct arch_if_resp_hdr *)msg;

	switch (resp->resp_type) {
	case ARCH_IF_REQ_QAT_FW_INIT:
		init_resp = (struct fw_init_resp *)msg;
		init_cb = (struct qat_accel_init_cb *)
		    (uintptr_t)init_resp->comn_resp.opaque_data;
		init_cb->qaic_status =
		    __SHIFTOUT(init_resp->comn_resp.comn_status,
		    COMN_RESP_INIT_ADMIN_STATUS);
		wakeup(init_cb);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unknown resp type %d\n", resp->resp_type);
		break;
	}

	return handled;
}

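/*
 * Requests from even-numbered banks use the first auth/cipher slice pair
 * and odd-numbered banks use the second, spreading the load across both
 * slices of an accelerator.
 */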
static inline uint16_t
qat_hw15_get_comn_req_flags(uint8_t ae)
{
	if (ae == 0) {
		return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
		    COMN_REQ_AUTH0_SLICE_REQUIRED |
		    COMN_REQ_CIPHER0_SLICE_REQUIRED;
	} else {
		return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
		    COMN_REQ_AUTH1_SLICE_REQUIRED |
		    COMN_REQ_CIPHER1_SLICE_REQUIRED;
	}
}

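/*
 * Fill in the cipher control header for the content descriptor.  Sizes and
 * offsets are expressed in 8-byte words.  Returns the number of bytes the
 * cipher config and key occupy in the hardware setup block.
 */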
static uint32_t
qat_hw15_crypto_setup_cipher_desc(struct qat_crypto_desc *desc,
    struct qat_session *qs, struct fw_cipher_hdr *cipher_hdr,
    uint32_t hw_blk_offset, enum fw_slice next_slice)
{
	desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;

	cipher_hdr->state_padding_sz = 0;
	cipher_hdr->key_sz = qs->qs_cipher_klen / 8;

	cipher_hdr->state_sz = desc->qcd_cipher_blk_sz / 8;

	cipher_hdr->next_id = next_slice;
	cipher_hdr->curr_id = FW_SLICE_CIPHER;
	cipher_hdr->offset = hw_blk_offset / 8;
	cipher_hdr->resrvd = 0;

	return sizeof(struct hw_cipher_config) + qs->qs_cipher_klen;
}

static void
qat_hw15_crypto_setup_cipher_config(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, const struct cryptop *crp,
    struct hw_cipher_config *cipher_config)
{
	const uint8_t *key;
	uint8_t *cipher_key;

	cipher_config->val = qat_crypto_load_cipher_session(desc, qs);
	cipher_config->reserved = 0;

	cipher_key = (uint8_t *)(cipher_config + 1);
	if (crp != NULL && crp->crp_cipher_key != NULL)
		key = crp->crp_cipher_key;
	else
		key = qs->qs_cipher_key;
	memcpy(cipher_key, key, qs->qs_cipher_klen);
}

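/*
 * Fill in the auth control header and record the descriptor offsets later
 * used to patch the GCM AAD length into the content descriptor.  Returns
 * the number of bytes the auth setup block occupies in the hardware block.
 */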
static uint32_t
qat_hw15_crypto_setup_auth_desc(struct qat_crypto_desc *desc,
    struct qat_session *qs, struct fw_auth_hdr *auth_hdr,
    uint32_t ctrl_blk_offset, uint32_t hw_blk_offset,
    enum fw_slice next_slice)
{
	const struct qat_sym_hash_def *hash_def;

	(void)qat_crypto_load_auth_session(desc, qs, &hash_def);

	auth_hdr->next_id = next_slice;
	auth_hdr->curr_id = FW_SLICE_AUTH;
	auth_hdr->offset = hw_blk_offset / 8;
	auth_hdr->resrvd = 0;

	auth_hdr->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
	auth_hdr->u.inner_prefix_sz = 0;
	auth_hdr->outer_prefix_sz = 0;
	auth_hdr->final_sz = hash_def->qshd_alg->qshai_digest_len;
	auth_hdr->inner_state1_sz =
	    roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
	auth_hdr->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
	auth_hdr->inner_state2_sz =
	    roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
	auth_hdr->inner_state2_off = auth_hdr->offset +
	    ((sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz) / 8);

	auth_hdr->outer_config_off = 0;
	auth_hdr->outer_state1_sz = 0;
	auth_hdr->outer_res_sz = 0;
	auth_hdr->outer_prefix_off = 0;

	desc->qcd_auth_sz = hash_def->qshd_alg->qshai_sah->hashsize;
	desc->qcd_state_storage_sz = (sizeof(struct hw_auth_counter) +
	    roundup(hash_def->qshd_alg->qshai_state_size, 8)) / 8;
	desc->qcd_gcm_aad_sz_offset1 = desc->qcd_auth_offset +
	    sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
	    AES_BLOCK_LEN;
	desc->qcd_gcm_aad_sz_offset2 = ctrl_blk_offset +
	    offsetof(struct fw_auth_hdr, u.aad_sz);

	return sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
	    auth_hdr->inner_state2_sz;
}

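/*
 * Write the hardware auth setup block: config and counter words followed by
 * the inner hash state, which is either the algorithm's initial state
 * (mode 0) or precomputed HMAC/GMAC state (mode 1).
 */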
static void
qat_hw15_crypto_setup_auth_setup(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, const struct cryptop *crp,
    struct hw_auth_setup *auth_setup)
{
	const struct qat_sym_hash_def *hash_def;
	const uint8_t *key;
	uint8_t *state1, *state2;
	uint32_t state_sz, state1_sz, state2_sz, state1_pad_len, state2_pad_len;

	auth_setup->auth_config.config = qat_crypto_load_auth_session(desc, qs,
	    &hash_def);
	auth_setup->auth_config.reserved = 0;

	auth_setup->auth_counter.counter =
	    htobe32(hash_def->qshd_qat->qshqi_auth_counter);
	auth_setup->auth_counter.reserved = 0;

	state1 = (uint8_t *)(auth_setup + 1);
	state2 = state1 + roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
	switch (qs->qs_auth_algo) {
	case HW_AUTH_ALGO_GALOIS_128:
		qat_crypto_gmac_precompute(desc, qs->qs_cipher_key,
		    qs->qs_cipher_klen, hash_def, state2);
		break;
	case HW_AUTH_ALGO_SHA1:
		state_sz = hash_def->qshd_alg->qshai_state_size;
		state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
		state2_sz = roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
		if (qs->qs_auth_mode == HW_AUTH_MODE1) {
			state1_pad_len = state1_sz - state_sz;
			state2_pad_len = state2_sz - state_sz;
			if (state1_pad_len > 0)
				memset(state1 + state_sz, 0, state1_pad_len);
			if (state2_pad_len > 0)
				memset(state2 + state_sz, 0, state2_pad_len);
		}
		/* FALLTHROUGH */
	case HW_AUTH_ALGO_SHA256:
	case HW_AUTH_ALGO_SHA384:
	case HW_AUTH_ALGO_SHA512:
		switch (qs->qs_auth_mode) {
		case HW_AUTH_MODE0:
			memcpy(state1, hash_def->qshd_alg->qshai_init_state,
			    state1_sz);
			/* Override for mode 0 hashes. */
			auth_setup->auth_counter.counter = 0;
			break;
		case HW_AUTH_MODE1:
			if (crp != NULL && crp->crp_auth_key != NULL)
				key = crp->crp_auth_key;
			else
				key = qs->qs_auth_key;
			if (key != NULL) {
				qat_crypto_hmac_precompute(desc, key,
				    qs->qs_auth_klen, hash_def, state1, state2);
			}
			break;
		default:
			panic("%s: unhandled auth mode %d", __func__,
			    qs->qs_auth_mode);
		}
		break;
	default:
		panic("%s: unhandled auth algorithm %d", __func__,
		    qs->qs_auth_algo);
	}
}

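/*
 * Build the per-session content descriptor and the cached bulk request.
 * The descriptor consists of a control block (one header per firmware
 * slice) followed by the hardware setup blocks for those slices; both sizes
 * are stored in 8-byte words in the request header.
 */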
void
qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
    struct qat_crypto_desc *desc)
{
	struct fw_cipher_hdr *cipher_hdr;
	struct fw_auth_hdr *auth_hdr;
	struct fw_la_bulk_req *req_cache;
	struct hw_auth_setup *auth_setup;
	struct hw_cipher_config *cipher_config;
	uint32_t ctrl_blk_sz, ctrl_blk_offset, hw_blk_offset;
	int i;
	uint16_t la_cmd_flags;
	uint8_t req_params_sz;
	uint8_t *ctrl_blk_ptr, *hw_blk_ptr;

	ctrl_blk_sz = 0;
	if (qs->qs_cipher_algo != HW_CIPHER_ALGO_NULL)
		ctrl_blk_sz += sizeof(struct fw_cipher_hdr);
	if (qs->qs_auth_algo != HW_AUTH_ALGO_NULL)
		ctrl_blk_sz += sizeof(struct fw_auth_hdr);

	ctrl_blk_ptr = desc->qcd_content_desc;
	ctrl_blk_offset = 0;
	hw_blk_ptr = ctrl_blk_ptr + ctrl_blk_sz;
	hw_blk_offset = 0;

	la_cmd_flags = 0;
	req_params_sz = 0;
	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			cipher_hdr = (struct fw_cipher_hdr *)(ctrl_blk_ptr +
			    ctrl_blk_offset);
			cipher_config = (struct hw_cipher_config *)(hw_blk_ptr +
			    hw_blk_offset);
			desc->qcd_cipher_offset = ctrl_blk_sz + hw_blk_offset;
			hw_blk_offset += qat_hw15_crypto_setup_cipher_desc(desc,
			    qs, cipher_hdr, hw_blk_offset,
			    desc->qcd_slices[i + 1]);
			qat_hw15_crypto_setup_cipher_config(desc, qs, NULL,
			    cipher_config);
			ctrl_blk_offset += sizeof(struct fw_cipher_hdr);
			req_params_sz += sizeof(struct fw_la_cipher_req_params);
			break;
		case FW_SLICE_AUTH:
			auth_hdr = (struct fw_auth_hdr *)(ctrl_blk_ptr +
			    ctrl_blk_offset);
			auth_setup = (struct hw_auth_setup *)(hw_blk_ptr +
			    hw_blk_offset);
			desc->qcd_auth_offset = ctrl_blk_sz + hw_blk_offset;
			hw_blk_offset += qat_hw15_crypto_setup_auth_desc(desc,
			    qs, auth_hdr, ctrl_blk_offset, hw_blk_offset,
			    desc->qcd_slices[i + 1]);
			qat_hw15_crypto_setup_auth_setup(desc, qs, NULL,
			    auth_setup);
			ctrl_blk_offset += sizeof(struct fw_auth_hdr);
			req_params_sz += sizeof(struct fw_la_auth_req_params);
			la_cmd_flags |= LA_FLAGS_RET_AUTH_RES;
			/* no digest verify */
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			MPASS(0);
			break;
		}
	}

	desc->qcd_hdr_sz = ctrl_blk_offset / 8;
	desc->qcd_hw_blk_sz = hw_blk_offset / 8;

	req_cache = (struct fw_la_bulk_req *)desc->qcd_req_cache;
	qat_msg_req_type_populate(
	    &req_cache->comn_hdr.arch_if,
	    ARCH_IF_REQ_QAT_FW_LA, 0);

	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
		la_cmd_flags |= LA_FLAGS_PROTO_GCM | LA_FLAGS_GCM_IV_LEN_FLAG;
	else
		la_cmd_flags |= LA_FLAGS_PROTO_NO;

	qat_msg_params_populate(req_cache, desc, req_params_sz,
	    la_cmd_flags, 0);

	bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
	    qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
}

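/*
 * A request that carries its own keys cannot use the session's content
 * descriptor.  Copy the descriptor into the cookie, rewrite the cipher and
 * auth setup blocks with the per-request keys, and point the message at the
 * copy.
 */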
static void
qat_hw15_crypto_req_setkey(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, struct qat_sym_cookie *qsc,
    struct fw_la_bulk_req *bulk_req, struct cryptop *crp)
{
	struct hw_auth_setup *auth_setup;
	struct hw_cipher_config *cipher_config;
	uint8_t *cdesc;
	int i;

	cdesc = qsc->qsc_content_desc;
	memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			cipher_config = (struct hw_cipher_config *)
			    (cdesc + desc->qcd_cipher_offset);
			qat_hw15_crypto_setup_cipher_config(desc, qs, crp,
			    cipher_config);
			break;
		case FW_SLICE_AUTH:
			auth_setup = (struct hw_auth_setup *)
			    (cdesc + desc->qcd_auth_offset);
			qat_hw15_crypto_setup_auth_setup(desc, qs, crp,
			    auth_setup);
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			MPASS(0);
		}
	}

	bulk_req->comn_hdr.content_desc_addr = qsc->qsc_content_desc_paddr;
}

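/*
 * Fill in the per-request parameter block and the bulk request message for
 * one symmetric crypto operation, starting from the request template cached
 * in the session descriptor.
 */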
void
qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
    struct qat_session *qs, struct qat_crypto_desc const *desc,
    struct qat_sym_cookie *qsc, struct cryptop *crp)
{
	struct qat_sym_bulk_cookie *qsbc;
	struct fw_la_bulk_req *bulk_req;
	struct fw_la_cipher_req_params *cipher_req;
	struct fw_la_auth_req_params *auth_req;
	bus_addr_t digest_paddr;
	uint8_t *aad_szp2, *req_params_ptr;
	uint32_t aad_sz, *aad_szp1;
	enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
	enum fw_slice next_slice;

	qsbc = &qsc->qsc_bulk_cookie;

	bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
	memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);
	bulk_req->comn_hdr.arch_if.resp_pipe_id = qcb->qcb_sym_rx->qr_ring_id;
	bulk_req->comn_hdr.comn_req_flags =
	    qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2);
	bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		bulk_req->comn_mid.dest_data_addr =
		    qsc->qsc_obuffer_list_desc_paddr;
	} else {
		bulk_req->comn_mid.dest_data_addr =
		    qsc->qsc_buffer_list_desc_paddr;
	}
	bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
	bulk_req->comn_ftr.next_request_addr = 0;
	bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
	if (__predict_false(crp->crp_cipher_key != NULL ||
	    crp->crp_auth_key != NULL)) {
		qat_hw15_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
	}

	digest_paddr = 0;
	if (desc->qcd_auth_sz != 0)
		digest_paddr = qsc->qsc_auth_res_paddr;

	req_params_ptr = qsbc->qsbc_req_params_buf;
	memset(req_params_ptr, 0, sizeof(qsbc->qsbc_req_params_buf));

	/*
	 * The SG list layout is a bit different for GCM and GMAC, it's simpler
	 * to handle those cases separately.
	 */
	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
		cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
		auth_req = (struct fw_la_auth_req_params *)
		    (req_params_ptr + sizeof(struct fw_la_cipher_req_params));

		cipher_req->cipher_state_sz = desc->qcd_cipher_blk_sz / 8;
		cipher_req->curr_id = FW_SLICE_CIPHER;
		if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
			cipher_req->next_id = FW_SLICE_DRAM_WR;
		else
			cipher_req->next_id = FW_SLICE_AUTH;
		cipher_req->state_address = qsc->qsc_iv_buf_paddr;

		if (cmd_id != FW_LA_CMD_AUTH) {
			/*
			 * Don't fill out the cipher block if we're doing GMAC
			 * only.
			 */
			cipher_req->cipher_off = 0;
			cipher_req->cipher_len = crp->crp_payload_length;
		}
		auth_req->curr_id = FW_SLICE_AUTH;
		if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
			auth_req->next_id = FW_SLICE_CIPHER;
		else
			auth_req->next_id = FW_SLICE_DRAM_WR;

		auth_req->auth_res_address = digest_paddr;
		auth_req->auth_res_sz = desc->qcd_auth_sz;

		auth_req->auth_off = 0;
		auth_req->auth_len = crp->crp_payload_length;

		auth_req->hash_state_sz =
		    roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) >> 3;
		auth_req->u1.aad_addr = crp->crp_aad_length > 0 ?
		    qsc->qsc_gcm_aad_paddr : 0;

		/*
		 * Update the hash state block if necessary.  This only occurs
		 * when the AAD length changes between requests in a session and
		 * is synchronized by qat_process().
		 */
		aad_sz = htobe32(crp->crp_aad_length);
		aad_szp1 = (uint32_t *)(
		    __DECONST(uint8_t *, desc->qcd_content_desc) +
		    desc->qcd_gcm_aad_sz_offset1);
		aad_szp2 = __DECONST(uint8_t *, desc->qcd_content_desc) +
		    desc->qcd_gcm_aad_sz_offset2;
		if (__predict_false(*aad_szp1 != aad_sz)) {
			*aad_szp1 = aad_sz;
			*aad_szp2 = (uint8_t)roundup2(crp->crp_aad_length,
			    QAT_AES_GCM_AAD_ALIGN);
			bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
			    qs->qs_desc_mem.qdm_dma_map,
			    BUS_DMASYNC_PREWRITE);
		}
	} else {
		cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
		if (cmd_id != FW_LA_CMD_AUTH) {
			if (cmd_id == FW_LA_CMD_CIPHER ||
			    cmd_id == FW_LA_CMD_HASH_CIPHER)
				next_slice = FW_SLICE_DRAM_WR;
			else
				next_slice = FW_SLICE_AUTH;

			cipher_req->cipher_state_sz =
			    desc->qcd_cipher_blk_sz / 8;

			cipher_req->curr_id = FW_SLICE_CIPHER;
			cipher_req->next_id = next_slice;

			if (crp->crp_aad_length == 0) {
				cipher_req->cipher_off = 0;
			} else if (crp->crp_aad == NULL) {
				cipher_req->cipher_off =
				    crp->crp_payload_start - crp->crp_aad_start;
			} else {
				cipher_req->cipher_off = crp->crp_aad_length;
			}
			cipher_req->cipher_len = crp->crp_payload_length;
			cipher_req->state_address = qsc->qsc_iv_buf_paddr;
		}
		if (cmd_id != FW_LA_CMD_CIPHER) {
			if (cmd_id == FW_LA_CMD_AUTH)
				auth_req = (struct fw_la_auth_req_params *)
				    req_params_ptr;
			else
				auth_req = (struct fw_la_auth_req_params *)
				    (cipher_req + 1);
			if (cmd_id == FW_LA_CMD_HASH_CIPHER)
				next_slice = FW_SLICE_CIPHER;
			else
				next_slice = FW_SLICE_DRAM_WR;

			auth_req->curr_id = FW_SLICE_AUTH;
			auth_req->next_id = next_slice;

			auth_req->auth_res_address = digest_paddr;
			auth_req->auth_res_sz = desc->qcd_auth_sz;

			auth_req->auth_len =
			    crp->crp_payload_length + crp->crp_aad_length;
			auth_req->auth_off = 0;

			auth_req->hash_state_sz = 0;
			auth_req->u1.prefix_addr = desc->qcd_hash_state_paddr +
			    desc->qcd_state_storage_sz;
		}
	}
}