/*	$NetBSD: qatvar.h,v 1.3 2022/07/06 12:33:42 andvar Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DEV_PCI_QATVAR_H_
#define _DEV_PCI_QATVAR_H_

#include <sys/malloc.h>			/* for cryptodev.h */
#include <opencrypto/cryptodev.h>

#define QAT_NSYMREQ	256
#define QAT_NSYMCOOKIE	((QAT_NSYMREQ * 2 + 1) * 2)	/* XXX why? */
#define QAT_NASYMREQ	64
#define QAT_BATCH_SUBMIT_FREE_SPACE	2
#define QAT_NSESSION	16384

#define QAT_EV_NAME_SIZE		32
#define QAT_RING_NAME_SIZE		32

#define QAT_MAXSEG			32	/* max segments for sg dma */
#define QAT_MAXLEN			65535	/* IP_MAXPACKET */

#define QAT_HB_INTERVAL			500	/* heartbeat msec */
#define QAT_SSM_WDT			100

#if !defined(SET)
#define	SET(t, f)	((t) |= (f))
#define	ISSET(t, f)	((t) & (f))
#define	CLR(t, f)	((t) &= ~(f))
#endif

#define QAT_EVENT_COUNTERS

#ifdef QAT_EVENT_COUNTERS
#define QAT_EVCNT_ATTACH(sc, ev, type, name, fmt, args...)		\
		do {							\
			snprintf((name), sizeof((name)), fmt, ##args);	\
			evcnt_attach_dynamic((ev), (type), NULL,	\
			    device_xname((sc)->sc_dev), (name));	\
		} while (0)
#define QAT_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define QAT_EVCNT_ATTACH(sc, ev, type, name, fmt, args...)	/* nothing */
#define QAT_EVCNT_INCR(ev)	/* nothing */
#endif
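
/*
 * Illustrative sketch of how the event counter helpers are meant to be
 * used (the real call sites live in qat.c and may use different names
 * and format strings):
 *
 *	QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC,
 *	    qr->qr_ev_rxmsg_name, "%s rxmsg", qr->qr_name);
 *	...
 *	QAT_EVCNT_INCR(&qr->qr_ev_rxmsg);
 *
 * The buffer passed as "name" must outlive the evcnt, e.g. the
 * qr_ev_*_name arrays embedded in struct qat_ring below.
 */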

enum qat_chip_type {
	QAT_CHIP_C2XXX = 0,	/* NanoQAT: Atom C2000 */
	QAT_CHIP_C2XXX_IOV,
	QAT_CHIP_C3XXX,		/* Atom C3000 */
	QAT_CHIP_C3XXX_IOV,
	QAT_CHIP_C62X,
	QAT_CHIP_C62X_IOV,
	QAT_CHIP_D15XX,
	QAT_CHIP_D15XX_IOV,
};

enum qat_sku {
	QAT_SKU_UNKNOWN = 0,
	QAT_SKU_1,
	QAT_SKU_2,
	QAT_SKU_3,
	QAT_SKU_4,
	QAT_SKU_VF,
};

enum qat_ae_status {
	QAT_AE_ENABLED = 1,
	QAT_AE_ACTIVE,
	QAT_AE_DISABLED
};

#define TIMEOUT_AE_RESET	100
#define TIMEOUT_AE_CHECK	10000
#define TIMEOUT_AE_CSR		500
#define AE_EXEC_CYCLE		20

#define QAT_UOF_MAX_PAGE		1
#define QAT_UOF_MAX_PAGE_REGION		1

struct qat_dmamem {
	bus_dmamap_t qdm_dma_map;
	bus_size_t qdm_dma_size;
	bus_dma_segment_t qdm_dma_seg;
	void *qdm_dma_vaddr;
};

/* Valid internal ring size values */
#define QAT_RING_SIZE_128 0x01
#define QAT_RING_SIZE_256 0x02
#define QAT_RING_SIZE_512 0x03
#define QAT_RING_SIZE_4K 0x06
#define QAT_RING_SIZE_16K 0x08
#define QAT_RING_SIZE_4M 0x10
#define QAT_MIN_RING_SIZE QAT_RING_SIZE_128
#define QAT_MAX_RING_SIZE QAT_RING_SIZE_4M
#define QAT_DEFAULT_RING_SIZE QAT_RING_SIZE_16K

/* Valid internal msg size values */
#define QAT_MSG_SIZE_32 0x01
#define QAT_MSG_SIZE_64 0x02
#define QAT_MSG_SIZE_128 0x04
#define QAT_MIN_MSG_SIZE QAT_MSG_SIZE_32
#define QAT_MAX_MSG_SIZE QAT_MSG_SIZE_128

/* Size to bytes conversion macros for ring and msg size values */
#define QAT_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define QAT_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
#define QAT_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define QAT_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)

/* Minimum ring buffer size for memory allocation */
#define QAT_RING_SIZE_BYTES_MIN(SIZE) \
	((SIZE < QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K)) ? \
		QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K) : SIZE)
#define QAT_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define QAT_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
				SIZE) & ~0x4)
/* Max outstanding requests */
#define QAT_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
	((((1 << (RING_SIZE - 1)) << 3) >> QAT_SIZE_TO_POW(MSG_SIZE)) - 1)
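
/*
 * Worked example (illustrative only): with the defaults
 * QAT_DEFAULT_RING_SIZE (QAT_RING_SIZE_16K, 0x08) and QAT_MSG_SIZE_64 (0x02):
 *
 *	QAT_SIZE_TO_RING_SIZE_IN_BYTES(0x08) = (1 << 7) << 7 = 16384 bytes
 *	QAT_MSG_SIZE_TO_BYTES(0x02)          = 0x02 << 5     = 64 bytes
 *	QAT_SIZE_TO_POW(0x02)                = 2
 *	QAT_MAX_INFLIGHTS(0x08, 0x02)        = ((128 << 3) >> 2) - 1 = 255
 *
 * i.e. a 16 KB ring of 64-byte messages has 256 slots, of which at most
 * 255 are allowed in flight (one slot is always kept free).
 */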

#define QAT_RING_PATTERN		0x7f

struct qat_softc;

typedef int (*qat_cb_t)(struct qat_softc *, void *, void *);

struct qat_ring {
	struct qat_dmamem qr_dma;
	bus_addr_t qr_ring_paddr;
	void *qr_ring_vaddr;
	uint32_t * volatile qr_inflight;	/* tx/rx shared */
	uint32_t qr_head;
	uint32_t qr_tail;
	uint8_t qr_msg_size;
	uint8_t qr_ring_size;
	uint32_t qr_ring;	/* ring number in bank */
	uint32_t qr_bank;	/* bank number in device */
	uint32_t qr_ring_id;
	uint32_t qr_ring_mask;
	qat_cb_t qr_cb;
	void *qr_cb_arg;

	const char *qr_name;
	kmutex_t qr_ring_mtx;   /* Lock per ring */

#ifdef QAT_EVENT_COUNTERS
	char qr_ev_rxintr_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_rxintr;
	char qr_ev_rxmsg_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_rxmsg;
	char qr_ev_txmsg_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_txmsg;
	char qr_ev_txfull_name[QAT_EV_NAME_SIZE];
	struct evcnt qr_ev_txfull;
#endif
};

struct qat_bank {
	struct qat_softc *qb_sc;	/* back pointer to softc */
	uint32_t qb_intr_mask;		/* current interrupt mask */
	uint32_t qb_allocated_rings;	/* current allocated ring bitfield */
	uint32_t qb_coalescing_time;	/* timer in nano sec, 0: disabled */
#define COALESCING_TIME_INTERVAL_DEFAULT	10000
#define COALESCING_TIME_INTERVAL_MIN		500
#define COALESCING_TIME_INTERVAL_MAX		0xfffff
	uint32_t qb_bank;		/* bank index */
	kmutex_t qb_bank_mtx;
	void *qb_ih_cookie;

#ifdef QAT_EVENT_COUNTERS
	char qb_ev_rxintr_name[QAT_EV_NAME_SIZE];
	struct evcnt qb_ev_rxintr;
#endif

	struct qat_ring qb_et_rings[MAX_RING_PER_BANK];

};

struct qat_ap_bank {
	uint32_t qab_nf_mask;
	uint32_t qab_nf_dest;
	uint32_t qab_ne_mask;
	uint32_t qab_ne_dest;
};

struct qat_ae_page {
	struct qat_ae_page *qap_next;
	struct qat_uof_page *qap_page;
	struct qat_ae_region *qap_region;
	u_int qap_flags;
};

#define QAT_AE_PAGA_FLAG_WAITING	(1 << 0)

struct qat_ae_region {
	struct qat_ae_page *qar_loaded_page;
	SIMPLEQ_HEAD(, qat_ae_page) qar_waiting_pages;
};

struct qat_ae_slice {
	u_int qas_assigned_ctx_mask;
	struct qat_ae_region qas_regions[QAT_UOF_MAX_PAGE_REGION];
	struct qat_ae_page qas_pages[QAT_UOF_MAX_PAGE];
	struct qat_ae_page *qas_cur_pages[MAX_AE_CTX];
	struct qat_uof_image *qas_image;
};

#define QAT_AE(sc, ae)			\
		((sc)->sc_ae[ae])

struct qat_ae {
	u_int qae_state;		/* AE state */
	u_int qae_ustore_size;		/* micro-store size */
	u_int qae_free_addr;		/* free micro-store address */
	u_int qae_free_size;		/* free micro-store size */
	u_int qae_live_ctx_mask;	/* live context mask */
	u_int qae_ustore_dram_addr;	/* micro-store DRAM address */
	u_int qae_reload_size;		/* reloadable code size */

	/* aefw */
	u_int qae_num_slices;
	struct qat_ae_slice qae_slices[MAX_AE_CTX];
	u_int qae_reloc_ustore_dram;	/* reloadable ustore-dram address */
	u_int qae_effect_ustore_size;	/* effective AE ustore size */
	u_int qae_shareable_ustore;
};

struct qat_mof {
	void *qmf_sym;			/* SYM_OBJS in sc_fw_mof */
	size_t qmf_sym_size;
	void *qmf_uof_objs;		/* UOF_OBJS in sc_fw_mof */
	size_t qmf_uof_objs_size;
	void *qmf_suof_objs;		/* SUOF_OBJS in sc_fw_mof */
	size_t qmf_suof_objs_size;
};

struct qat_ae_batch_init {
	u_int qabi_ae;
	u_int qabi_addr;
	u_int *qabi_value;
	u_int qabi_size;
	SIMPLEQ_ENTRY(qat_ae_batch_init) qabi_next;
};

SIMPLEQ_HEAD(qat_ae_batch_init_list, qat_ae_batch_init);

/* overwritten struct uof_uword_block */
struct qat_uof_uword_block {
	u_int quub_start_addr;		/* start address */
	u_int quub_num_words;		/* number of microwords */
	uint64_t quub_micro_words;	/* pointer to the uwords */
};

struct qat_uof_page {
	u_int qup_page_num;		/* page number */
	u_int qup_def_page;		/* default page */
	u_int qup_page_region;		/* region of page */
	u_int qup_beg_vaddr;		/* begin virtual address */
	u_int qup_beg_paddr;		/* begin physical address */

	u_int qup_num_uc_var;		/* num of uC var in array */
	struct uof_uword_fixup *qup_uc_var;
					/* array of uC variables */
	u_int qup_num_imp_var;		/* num of import var in array */
	struct uof_import_var *qup_imp_var;
					/* array of import variables */
	u_int qup_num_imp_expr;		/* num of import expr in array */
	struct uof_uword_fixup *qup_imp_expr;
					/* array of import expressions */
	u_int qup_num_neigh_reg;	/* num of neigh-reg in array */
	struct uof_uword_fixup *qup_neigh_reg;
					/* array of neigh-reg assignments */
	u_int qup_num_micro_words;	/* number of microwords in the seg */

	u_int qup_num_uw_blocks;	/* number of uword blocks */
	struct qat_uof_uword_block *qup_uw_blocks;
					/* array of uword blocks */
};

struct qat_uof_image {
	struct uof_image *qui_image;		/* image pointer */
	struct qat_uof_page qui_pages[QAT_UOF_MAX_PAGE];
						/* array of pages */

	u_int qui_num_ae_reg;			/* num of registers */
	struct uof_ae_reg *qui_ae_reg;		/* array of registers */

	u_int qui_num_init_reg_sym;		/* num of reg/sym init values */
	struct uof_init_reg_sym *qui_init_reg_sym;
					/* array of reg/sym init values */

	u_int qui_num_sbreak;			/* num of sbreak values */
	struct qui_sbreak *qui_sbreak;		/* array of sbreak values */

	u_int qui_num_uwords_used;
				/* highest uword address referenced + 1 */
};

struct qat_aefw_uof {
	size_t qafu_size;			/* uof size */
	struct uof_obj_hdr *qafu_obj_hdr;	/* UOF_OBJS */

	void *qafu_str_tab;
	size_t qafu_str_tab_size;

	u_int qafu_num_init_mem;
	struct uof_init_mem *qafu_init_mem;
	size_t qafu_init_mem_size;

	struct uof_var_mem_seg *qafu_var_mem_seg;

	struct qat_ae_batch_init_list qafu_lm_init[MAX_AE];
	size_t qafu_num_lm_init[MAX_AE];
	size_t qafu_num_lm_init_inst[MAX_AE];

	u_int qafu_num_imgs;			/* number of uof images */
	struct qat_uof_image qafu_imgs[MAX_NUM_AE * MAX_AE_CTX];
						/* uof images */
};

#define QAT_SERVICE_CRYPTO_A		(1 << 0)
#define QAT_SERVICE_CRYPTO_B		(1 << 1)

struct qat_admin_rings {
	uint32_t qadr_active_aes_per_accel;
	uint8_t qadr_srv_mask[MAX_AE_PER_ACCEL];

	struct qat_dmamem qadr_dma;
	struct fw_init_ring_table *qadr_master_ring_tbl;
	struct fw_init_ring_table *qadr_cya_ring_tbl;
	struct fw_init_ring_table *qadr_cyb_ring_tbl;

	struct qat_ring *qadr_admin_tx;
	struct qat_ring *qadr_admin_rx;
};

struct qat_accel_init_cb {
	int qaic_status;
};

struct qat_admin_comms {
	struct qat_dmamem qadc_dma;
	struct qat_dmamem qadc_const_tbl_dma;
	struct qat_dmamem qadc_hb_dma;
};

#define QAT_PID_MINOR_REV 0xf
#define QAT_PID_MAJOR_REV (0xf << 4)

struct qat_suof_image {
	char *qsi_simg_buf;
	u_long qsi_simg_len;
	char *qsi_css_header;
	char *qsi_css_key;
	char *qsi_css_signature;
	char *qsi_css_simg;
	u_long qsi_simg_size;
	u_int qsi_ae_num;
	u_int qsi_ae_mask;
	u_int qsi_fw_type;
	u_long qsi_simg_name;
	u_long qsi_appmeta_data;
	struct qat_dmamem qsi_dma;
};

struct qat_aefw_suof {
	u_int qafs_file_id;
	u_int qafs_check_sum;
	char qafs_min_ver;
	char qafs_maj_ver;
	char qafs_fw_type;
	char *qafs_suof_buf;
	u_int qafs_suof_size;
	char *qafs_sym_str;
	u_int qafs_sym_size;
	u_int qafs_num_simgs;
	struct qat_suof_image *qafs_simg;
};

enum qat_sym_hash_algorithm {
	QAT_SYM_HASH_NONE = 0,
	QAT_SYM_HASH_MD5,
	QAT_SYM_HASH_SHA1,
	QAT_SYM_HASH_SHA224,
	QAT_SYM_HASH_SHA256,
	QAT_SYM_HASH_SHA384,
	QAT_SYM_HASH_SHA512,
	QAT_SYM_HASH_AES_XCBC,
	QAT_SYM_HASH_AES_CCM,
	QAT_SYM_HASH_AES_GCM,
	QAT_SYM_HASH_KASUMI_F9,
	QAT_SYM_HASH_SNOW3G_UIA2,
	QAT_SYM_HASH_AES_CMAC,
	QAT_SYM_HASH_AES_GMAC,
	QAT_SYM_HASH_AES_CBC_MAC
};

#define QAT_HASH_MD5_BLOCK_SIZE			64
#define QAT_HASH_MD5_DIGEST_SIZE		16
#define QAT_HASH_MD5_STATE_SIZE			16
#define QAT_HASH_SHA1_BLOCK_SIZE		64
#define QAT_HASH_SHA1_DIGEST_SIZE		20
#define QAT_HASH_SHA1_STATE_SIZE		20
#define QAT_HASH_SHA224_BLOCK_SIZE		64
#define QAT_HASH_SHA224_DIGEST_SIZE		28
#define QAT_HASH_SHA224_STATE_SIZE		32
#define QAT_HASH_SHA256_BLOCK_SIZE		64
#define QAT_HASH_SHA256_DIGEST_SIZE		32
#define QAT_HASH_SHA256_STATE_SIZE		32
#define QAT_HASH_SHA384_BLOCK_SIZE		128
#define QAT_HASH_SHA384_DIGEST_SIZE		48
#define QAT_HASH_SHA384_STATE_SIZE		64
#define QAT_HASH_SHA512_BLOCK_SIZE		128
#define QAT_HASH_SHA512_DIGEST_SIZE		64
#define QAT_HASH_SHA512_STATE_SIZE		64
#define QAT_HASH_XCBC_PRECOMP_KEY_NUM		3
#define QAT_HASH_XCBC_MAC_BLOCK_SIZE		16
#define QAT_HASH_XCBC_MAC_128_DIGEST_SIZE	16
#define QAT_HASH_CMAC_BLOCK_SIZE		16
#define QAT_HASH_CMAC_128_DIGEST_SIZE		16
#define QAT_HASH_AES_CCM_BLOCK_SIZE		16
#define QAT_HASH_AES_CCM_DIGEST_SIZE		16
#define QAT_HASH_AES_GCM_BLOCK_SIZE		16
#define QAT_HASH_AES_GCM_DIGEST_SIZE		16
#define QAT_HASH_KASUMI_F9_BLOCK_SIZE		8
#define QAT_HASH_KASUMI_F9_DIGEST_SIZE		4
#define QAT_HASH_SNOW3G_UIA2_BLOCK_SIZE		8
#define QAT_HASH_SNOW3G_UIA2_DIGEST_SIZE	4
#define QAT_HASH_AES_CBC_MAC_BLOCK_SIZE		16
#define QAT_HASH_AES_CBC_MAC_DIGEST_SIZE	16
#define QAT_HASH_AES_GCM_ICV_SIZE_8		8
#define QAT_HASH_AES_GCM_ICV_SIZE_12		12
#define QAT_HASH_AES_GCM_ICV_SIZE_16		16
#define QAT_HASH_AES_CCM_ICV_SIZE_MIN		4
#define QAT_HASH_AES_CCM_ICV_SIZE_MAX		16
#define QAT_HASH_IPAD_BYTE			0x36
#define QAT_HASH_OPAD_BYTE			0x5c
#define QAT_HASH_IPAD_4_BYTES			0x36363636
#define QAT_HASH_OPAD_4_BYTES			0x5c5c5c5c
#define QAT_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES	0xAAAAAAAA

#define QAT_SYM_XCBC_STATE_SIZE		((QAT_HASH_XCBC_MAC_BLOCK_SIZE) * 3)
#define QAT_SYM_CMAC_STATE_SIZE		((QAT_HASH_CMAC_BLOCK_SIZE) * 3)

struct qat_sym_hash_alg_info {
	uint32_t qshai_digest_len;		/* Digest length in bytes */
	uint32_t qshai_block_len;		/* Block length in bytes */
	const uint8_t *qshai_init_state;	/* Initialiser state for hash
						 * algorithm */
	uint32_t qshai_state_size;		/* size of above state in bytes */

	const struct swcr_auth_hash *qshai_sah;	/* software auth hash */
	uint32_t qshai_state_offset;		/* offset to state in *_CTX */
	uint32_t qshai_state_word;
};

struct qat_sym_hash_qat_info {
	uint32_t qshqi_algo_enc;	/* QAT Algorithm encoding */
	uint32_t qshqi_auth_counter;	/* Counter value for Auth */
	uint32_t qshqi_state1_len;	/* QAT state1 length in bytes */
	uint32_t qshqi_state2_len;	/* QAT state2 length in bytes */
};

struct qat_sym_hash_def {
	const struct qat_sym_hash_alg_info *qshd_alg;
	const struct qat_sym_hash_qat_info *qshd_qat;
};

#define QAT_SYM_REQ_PARAMS_SIZE_MAX			(24 + 32)
/* Reserve enough space for cipher and authentication request params */
/* These values are guaranteed by CTASSERTs in qat_hw*var.h */

#define QAT_SYM_REQ_PARAMS_SIZE_PADDED			\
		roundup(QAT_SYM_REQ_PARAMS_SIZE_MAX, QAT_OPTIMAL_ALIGN)
/* Pad out to 64-byte multiple to ensure optimal alignment of next field */

#define QAT_SYM_KEY_TLS_PREFIX_SIZE			(128)
/* Hash prefix size in bytes for TLS (128 = max for SHA2 (384, 512)) */

#define QAT_SYM_KEY_MAX_HASH_STATE_BUFFER		\
		(QAT_SYM_KEY_TLS_PREFIX_SIZE * 2)
/* hash state prefix buffer structure that holds the maximum sized secret */

#define QAT_SYM_HASH_BUFFER_LEN			QAT_HASH_SHA512_STATE_SIZE
/* Buffer length to hold a 16-byte MD5 key and a 20-byte SHA1 key */

struct qat_sym_bulk_cookie {
	uint8_t qsbc_req_params_buf[QAT_SYM_REQ_PARAMS_SIZE_PADDED];
	/* memory block reserved for request params
	 * NOTE: Field must be correctly aligned in memory for access by QAT
	 * engine */
	struct qat_crypto *qsbc_crypto;
	struct qat_session *qsbc_session;
	/* Session context */
	void *qsbc_cb_tag;
	/* correlator supplied by the client */
	uint8_t qsbc_msg[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
	/* QAT request message */
} __aligned(QAT_OPTIMAL_ALIGN);

struct qat_sym_cookie {
	union qat_sym_cookie_u {
		/* should be 64-byte aligned */
		struct qat_sym_bulk_cookie qsc_bulk_cookie;
						/* symmetric bulk cookie */
#ifdef notyet
		struct qat_sym_key_cookie qsc_key_cookie;
						/* symmetric key cookie */
		struct qat_sym_nrbg_cookie qsc_nrbg_cookie;
						/* symmetric NRBG cookie */
#endif
	} u;

	/* should be 64-byte aligned */
	struct buffer_list_desc qsc_buf_list;
	struct flat_buffer_desc qsc_flat_bufs[QAT_MAXSEG]; /* must follow qsc_buf_list */

	bus_dmamap_t *qsc_self_dmamap;	/* self DMA mapping and
					   end of DMA region */

	uint8_t qsc_iv_buf[EALG_MAX_BLOCK_LEN];

	bus_dmamap_t qsc_buf_dmamap;	/* qsc_flat_bufs DMA mapping */
	void *qsc_buf;

	bus_addr_t qsc_bulk_req_params_buf_paddr;
	bus_addr_t qsc_buffer_list_desc_paddr;
	bus_addr_t qsc_iv_buf_paddr;

#ifdef notyet
	uint64_t qsc_key_content_desc_paddr;
	uint64_t qsc_key_hash_state_buf_paddr;
	uint64_t qsc_key_ssl_key_in_paddr;
	uint64_t qsc_key_tls_key_in_paddr;
#endif
};

CTASSERT(offsetof(struct qat_sym_cookie,
    u.qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(offsetof(struct qat_sym_cookie, qsc_buf_list) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(sizeof(struct buffer_list_desc) == 16);

#define MAX_CIPHER_SETUP_BLK_SZ						\
		(sizeof(struct hw_cipher_config) +			\
		2 * HW_KASUMI_KEY_SZ + 2 * HW_KASUMI_BLK_SZ)
#define MAX_HASH_SETUP_BLK_SZ	sizeof(union hw_auth_algo_blk)

/* These values are guaranteed by CTASSERTs in qat_hw*var.h */
#define HASH_CONTENT_DESC_SIZE		176
#define CIPHER_CONTENT_DESC_SIZE	64

#define CONTENT_DESC_MAX_SIZE	roundup(				\
		HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE,	\
		QAT_OPTIMAL_ALIGN)
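
/*
 * Illustrative arithmetic (assuming QAT_OPTIMAL_ALIGN is 64, as the
 * alignment comments in this file suggest):
 *	CONTENT_DESC_MAX_SIZE = roundup(176 + 64, 64) = 256 bytes
 */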

#define QAT_MAX_AAD_SIZE_BYTES		256

struct qat_crypto_desc {
	uint8_t qcd_content_desc[CONTENT_DESC_MAX_SIZE];
	/* used only for qat 1.5 */
	uint8_t qcd_hash_state_prefix_buf[QAT_MAX_AAD_SIZE_BYTES];

	enum fw_slice qcd_slices[MAX_FW_SLICE];
	enum fw_la_cmd_id qcd_cmd_id;
	enum hw_cipher_dir qcd_cipher_dir;

	bus_addr_t qcd_desc_paddr;
	bus_addr_t qcd_hash_state_paddr;

	/* content desc info */
	uint8_t qcd_hdr_sz;		/* in quad words */
	uint8_t qcd_hw_blk_sz;		/* in quad words */
	/* hash info */
	uint8_t qcd_state_storage_sz;	/* in quad words */
	/* cipher info */
	uint16_t qcd_cipher_blk_sz;	/* in bytes */
	uint16_t qcd_auth_sz;		/* in bytes */

	uint8_t qcd_req_cache[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
} __aligned(QAT_OPTIMAL_ALIGN);

/* should be aligned to 64 bytes */
struct qat_session {
	struct qat_crypto_desc qs_dec_desc;	/* should be at top of struct */
	/* decrypt or auth then decrypt or auth */

	struct qat_crypto_desc qs_enc_desc;
	/* encrypt or encrypt then auth */

	uint32_t qs_lid;
	uint32_t qs_status;
#define QAT_SESSION_STATUS_ACTIVE	(1 << 0)
#define QAT_SESSION_STATUS_FREEING	(1 << 1)
	uint32_t qs_inflight;

	kmutex_t qs_session_mtx;
};

CTASSERT(offsetof(struct qat_session, qs_dec_desc) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(offsetof(struct qat_session, qs_enc_desc) % QAT_OPTIMAL_ALIGN == 0);

struct qat_crypto_bank {
	uint16_t qcb_bank;

	struct qat_ring *qcb_sym_tx;
	struct qat_ring *qcb_sym_rx;

	struct qat_dmamem qcb_symck_dmamems[QAT_NSYMCOOKIE];
	struct qat_sym_cookie *qcb_symck_free[QAT_NSYMCOOKIE];
	uint32_t qcb_symck_free_count;

	kmutex_t qcb_bank_mtx;

	struct qat_crypto *qcb_crypto;

	char qcb_ring_names[2][QAT_RING_NAME_SIZE];	/* sym tx,rx */
#ifdef QAT_EVENT_COUNTERS
	char qcb_ev_no_symck_name[QAT_EV_NAME_SIZE];
	struct evcnt qcb_ev_no_symck;
#endif
};


struct qat_crypto {
	struct qat_softc *qcy_sc;
	uint32_t qcy_bank_mask;
	uint16_t qcy_num_banks;

	int32_t qcy_cid;		/* OpenCrypto driver ID */

	struct qat_crypto_bank *qcy_banks; /* array of qat_crypto_bank */

	struct qat_dmamem qcy_session_dmamems[QAT_NSESSION];
	struct qat_session *qcy_sessions[QAT_NSESSION];
	struct qat_session *qcy_session_free[QAT_NSESSION];
	uint32_t qcy_session_free_count;

	kmutex_t qcy_crypto_mtx;

#ifdef QAT_EVENT_COUNTERS
	char qcy_ev_new_sess_name[QAT_EV_NAME_SIZE];
	struct evcnt qcy_ev_new_sess;
	char qcy_ev_free_sess_name[QAT_EV_NAME_SIZE];
	struct evcnt qcy_ev_free_sess;
	char qcy_ev_no_sess_name[QAT_EV_NAME_SIZE];
	struct evcnt qcy_ev_no_sess;
#endif
};

struct qat_hw {
	int8_t qhw_sram_bar_id;
	int8_t qhw_misc_bar_id;
	int8_t qhw_etr_bar_id;

	bus_size_t qhw_cap_global_offset;
	bus_size_t qhw_ae_offset;
	bus_size_t qhw_ae_local_offset;
	bus_size_t qhw_etr_bundle_size;

	/* crypto processing callbacks */
	size_t qhw_crypto_opaque_offset;
	void (*qhw_crypto_setup_req_params)(struct qat_crypto_bank *,
	    struct qat_session *, struct qat_crypto_desc const *,
	    struct qat_sym_cookie *, struct cryptodesc *, struct cryptodesc *,
	    bus_addr_t);
	void (*qhw_crypto_setup_desc)(struct qat_crypto *, struct qat_session *,
	    struct qat_crypto_desc *, struct cryptoini *, struct cryptoini *);

	uint8_t qhw_num_banks;			/* max number of banks */
	uint8_t qhw_num_ap_banks;		/* max number of AutoPush banks */
	uint8_t qhw_num_rings_per_bank;		/* rings per bank */
	uint8_t qhw_num_accel;			/* max number of accelerators */
	uint8_t qhw_num_engines;		/* max number of accelerator engines */
	uint8_t qhw_tx_rx_gap;
	uint32_t qhw_tx_rings_mask;
	uint32_t qhw_clock_per_sec;
	bool qhw_fw_auth;
	uint32_t qhw_fw_req_size;
	uint32_t qhw_fw_resp_size;

	uint8_t qhw_ring_sym_tx;
	uint8_t qhw_ring_sym_rx;
	uint8_t qhw_ring_asym_tx;
	uint8_t qhw_ring_asym_rx;

	/* MSIx */
	uint32_t qhw_msix_ae_vec_gap;	/* gap to ae vec from bank */

	const char *qhw_mof_fwname;
	const char *qhw_mmp_fwname;

	uint32_t qhw_prod_type;		/* cpu type */

	/* setup callbacks */
	uint32_t (*qhw_get_accel_mask)(struct qat_softc *);
	uint32_t (*qhw_get_ae_mask)(struct qat_softc *);
	enum qat_sku (*qhw_get_sku)(struct qat_softc *);
	uint32_t (*qhw_get_accel_cap)(struct qat_softc *);
	const char *(*qhw_get_fw_uof_name)(struct qat_softc *);
	void (*qhw_enable_intr)(struct qat_softc *);
	void (*qhw_init_etr_intr)(struct qat_softc *, int);
	int (*qhw_init_admin_comms)(struct qat_softc *);
	int (*qhw_send_admin_init)(struct qat_softc *);
	int (*qhw_init_arb)(struct qat_softc *);
	void (*qhw_get_arb_mapping)(struct qat_softc *, const uint32_t **);
	void (*qhw_enable_error_correction)(struct qat_softc *);
	int (*qhw_check_uncorrectable_error)(struct qat_softc *);
	void (*qhw_print_err_registers)(struct qat_softc *);
	void (*qhw_disable_error_interrupts)(struct qat_softc *);
	int (*qhw_check_slice_hang)(struct qat_softc *);
	int (*qhw_set_ssm_wdtimer)(struct qat_softc *);
};


/* sc_flags */
#define QAT_FLAG_ESRAM_ENABLE_AUTO_INIT	(1 << 0)
#define QAT_FLAG_SHRAM_WAIT_READY	(1 << 1)

/* sc_accel_cap */
#define QAT_ACCEL_CAP_CRYPTO_SYMMETRIC	(1 << 0)
#define QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC	(1 << 1)
#define QAT_ACCEL_CAP_CIPHER		(1 << 2)
#define QAT_ACCEL_CAP_AUTHENTICATION	(1 << 3)
#define QAT_ACCEL_CAP_REGEX		(1 << 4)
#define QAT_ACCEL_CAP_COMPRESSION	(1 << 5)
#define QAT_ACCEL_CAP_LZS_COMPRESSION	(1 << 6)
#define QAT_ACCEL_CAP_RANDOM_NUMBER	(1 << 7)
#define QAT_ACCEL_CAP_ZUC		(1 << 8)
#define QAT_ACCEL_CAP_SHA3		(1 << 9)
#define QAT_ACCEL_CAP_KPT		(1 << 10)

#define QAT_ACCEL_CAP_BITS	\
	"\177\020"	\
	"b\x0a"		"KPT\0" \
	"b\x09"		"SHA3\0" \
	"b\x08"		"ZUC\0" \
	"b\x07"		"RANDOM_NUMBER\0" \
	"b\x06"		"LZS_COMPRESSION\0" \
	"b\x05"		"COMPRESSION\0" \
	"b\x04"		"REGEX\0" \
	"b\x03"		"AUTHENTICATION\0" \
	"b\x02"		"CIPHER\0" \
	"b\x01"		"CRYPTO_ASYMMETRIC\0" \
	"b\x00"		"CRYPTO_SYMMETRIC\0"
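
/*
 * QAT_ACCEL_CAP_BITS is a snprintb(3) format string.  A sketch of how it
 * can be used to format sc_accel_cap for a log message (the driver's own
 * call sites may differ):
 *
 *	char buf[128];
 *	snprintb(buf, sizeof(buf), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap);
 *	aprint_verbose_dev(sc->sc_dev, "accel capabilities %s\n", buf);
 */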

#define QAT_HI_PRIO_RING_WEIGHT		0xfc
#define QAT_LO_PRIO_RING_WEIGHT		0xfe
#define QAT_DEFAULT_RING_WEIGHT		0xff
#define QAT_DEFAULT_PVL			0

struct qat_softc {
	struct device *sc_dev;

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	bus_space_tag_t sc_csrt[MAX_BARS];
	bus_space_handle_t sc_csrh[MAX_BARS];
	bus_size_t sc_csrs[MAX_BARS];

	bus_dma_tag_t sc_dmat;

	uint32_t sc_ae_num;
	uint32_t sc_ae_mask;

	struct qat_crypto sc_crypto;		/* crypto services */

	struct qat_hw sc_hw;

	uint8_t sc_rev;
	enum qat_sku sc_sku;
	uint32_t sc_flags;

	uint32_t sc_accel_num;
	uint32_t sc_accel_mask;
	uint32_t sc_accel_cap;

	struct qat_admin_rings sc_admin_rings;	/* used only for qat 1.5 */
	struct qat_admin_comms sc_admin_comms;	/* used only for qat 1.7 */

	/* ETR */
	struct qat_bank *sc_etr_banks;		/* array of etr banks */
	struct qat_ap_bank *sc_etr_ap_banks;	/* array of etr auto push banks */

	/* AE */
	struct qat_ae sc_ae[MAX_NUM_AE];

	/* Interrupt */
	pci_intr_handle_t *sc_ih;		/* banks and ae cluster ih */
	void *sc_ae_ih_cookie;			/* ae cluster ih cookie */

	/* Firmware */
	void *sc_fw_mof;			/* mof via firmload(9) */
	size_t sc_fw_mof_size;			/* mof size */
	struct qat_mof sc_mof;			/* mof sections */

	const char *sc_fw_uof_name;		/* uof/suof name in mof */

	void *sc_fw_uof;			/* uof head */
	size_t sc_fw_uof_size;			/* uof size */
	struct qat_aefw_uof sc_aefw_uof;	/* UOF_OBJS in uof */

	void *sc_fw_suof;			/* suof head */
	size_t sc_fw_suof_size;			/* suof size */
	struct qat_aefw_suof sc_aefw_suof;	/* suof context */

	void *sc_fw_mmp;			/* mmp via firmload(9) */
	size_t sc_fw_mmp_size;			/* mmp size */
};

#define QAT_DUMP_DESC		__BIT(0)
#define QAT_DUMP_RING		__BIT(1)
#define QAT_DUMP_RING_MSG	__BIT(2)
#define QAT_DUMP_PCI		__BIT(3)
#define QAT_DUMP_AEFW		__BIT(4)

//#define QAT_DUMP		(__BITS(0, 4))

#ifdef QAT_DUMP

#include <sys/endian.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#endif

/*
 * If QAT_DUMP is defined, yield the cpu to other threads to avoid
 * triggering the spinout detection in mutex_enter.
 *
 * The printf calls for QAT_DUMP_PCI take a lot of cpu time, and the
 * configroot thread, which runs qat_init(), holds kernel_lock while
 * the uvm scheduler is not yet working at that point.
 */
#define QAT_YIELD()	preempt_point()

extern int qat_dump;

void		qat_dump_raw(int, const char *, void *, size_t);
void		qat_dump_ring(int, int);
void		qat_dump_mbuf(struct mbuf *, int, int);

static inline void
qat_print_sym(uintptr_t pc)
{
#ifdef DDB
	const char *name;
	db_expr_t offset;

	db_find_sym_and_offset((db_expr_t)pc, &name, &offset);

	if (name != NULL) {
		printf("%zx (%s+%zx)", (size_t)pc, name, (size_t)offset);
		return;
	}
#endif
	printf("%zx", (size_t)pc);
}

static inline void
qat_dump_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset,
    int value)
{
pc:
	if ((qat_dump & QAT_DUMP_PCI) == 0)
		return;
	printf("[qat_pci]: w %02x+%04zx %08x ", baroff, (size_t)offset, value);
	qat_print_sym((uintptr_t)&&pc);
	printf("\n");
}

#else /* QAT_DUMP */
#define QAT_YIELD()
#endif /* QAT_DUMP */

static inline void
qat_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset,
    uint32_t value)
{

	KASSERT(baroff >= 0 && baroff < MAX_BARS);

	bus_space_write_4(sc->sc_csrt[baroff],
	    sc->sc_csrh[baroff], offset, value);
#ifdef QAT_DUMP
	qat_dump_bar_write_4(sc, baroff, offset, value);
#endif
}

static inline uint32_t
qat_bar_read_4(struct qat_softc *sc, int baroff, bus_size_t offset)
{

	KASSERT(baroff >= 0 && baroff < MAX_BARS);

	return bus_space_read_4(sc->sc_csrt[baroff],
	    sc->sc_csrh[baroff], offset);
}

static inline void
qat_misc_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_bar_write_4(sc, sc->sc_hw.qhw_misc_bar_id, offset, value);
}

static inline uint32_t
qat_misc_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_bar_read_4(sc, sc->sc_hw.qhw_misc_bar_id, offset);
}

static inline void
qat_misc_read_write_or_4(struct qat_softc *sc, bus_size_t offset,
    uint32_t value)
{
	uint32_t reg;

	reg = qat_misc_read_4(sc, offset);
	reg |= value;
	qat_misc_write_4(sc, offset, reg);
}

static inline void
qat_misc_read_write_and_4(struct qat_softc *sc, bus_size_t offset,
    uint32_t mask)
{
	uint32_t reg;

	reg = qat_misc_read_4(sc, offset);
	reg &= mask;
	qat_misc_write_4(sc, offset, reg);
}

static inline void
qat_etr_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_bar_write_4(sc, sc->sc_hw.qhw_etr_bar_id, offset, value);
}

static inline uint32_t
qat_etr_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_bar_read_4(sc, sc->sc_hw.qhw_etr_bar_id, offset);
}

static inline void
qat_ae_local_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
	uint32_t value)
{

	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
	    (offset & AE_LOCAL_CSR_MASK);

	qat_misc_write_4(sc, sc->sc_hw.qhw_ae_local_offset + offset,
	    value);
}

static inline uint32_t
qat_ae_local_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset)
{

	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
	    (offset & AE_LOCAL_CSR_MASK);

	return qat_misc_read_4(sc, sc->sc_hw.qhw_ae_local_offset + offset);
}

static inline void
qat_ae_xfer_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
	uint32_t value)
{
	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_XFER_AE_MASK) |
	    __SHIFTIN(offset, AE_XFER_CSR_MASK);

	qat_misc_write_4(sc, sc->sc_hw.qhw_ae_offset + offset, value);
}

static inline void
qat_cap_global_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_misc_write_4(sc, sc->sc_hw.qhw_cap_global_offset + offset, value);
}

static inline uint32_t
qat_cap_global_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_misc_read_4(sc, sc->sc_hw.qhw_cap_global_offset + offset);
}


static inline void
qat_etr_bank_write_4(struct qat_softc *sc, int bank,
	bus_size_t offset, uint32_t value)
{

	qat_etr_write_4(sc, sc->sc_hw.qhw_etr_bundle_size * bank + offset,
	    value);
}

static inline uint32_t
qat_etr_bank_read_4(struct qat_softc *sc, int bank,
	bus_size_t offset)
{

	return qat_etr_read_4(sc,
	    sc->sc_hw.qhw_etr_bundle_size * bank + offset);
}

static inline void
qat_etr_ap_bank_write_4(struct qat_softc *sc, int ap_bank,
	bus_size_t offset, uint32_t value)
{

	qat_etr_write_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset, value);
}

static inline uint32_t
qat_etr_ap_bank_read_4(struct qat_softc *sc, int ap_bank,
	bus_size_t offset)
{

	return qat_etr_read_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset);
}


static inline void
qat_etr_bank_ring_write_4(struct qat_softc *sc, int bank, int ring,
	bus_size_t offset, uint32_t value)
{

	qat_etr_bank_write_4(sc, bank, (ring << 2) + offset, value);
}

static inline uint32_t
qat_etr_bank_ring_read_4(struct qat_softc *sc, int bank, int ring,
	bus_size_t offset)
{

	return qat_etr_bank_read_4(sc, bank, (ring << 2) + offset);
}

static inline void
qat_etr_bank_ring_base_write_8(struct qat_softc *sc, int bank, int ring,
	uint64_t value)
{
	uint32_t lo, hi;

	lo = (uint32_t)(value & 0xffffffff);
	hi = (uint32_t)((value & 0xffffffff00000000ULL) >> 32);
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_LBASE, lo);
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_UBASE, hi);
}

static inline void
qat_arb_ringsrvarben_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_RINGSRVARBEN_OFFSET +
	    (ARB_REG_SLOT * index), value);
}

static inline void
qat_arb_sarconfig_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_OFFSET +
	    (ARB_REG_SIZE * index), value);
}

static inline void
qat_arb_wrk_2_ser_map_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_OFFSET + ARB_WRK_2_SER_MAP_OFFSET +
	    (ARB_REG_SIZE * index), value);
}

void *		qat_alloc_mem(size_t);
void		qat_free_mem(void *);
void		qat_free_dmamem(struct qat_softc *, struct qat_dmamem *);
int		qat_alloc_dmamem(struct qat_softc *, struct qat_dmamem *,
		    bus_size_t, bus_size_t);

int		qat_etr_setup_ring(struct qat_softc *, int, uint32_t, uint32_t,
		    uint32_t, qat_cb_t, void *, const char *,
		    struct qat_ring **);
int		qat_etr_put_msg(struct qat_softc *, struct qat_ring *,
		    uint32_t *);

void		qat_memcpy_htobe64(void *, const void *, size_t);
void		qat_memcpy_htobe32(void *, const void *, size_t);
void		qat_memcpy_htobe(void *, const void *, size_t, uint32_t);
void		qat_crypto_hmac_precompute(struct qat_crypto_desc *,
		    struct cryptoini *cria, struct qat_sym_hash_def const *,
		    uint8_t *, uint8_t *);
uint16_t	qat_crypto_load_cipher_cryptoini(
		    struct qat_crypto_desc *, struct cryptoini *);
uint16_t	qat_crypto_load_auth_cryptoini(
		    struct qat_crypto_desc *, struct cryptoini *,
		    struct qat_sym_hash_def const **);

#endif