/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#pragma once

/*
 * Keccak SHAKE128 (if supported by the device?) uses a 1344-bit block.
 * SHA3-224 is the next largest block size, at 1152 bits.  However, crypto(4)
 * doesn't support any SHA3 hash, so SHA2 is the constraint: SHA2-512's
 * 1024-bit (128-byte) block is the largest we need to handle.
 */
#define CCP_HASH_MAX_BLOCK_SIZE	(SHA2_512_BLOCK_LEN)

#define CCP_AES_MAX_KEY_LEN	(AES_XTS_MAX_KEY)
#define CCP_MAX_CRYPTO_IV_LEN	32	/* GCM IV + GHASH context */

#define MAX_HW_QUEUES		5
#define MAX_LSB_REGIONS		8

#ifndef __must_check
#define __must_check __attribute__((__warn_unused_result__))
#endif

/*
 * Internal data structures.
 */
enum sha_version {
	SHA1,
#if 0
	SHA2_224,
#endif
	SHA2_256, SHA2_384, SHA2_512
};

struct ccp_session_hmac {
	struct auth_hash *auth_hash;
	int hash_len;
	unsigned int partial_digest_len;
	unsigned int auth_mode;
	unsigned int mk_size;
	char ipad[CCP_HASH_MAX_BLOCK_SIZE];
	char opad[CCP_HASH_MAX_BLOCK_SIZE];
};

struct ccp_session_gmac {
	int hash_len;
	char final_block[GMAC_BLOCK_LEN];
};

struct ccp_session_blkcipher {
	unsigned cipher_mode;
	unsigned cipher_type;
	unsigned key_len;
	unsigned iv_len;
	char enckey[CCP_AES_MAX_KEY_LEN];
	char iv[CCP_MAX_CRYPTO_IV_LEN];
};

struct ccp_session {
	bool active : 1;
	bool cipher_first : 1;
	int pending;
	enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
	unsigned queue;
	union {
		struct ccp_session_hmac hmac;
		struct ccp_session_gmac gmac;
	};
	struct ccp_session_blkcipher blkcipher;
};
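
/*
 * Illustrative only (not driver code): a minimal sketch of how the mode
 * discriminator above selects the union member, assuming a digest-only
 * session.  The helper name and field values are assumptions made for the
 * example; real session setup lives in the driver proper.
 */
#if 0
static void
ccp_session_sketch_hmac(struct ccp_session *s, struct auth_hash *axf)
{
	s->mode = HMAC;
	s->hmac.auth_hash = axf;
	s->hmac.hash_len = axf->hashsize;
	/* A GCM session would instead fill s->gmac and s->blkcipher. */
}
#endif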

struct ccp_softc;
struct ccp_queue {
	struct mtx		cq_lock;
	unsigned		cq_qindex;
	struct ccp_softc	*cq_softc;

	/* Host memory and tracking structures for descriptor ring. */
	bus_dma_tag_t		ring_desc_tag;
	bus_dmamap_t		ring_desc_map;
	struct ccp_desc		*desc_ring;
	bus_addr_t		desc_ring_bus_addr;
	/* Callbacks and arguments ring; indices correspond to above ring. */
	struct ccp_completion_ctx *completions_ring;

	uint32_t		qcontrol;	/* Cached register value */
	unsigned		lsb_mask;	/* LSBs available to queue */
	int			private_lsb;	/* Reserved LSB #, or -1 */

	unsigned		cq_head;
	unsigned		cq_tail;
	unsigned		cq_acq_tail;

	bool			cq_waiting;	/* Thread waiting for space */

	struct sglist		*cq_sg_crp;
	struct sglist		*cq_sg_ulptx;
	struct sglist		*cq_sg_dst;
};

struct ccp_completion_ctx {
	void (*callback_fn)(struct ccp_queue *qp, struct ccp_session *s,
	    void *arg, int error);
	void *callback_arg;
	struct ccp_session *session;
};

struct ccp_softc {
	device_t dev;
	int32_t cid;
	struct mtx lock;
	bool detaching;

	unsigned ring_size_order;

	/*
	 * Each command queue is either public or private.  "Private"
	 * (PSP-only) by default.  The PSP grants the host access to some
	 * queues via the QMR (Queue Mask Register); set bits are
	 * host-accessible.  (An illustrative sketch of walking this mask
	 * follows the struct definition below.)
	 */
	uint8_t valid_queues;

	uint8_t hw_version;
	uint8_t num_queues;
	uint16_t hw_features;
	uint16_t num_lsb_entries;

	/* Primary BAR (RID 2) used for register access */
	bus_space_tag_t pci_bus_tag;
	bus_space_handle_t pci_bus_handle;
	int pci_resource_id;
	struct resource *pci_resource;

	/* Secondary BAR (RID 5) apparently used for MSI-X */
	int pci_resource_id_msix;
	struct resource *pci_resource_msix;

	/* Interrupt resources */
	void *intr_tag[2];
	struct resource *intr_res[2];
	unsigned intr_count;

	struct ccp_queue queues[MAX_HW_QUEUES];
};
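
/*
 * Illustrative only (not driver code): one way the QMR-derived valid_queues
 * bitmask above might be walked to find host-accessible queues.  The helper
 * name and loop are assumptions made for the example.
 */
#if 0
static void
ccp_queues_sketch_walk(struct ccp_softc *sc)
{
	unsigned i;

	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if ((sc->valid_queues & (1u << i)) == 0)
			continue;	/* PSP-private; not granted to the host. */
		/* sc->queues[i] is usable by the host driver. */
	}
}
#endif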

/* Internal globals */
SYSCTL_DECL(_hw_ccp);
MALLOC_DECLARE(M_CCP);
extern bool g_debug_print;
extern struct ccp_softc *g_ccp_softc;

/*
 * Debug macros.
 */
#define DPRINTF(dev, ...)	do {				\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)

#if 0
#define INSECURE_DEBUG(dev, ...)	do {			\
	if (!g_debug_print)					\
		break;						\
	if ((dev) != NULL)					\
		device_printf((dev), "XXX " __VA_ARGS__);	\
	else							\
		printf("ccpXXX: " __VA_ARGS__);			\
} while (0)
#else
#define INSECURE_DEBUG(dev, ...)
#endif
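
/*
 * Illustrative usage of the debug macro above (the format string and the
 * fields printed are made up for the example):
 *
 *	DPRINTF(sc->dev, "queue %u: %u descriptors active\n",
 *	    qp->cq_qindex, ccp_queue_get_active(qp));
 */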

/*
 * Internal hardware manipulation routines.
 */
int ccp_hw_attach(device_t dev);
void ccp_hw_detach(device_t dev);

void ccp_queue_write_tail(struct ccp_queue *qp);

#ifdef DDB
void db_ccp_show_hw(struct ccp_softc *sc);
void db_ccp_show_queue_hw(struct ccp_queue *qp);
#endif

/*
 * Internal hardware crypt-op submission routines.
 */
int ccp_authenc(struct ccp_queue *sc, struct ccp_session *s,
    struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
    __must_check;
int ccp_blkcipher(struct ccp_queue *sc, struct ccp_session *s,
    struct cryptop *crp) __must_check;
int ccp_gcm(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp,
    struct cryptodesc *crda, struct cryptodesc *crde) __must_check;
int ccp_hmac(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp)
    __must_check;

/*
 * Internal hardware TRNG read routine.
 */
u_int random_ccp_read(void *v, u_int c);

/* XXX: descriptor-ring space reservation helpers. */
int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
    __must_check;
void ccp_queue_abort(struct ccp_queue *qp);
void ccp_queue_release(struct ccp_queue *qp);
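
/*
 * Illustrative only (not driver code): a plausible acquire/submit/release
 * pattern implied by the declarations above.  The zero-on-success error
 * convention, the malloc-style mflags value, and the choice of
 * ccp_blkcipher() are assumptions made for the sketch.
 */
#if 0
static int
ccp_submit_sketch(struct ccp_queue *qp, struct ccp_session *s,
    struct cryptop *crp)
{
	int error;

	/* Reserve one descriptor-ring slot before building the request. */
	error = ccp_queue_acquire_reserve(qp, 1, M_NOWAIT);
	if (error != 0)
		return (error);

	error = ccp_blkcipher(qp, s, crp);
	if (error != 0)
		ccp_queue_abort(qp);	/* Drop the reservation on failure. */
	else
		ccp_queue_release(qp);	/* Hand the reserved slots to hardware. */
	return (error);
}
#endif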

/*
 * Internal inline routines.
 */
static inline unsigned
ccp_queue_get_active(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((qp->cq_tail - qp->cq_head) & ((1 << sc->ring_size_order) - 1));
}

static inline unsigned
ccp_queue_get_ring_space(struct ccp_queue *qp)
{
	struct ccp_softc *sc;

	sc = qp->cq_softc;
	return ((1 << sc->ring_size_order) - ccp_queue_get_active(qp) - 1);
}
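
/*
 * Worked example (illustrative): with ring_size_order = 6 (a 64-entry ring),
 * cq_head = 60 and cq_tail = 2, ccp_queue_get_active() returns
 * (2 - 60) & 63 = 6, and ccp_queue_get_ring_space() returns 64 - 6 - 1 = 57.
 * One slot is kept free (hence the "- 1"), presumably so that head == tail
 * unambiguously means the ring is empty.
 */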