/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* System headers */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <machine/bus.h>

/* Cryptodev headers */
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

/* QAT specific headers */
#include "qat_ocf_mem_pool.h"
#include "qat_ocf_utils.h"
#include "cpa.h"

/* Private functions */
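/*
 * bus_dmamap_load() callback for single-segment allocations: on success it
 * records the one and only DMA segment in the qat_ocf_dma_mem descriptor.
 * On error the descriptor is left untouched; the caller sees the failure
 * via the return value of bus_dmamap_load() itself.
 */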
static void
qat_ocf_alloc_single_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct qat_ocf_dma_mem *dma_mem;

	if (error != 0)
		return;

	dma_mem = arg;
	dma_mem->dma_seg = segs[0];
}

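/*
 * Convert busdma segments into the CpaPhysFlatBuffer array consumed by QAT.
 * Output entries start at index "skip_seg" (so AAD entries already placed
 * at the head of the list are preserved) and the first "skip_bytes" bytes
 * of the input are dropped (used to align the payload with its offset in
 * the output buffer).  Returns E2BIG if the segments do not fit in
 * QAT_OCF_MAX_FLATS flat buffers.
 */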
static int
qat_ocf_populate_buf_list_cb(struct qat_ocf_buffer_list *buffers,
			     bus_dma_segment_t *segs,
			     int niseg,
			     int skip_seg,
			     int skip_bytes)
{
	CpaPhysFlatBuffer *flatBuffer;
	bus_addr_t segment_addr;
	bus_size_t segment_len;
	int iseg, oseg;

	for (iseg = 0, oseg = skip_seg;
	     iseg < niseg && oseg < QAT_OCF_MAX_FLATS;
	     iseg++) {
		segment_addr = segs[iseg].ds_addr;
		segment_len = segs[iseg].ds_len;

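		/*
		 * Consume the remaining skip budget: either trim the front
		 * of this segment or drop it entirely and move on.
		 */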
		if (skip_bytes > 0) {
			if (skip_bytes < segment_len) {
				segment_addr += skip_bytes;
				segment_len -= skip_bytes;
				skip_bytes = 0;
			} else {
				skip_bytes -= segment_len;
				continue;
			}
		}
		flatBuffer = &buffers->flatBuffers[oseg++];
		flatBuffer->dataLenInBytes = (Cpa32U)segment_len;
		flatBuffer->bufferPhysAddr = (CpaPhysicalAddr)segment_addr;
	}
	buffers->numBuffers = oseg;

	return iseg < niseg ? E2BIG : 0;
}

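/*
 * bus_dmamap_load() callback for a separate AAD buffer.  The AAD is placed
 * at the head of the source buffer list; the payload callback below appends
 * its segments after these entries.
 */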
void
qat_ocf_crypto_load_aadbuf_cb(void *_arg,
			      bus_dma_segment_t *segs,
			      int nseg,
			      int error)
{
	struct qat_ocf_load_cb_arg *arg;
	struct qat_ocf_cookie *qat_cookie;

	arg = _arg;
	if (error != 0) {
		arg->error = error;
		return;
	}

	qat_cookie = arg->qat_cookie;
	arg->error = qat_ocf_populate_buf_list_cb(
	    &qat_cookie->src_buffers, segs, nseg, 0, 0);
}

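/*
 * bus_dmamap_load() callback for the crypto payload.  Segments are appended
 * after any entries already present in the source list (e.g. AAD loaded by
 * qat_ocf_crypto_load_aadbuf_cb() above).
 */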
void
qat_ocf_crypto_load_buf_cb(void *_arg,
			   bus_dma_segment_t *segs,
			   int nseg,
			   int error)
{
	struct qat_ocf_cookie *qat_cookie;
	struct qat_ocf_load_cb_arg *arg;
	int start_segment, skip_bytes;

	arg = _arg;
	if (error != 0) {
		arg->error = error;
		return;
	}

	qat_cookie = arg->qat_cookie;

	skip_bytes = 0;
	start_segment = qat_cookie->src_buffers.numBuffers;

	arg->error = qat_ocf_populate_buf_list_cb(
	    &qat_cookie->src_buffers, segs, nseg, start_segment, skip_bytes);
}

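/*
 * bus_dmamap_load() callback for the output buffer of a request with
 * separate input and output.  QAT requires the payload to sit at the same
 * SGL offset in both lists, so when the AAD travels in-line in the input,
 * its flat-buffer entries are mirrored into the destination list before
 * the real output segments are appended.
 */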
void
qat_ocf_crypto_load_obuf_cb(void *_arg,
			    bus_dma_segment_t *segs,
			    int nseg,
			    int error)
{
	struct qat_ocf_load_cb_arg *arg;
	struct cryptop *crp;
	struct qat_ocf_cookie *qat_cookie;
	const struct crypto_session_params *csp;
	int osegs = 0, to_copy = 0;

	arg = _arg;
	if (error != 0) {
		arg->error = error;
		return;
	}

	crp = arg->crp_op;
	qat_cookie = arg->qat_cookie;
	csp = crypto_get_params(crp->crp_session);

	/*
	 * The payload must start at the same offset in the output SG list as in
	 * the input SG list.  Copy over SG entries from the input corresponding
	 * to the AAD buffer.
	 */
	if (crp->crp_aad_length == 0 ||
	    (CPA_TRUE == is_sep_aad_supported(csp) && crp->crp_aad)) {
		arg->error =
		    qat_ocf_populate_buf_list_cb(&qat_cookie->dst_buffers,
						 segs,
						 nseg,
						 0,
						 crp->crp_payload_output_start);
		return;
	}

	/*
	 * Copy the AAD entries from the source SGL so that the payload lands
	 * at the same position in the destination buffers.  If the AAD is
	 * in-line in the input buffer (crp_aad == NULL), its size is the gap
	 * between crp_aad_start and crp_payload_start; otherwise OCF supplied
	 * it in a separate buffer of crp_aad_length bytes.
	 */
	if (NULL == crp->crp_aad)
		to_copy = crp->crp_payload_start - crp->crp_aad_start;
	else
		to_copy = crp->crp_aad_length;

	for (; osegs < qat_cookie->src_buffers.numBuffers; osegs++) {
		CpaPhysFlatBuffer *src_flat;
		CpaPhysFlatBuffer *dst_flat;
		int data_len;

		if (to_copy <= 0)
			break;

		src_flat = &qat_cookie->src_buffers.flatBuffers[osegs];
		dst_flat = &qat_cookie->dst_buffers.flatBuffers[osegs];

		dst_flat->bufferPhysAddr = src_flat->bufferPhysAddr;
		data_len = imin(src_flat->dataLenInBytes, to_copy);
		dst_flat->dataLenInBytes = data_len;
		to_copy -= data_len;
	}

	arg->error =
	    qat_ocf_populate_buf_list_cb(&qat_cookie->dst_buffers,
					 segs,
					 nseg,
					 osegs,
					 crp->crp_payload_output_start);
}

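/*
 * Allocate a DMA-able memory region: create a tag, allocate coherent,
 * zeroed memory and load the map so the physical segment is known.  On
 * failure every intermediate resource is released before returning.
 */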
static int
qat_ocf_alloc_dma_mem(device_t dev,
		      struct qat_ocf_dma_mem *dma_mem,
		      int nseg,
		      bus_size_t size,
		      bus_size_t alignment)
{
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
				   alignment,
				   0,		      /* alignment, boundary */
				   BUS_SPACE_MAXADDR, /* lowaddr */
				   BUS_SPACE_MAXADDR, /* highaddr */
				   NULL,
				   NULL,	     /* filter, filterarg */
				   size,	     /* maxsize */
				   nseg,	     /* nsegments */
				   size,	     /* maxsegsize */
				   BUS_DMA_COHERENT, /* flags */
				   NULL,
				   NULL, /* lockfunc, lockarg */
				   &dma_mem->dma_tag);
	if (error != 0) {
		device_printf(dev,
			      "couldn't create DMA tag, error = %d\n",
			      error);
		return error;
	}

	error =
	    bus_dmamem_alloc(dma_mem->dma_tag,
			     &dma_mem->dma_vaddr,
			     BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
			     &dma_mem->dma_map);
	if (error != 0) {
		device_printf(dev,
			      "couldn't allocate dmamem, error = %d\n",
			      error);
		goto fail_0;
	}

	error = bus_dmamap_load(dma_mem->dma_tag,
				dma_mem->dma_map,
				dma_mem->dma_vaddr,
				size,
				qat_ocf_alloc_single_cb,
				dma_mem,
				BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev,
			      "couldn't load dmamem map, error = %d\n",
			      error);
		goto fail_1;
	}

	return 0;
fail_1:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
fail_0:
	bus_dma_tag_destroy(dma_mem->dma_tag);

	return error;
}

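/*
 * Release a region obtained from qat_ocf_alloc_dma_mem().  The descriptor
 * is scrubbed afterwards, so calling this twice on the same descriptor is
 * a harmless no-op.
 */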
static void
qat_ocf_free_dma_mem(struct qat_ocf_dma_mem *qdm)
{
	if (qdm->dma_tag != NULL && qdm->dma_vaddr != NULL) {
		bus_dmamap_unload(qdm->dma_tag, qdm->dma_map);
		bus_dmamem_free(qdm->dma_tag, qdm->dma_vaddr, qdm->dma_map);
		bus_dma_tag_destroy(qdm->dma_tag);
		explicit_bzero(qdm, sizeof(*qdm));
	}
}

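/*
 * Create only the DMA tag and map for a request buffer.  No memory is
 * allocated here; the map is loaded per request with the crypto buffer
 * supplied by OCF and unloaded again in qat_ocf_cookie_dma_unload().
 */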
static int
qat_ocf_dma_tag_and_map(device_t dev,
			struct qat_ocf_dma_mem *dma_mem,
			bus_size_t size,
			bus_size_t segs)
{
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
				   1,
				   0,		      /* alignment, boundary */
				   BUS_SPACE_MAXADDR, /* lowaddr */
				   BUS_SPACE_MAXADDR, /* highaddr */
				   NULL,
				   NULL,	     /* filter, filterarg */
				   size,	     /* maxsize */
				   segs,	     /* nsegments */
				   size,	     /* maxsegsize */
				   BUS_DMA_COHERENT, /* flags */
				   NULL,
				   NULL, /* lockfunc, lockarg */
				   &dma_mem->dma_tag);
	if (error != 0)
		return error;

	error = bus_dmamap_create(dma_mem->dma_tag,
				  BUS_DMA_COHERENT,
				  &dma_mem->dma_map);
	if (error != 0)
		return error;

	return 0;
}

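/*
 * Reset the per-request state of a cookie before it is returned to the
 * pool.  explicit_bzero() is used for the IV, digest and AAD staging
 * buffers since they may hold sensitive request material.
 */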
static void
qat_ocf_clear_cookie(struct qat_ocf_cookie *qat_cookie)
{
	qat_cookie->src_buffers.numBuffers = 0;
	qat_cookie->dst_buffers.numBuffers = 0;
	qat_cookie->is_sep_aad_used = CPA_FALSE;
	explicit_bzero(qat_cookie->qat_ocf_iv_buf,
		       sizeof(qat_cookie->qat_ocf_iv_buf));
	explicit_bzero(qat_cookie->qat_ocf_digest,
		       sizeof(qat_cookie->qat_ocf_digest));
	explicit_bzero(qat_cookie->qat_ocf_gcm_aad,
		       sizeof(qat_cookie->qat_ocf_gcm_aad));
	qat_cookie->crp_op = NULL;
}

/* Public functions */
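/*
 * Synchronize every DMA map attached to a request (separate AAD staging
 * buffer, source and destination SGLs, and the cookie itself) before the
 * request is handed to the device.
 */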
CpaStatus
qat_ocf_cookie_dma_pre_sync(struct cryptop *crp, CpaCySymDpOpData *pOpData)
{
	struct qat_ocf_cookie *qat_cookie;

	if (NULL == pOpData->pCallbackTag)
		return CPA_STATUS_FAIL;

	qat_cookie = (struct qat_ocf_cookie *)pOpData->pCallbackTag;

	if (CPA_TRUE == qat_cookie->is_sep_aad_used) {
		bus_dmamap_sync(qat_cookie->gcm_aad_dma_mem.dma_tag,
				qat_cookie->gcm_aad_dma_mem.dma_map,
				BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}

	bus_dmamap_sync(qat_cookie->src_dma_mem.dma_tag,
			qat_cookie->src_dma_mem.dma_map,
			BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		bus_dmamap_sync(qat_cookie->dst_dma_mem.dma_tag,
				qat_cookie->dst_dma_mem.dma_map,
				BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	bus_dmamap_sync(qat_cookie->dma_tag,
			qat_cookie->dma_map,
			BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return CPA_STATUS_SUCCESS;
}

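/*
 * Synchronize the same set of DMA maps after the device has completed the
 * request, before the CPU reads the results.
 */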
CpaStatus
qat_ocf_cookie_dma_post_sync(struct cryptop *crp, CpaCySymDpOpData *pOpData)
{
	struct qat_ocf_cookie *qat_cookie;

	if (NULL == pOpData->pCallbackTag)
		return CPA_STATUS_FAIL;

	qat_cookie = (struct qat_ocf_cookie *)pOpData->pCallbackTag;

	bus_dmamap_sync(qat_cookie->src_dma_mem.dma_tag,
			qat_cookie->src_dma_mem.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		bus_dmamap_sync(qat_cookie->dst_dma_mem.dma_tag,
				qat_cookie->dst_dma_mem.dma_map,
				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_sync(qat_cookie->dma_tag,
			qat_cookie->dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (qat_cookie->is_sep_aad_used)
		bus_dmamap_sync(qat_cookie->gcm_aad_dma_mem.dma_tag,
				qat_cookie->gcm_aad_dma_mem.dma_map,
				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return CPA_STATUS_SUCCESS;
}

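/*
 * Unload every DMA map that was loaded for the request.  The maps and tags
 * themselves persist in the cookie for reuse by the next request.
 */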
CpaStatus
qat_ocf_cookie_dma_unload(struct cryptop *crp, CpaCySymDpOpData *pOpData)
{
	struct qat_ocf_cookie *qat_cookie;

	qat_cookie = pOpData->pCallbackTag;

	if (NULL == qat_cookie)
		return CPA_STATUS_FAIL;

	bus_dmamap_unload(qat_cookie->src_dma_mem.dma_tag,
			  qat_cookie->src_dma_mem.dma_map);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
		bus_dmamap_unload(qat_cookie->dst_dma_mem.dma_tag,
				  qat_cookie->dst_dma_mem.dma_map);
	if (qat_cookie->is_sep_aad_used)
		bus_dmamap_unload(qat_cookie->gcm_aad_dma_mem.dma_tag,
				  qat_cookie->gcm_aad_dma_mem.dma_map);

	return CPA_STATUS_SUCCESS;
}

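/*
 * Create the per-instance cookie pool.  Each cookie lives in its own
 * coherent DMA allocation so that the flat-buffer lists and the DP OP data
 * embedded in it are directly visible to the device, and every physical
 * address the firmware needs is precomputed here.
 */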
CpaStatus
qat_ocf_cookie_pool_init(struct qat_ocf_instance *instance, device_t dev)
{
	int i, error = 0;

	mtx_init(&instance->cookie_pool_mtx,
		 "QAT cookie pool MTX",
		 NULL,
		 MTX_DEF);
	instance->free_cookie_ptr = 0;
	for (i = 0; i < QAT_OCF_MEM_POOL_SIZE; i++) {
		struct qat_ocf_cookie *qat_cookie;
		struct qat_ocf_dma_mem *entry_dma_mem;

		entry_dma_mem = &instance->cookie_dmamem[i];

		/*
		 * Allocate a DMA segment for the cookie.  The cookie must
		 * live in DMA-able memory because it embeds, among other
		 * things, the source and destination flat-buffer lists.
		 */
		error = qat_ocf_alloc_dma_mem(dev,
					      entry_dma_mem,
					      1,
					      sizeof(struct qat_ocf_cookie),
					      (1 << 6));
		if (error)
			break;

		qat_cookie = entry_dma_mem->dma_vaddr;
		instance->cookie_pool[i] = qat_cookie;

		qat_cookie->dma_map = entry_dma_mem->dma_map;
		qat_cookie->dma_tag = entry_dma_mem->dma_tag;

		qat_ocf_clear_cookie(qat_cookie);

		/* Physical address of IV buffer */
		qat_cookie->qat_ocf_iv_buf_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, qat_ocf_iv_buf);

		/* Physical address of digest buffer */
		qat_cookie->qat_ocf_digest_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, qat_ocf_digest);

		/* Physical address of AAD buffer */
		qat_cookie->qat_ocf_gcm_aad_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, qat_ocf_gcm_aad);

		/* Physical addresses of the source and destination SGL
		 * headers, derived from the cookie's base address */
		qat_cookie->src_buffer_list_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, src_buffers);

		qat_cookie->dst_buffer_list_paddr =
		    entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, dst_buffers);

		/* Physical address of the embedded pOpdata */
		qat_cookie->pOpData_paddr = entry_dma_mem->dma_seg.ds_addr +
		    offsetof(struct qat_ocf_cookie, pOpdata);
		/* Init QAT DP API OP data with const values */
		qat_cookie->pOpdata.pCallbackTag = (void *)qat_cookie;
		qat_cookie->pOpdata.thisPhys =
		    (CpaPhysicalAddr)qat_cookie->pOpData_paddr;

		error = qat_ocf_dma_tag_and_map(dev,
						&qat_cookie->src_dma_mem,
						QAT_OCF_MAXLEN,
						QAT_OCF_MAX_FLATS);
		if (error)
			break;

		error = qat_ocf_dma_tag_and_map(dev,
						&qat_cookie->dst_dma_mem,
						QAT_OCF_MAXLEN,
						QAT_OCF_MAX_FLATS);
		if (error)
			break;

		/* At most one flat buffer is needed for the AAD staging
		 * copy, used when OCF supplies the AAD in a separate buffer
		 * but the QAT session cannot consume it that way */
		error = qat_ocf_dma_tag_and_map(dev,
						&qat_cookie->gcm_aad_dma_mem,
						QAT_OCF_MAXLEN,
						1);
		if (error)
			break;

		instance->free_cookie[i] = qat_cookie;
		instance->free_cookie_ptr++;
	}

	return error;
}

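/*
 * Cookie lifecycle sketch (error handling elided; the request-path steps
 * named below live outside this file and are illustrative only):
 *
 *	struct qat_ocf_cookie *cookie;
 *
 *	if (qat_ocf_cookie_alloc(instance, &cookie) != CPA_STATUS_SUCCESS)
 *		return (EAGAIN);
 *	... load src/dst/AAD DMA maps, fill cookie->pOpdata, pre-sync,
 *	... submit to QAT; on completion: qat_ocf_cookie_dma_post_sync(),
 *	... qat_ocf_cookie_dma_unload(), then return the cookie:
 *	qat_ocf_cookie_free(instance, cookie);
 */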
CpaStatus
qat_ocf_cookie_alloc(struct qat_ocf_instance *qat_instance,
		     struct qat_ocf_cookie **cookie_out)
{
	mtx_lock(&qat_instance->cookie_pool_mtx);
	if (qat_instance->free_cookie_ptr == 0) {
		mtx_unlock(&qat_instance->cookie_pool_mtx);
		return CPA_STATUS_FAIL;
	}
	*cookie_out =
	    qat_instance->free_cookie[--qat_instance->free_cookie_ptr];
	mtx_unlock(&qat_instance->cookie_pool_mtx);

	return CPA_STATUS_SUCCESS;
}

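/* Return a cookie to the pool after scrubbing its per-request state. */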
void
qat_ocf_cookie_free(struct qat_ocf_instance *qat_instance,
		    struct qat_ocf_cookie *cookie)
{
	qat_ocf_clear_cookie(cookie);
	mtx_lock(&qat_instance->cookie_pool_mtx);
	qat_instance->free_cookie[qat_instance->free_cookie_ptr++] = cookie;
	mtx_unlock(&qat_instance->cookie_pool_mtx);
}

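/*
 * Destroy the cookie pool: tear down the per-cookie request tags and maps
 * and free each cookie's backing DMA memory.  Safe to call on a partially
 * initialized pool since NULL entries are skipped.
 */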
void
qat_ocf_cookie_pool_deinit(struct qat_ocf_instance *qat_instance)
{
	int i;

	for (i = 0; i < QAT_OCF_MEM_POOL_SIZE; i++) {
		struct qat_ocf_cookie *cookie;
		struct qat_ocf_dma_mem *cookie_dma;

		cookie = qat_instance->cookie_pool[i];
		if (NULL == cookie)
			continue;

		/* Destroy tag and map for source SGL */
		if (cookie->src_dma_mem.dma_tag) {
			bus_dmamap_destroy(cookie->src_dma_mem.dma_tag,
					   cookie->src_dma_mem.dma_map);
			bus_dma_tag_destroy(cookie->src_dma_mem.dma_tag);
		}

		/* Destroy tag and map for dest SGL */
		if (cookie->dst_dma_mem.dma_tag) {
			bus_dmamap_destroy(cookie->dst_dma_mem.dma_tag,
					   cookie->dst_dma_mem.dma_map);
			bus_dma_tag_destroy(cookie->dst_dma_mem.dma_tag);
		}

		/* Destroy tag and map for separated AAD */
		if (cookie->gcm_aad_dma_mem.dma_tag) {
			bus_dmamap_destroy(cookie->gcm_aad_dma_mem.dma_tag,
					   cookie->gcm_aad_dma_mem.dma_map);
			bus_dma_tag_destroy(cookie->gcm_aad_dma_mem.dma_tag);
		}

		/* Free DMA memory */
		cookie_dma = &qat_instance->cookie_dmamem[i];
		qat_ocf_free_dma_mem(cookie_dma);
		qat_instance->cookie_pool[i] = NULL;
	}
	mtx_destroy(&qat_instance->cookie_pool_mtx);
}