1/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ 2/* $NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $ */ 3 4/* 5 * Copyright (c) 2019 Internet Initiative Japan, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30/* 31 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. 32 * 33 * Redistribution and use in source and binary forms, with or without 34 * modification, are permitted provided that the following conditions 35 * are met: 36 * 37 * * Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * * Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in 41 * the documentation and/or other materials provided with the 42 * distribution. 43 * * Neither the name of Intel Corporation nor the names of its 44 * contributors may be used to endorse or promote products derived 45 * from this software without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 48 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 49 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 50 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 51 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 57 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 58 */ 59 60#include <sys/cdefs.h> 61__FBSDID("$FreeBSD$"); 62#if 0 63__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $"); 64#endif 65 66#include <sys/param.h> 67#include <sys/systm.h> 68#include <sys/bus.h> 69#include <sys/cpu.h> 70#include <sys/firmware.h> 71#include <sys/kernel.h> 72#include <sys/mbuf.h> 73#include <sys/md5.h> 74#include <sys/module.h> 75#include <sys/mutex.h> 76#include <sys/smp.h> 77#include <sys/sysctl.h> 78#include <sys/rman.h> 79 80#include <machine/bus.h> 81 82#include <opencrypto/cryptodev.h> 83#include <opencrypto/xform.h> 84 85#include "cryptodev_if.h" 86 87#include <dev/pci/pcireg.h> 88#include <dev/pci/pcivar.h> 89 90#include "qatreg.h" 91#include "qatvar.h" 92#include "qat_aevar.h" 93 94extern struct qat_hw qat_hw_c2xxx; 95extern struct qat_hw qat_hw_c3xxx; 96extern struct qat_hw qat_hw_c62x; 97extern struct qat_hw qat_hw_d15xx; 98extern struct qat_hw qat_hw_dh895xcc; 99 100#define PCI_VENDOR_INTEL 0x8086 101#define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS 0x1f18 102#define PCI_PRODUCT_INTEL_C3K_QAT 0x19e2 103#define PCI_PRODUCT_INTEL_C3K_QAT_VF 0x19e3 104#define PCI_PRODUCT_INTEL_C620_QAT 0x37c8 105#define PCI_PRODUCT_INTEL_C620_QAT_VF 0x37c9 106#define PCI_PRODUCT_INTEL_XEOND_QAT 0x6f54 107#define PCI_PRODUCT_INTEL_XEOND_QAT_VF 0x6f55 108#define PCI_PRODUCT_INTEL_DH895XCC_QAT 0x0435 109#define PCI_PRODUCT_INTEL_DH895XCC_QAT_VF 0x0443 110 111static const struct qat_product { 112 uint16_t qatp_vendor; 113 uint16_t qatp_product; 114 const char *qatp_name; 115 enum qat_chip_type qatp_chip; 116 const struct qat_hw *qatp_hw; 117} qat_products[] = { 118 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS, 119 "Intel C2000 QuickAssist PF", 120 QAT_CHIP_C2XXX, &qat_hw_c2xxx }, 121 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT, 122 "Intel C3000 QuickAssist PF", 123 QAT_CHIP_C3XXX, &qat_hw_c3xxx }, 124 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT, 125 "Intel C620/Xeon D-2100 QuickAssist PF", 126 QAT_CHIP_C62X, &qat_hw_c62x }, 127 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT, 128 "Intel Xeon D-1500 QuickAssist PF", 129 QAT_CHIP_D15XX, &qat_hw_d15xx }, 130 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH895XCC_QAT, 131 "Intel 8950 QuickAssist PCIe Adapter PF", 132 QAT_CHIP_DH895XCC, &qat_hw_dh895xcc }, 133 { 0, 0, NULL, 0, NULL }, 134}; 135 136/* Hash Algorithm specific structure */ 137 138/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */ 139static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = { 140 0x67, 0x45, 0x23, 0x01, 141 0xef, 0xcd, 0xab, 0x89, 142 0x98, 0xba, 0xdc, 0xfe, 143 0x10, 0x32, 0x54, 0x76, 144 0xc3, 0xd2, 0xe1, 0xf0 145}; 146 147/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */ 148static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = { 149 0x6a, 0x09, 0xe6, 0x67, 150 0xbb, 0x67, 0xae, 0x85, 151 0x3c, 0x6e, 0xf3, 0x72, 152 0xa5, 0x4f, 0xf5, 0x3a, 153 0x51, 0x0e, 0x52, 0x7f, 154 0x9b, 0x05, 0x68, 0x8c, 155 0x1f, 0x83, 0xd9, 0xab, 156 0x5b, 0xe0, 0xcd, 0x19 157}; 158 159/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ 160static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = { 161 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 162 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07, 163 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, 164 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 165 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 166 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 167 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 168 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4 169}; 170 171/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ 172static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = { 173 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 174 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 175 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b, 176 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 177 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 178 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, 179 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 180 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 181}; 182 183static const struct qat_sym_hash_alg_info sha1_info = { 184 .qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE, 185 .qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE, 186 .qshai_state_size = QAT_HASH_SHA1_STATE_SIZE, 187 .qshai_init_state = sha1_initial_state, 188 .qshai_sah = &auth_hash_hmac_sha1, 189 .qshai_state_offset = 0, 190 .qshai_state_word = 4, 191}; 192 193static const struct qat_sym_hash_alg_info sha256_info = { 194 .qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE, 195 .qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE, 196 .qshai_state_size = QAT_HASH_SHA256_STATE_SIZE, 197 .qshai_init_state = sha256_initial_state, 198 .qshai_sah = &auth_hash_hmac_sha2_256, 199 .qshai_state_offset = offsetof(SHA256_CTX, state), 200 .qshai_state_word = 4, 201}; 202 203static const struct qat_sym_hash_alg_info sha384_info = { 204 .qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE, 205 .qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE, 206 .qshai_state_size = QAT_HASH_SHA384_STATE_SIZE, 207 .qshai_init_state = sha384_initial_state, 208 .qshai_sah = &auth_hash_hmac_sha2_384, 209 .qshai_state_offset = offsetof(SHA384_CTX, state), 210 .qshai_state_word = 8, 211}; 212 213static const struct qat_sym_hash_alg_info sha512_info = { 214 .qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE, 215 .qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE, 216 .qshai_state_size = QAT_HASH_SHA512_STATE_SIZE, 217 .qshai_init_state = sha512_initial_state, 218 .qshai_sah = &auth_hash_hmac_sha2_512, 219 .qshai_state_offset = offsetof(SHA512_CTX, state), 220 .qshai_state_word = 8, 221}; 222 223static const struct qat_sym_hash_alg_info aes_gcm_info = { 224 .qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE, 225 .qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE, 226 .qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE, 227 .qshai_sah = &auth_hash_nist_gmac_aes_128, 228}; 229 230/* Hash QAT specific structures */ 231 232static const struct qat_sym_hash_qat_info sha1_config = { 233 .qshqi_algo_enc = HW_AUTH_ALGO_SHA1, 234 .qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE, 235 .qshqi_state1_len = HW_SHA1_STATE1_SZ, 236 .qshqi_state2_len = HW_SHA1_STATE2_SZ, 237}; 238 239static const struct qat_sym_hash_qat_info sha256_config = { 240 .qshqi_algo_enc = HW_AUTH_ALGO_SHA256, 241 .qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE, 242 .qshqi_state1_len = HW_SHA256_STATE1_SZ, 243 .qshqi_state2_len = HW_SHA256_STATE2_SZ 244}; 245 246static const struct qat_sym_hash_qat_info sha384_config = { 247 .qshqi_algo_enc = HW_AUTH_ALGO_SHA384, 248 .qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE, 249 .qshqi_state1_len = HW_SHA384_STATE1_SZ, 250 .qshqi_state2_len = HW_SHA384_STATE2_SZ 251}; 252 253static const struct qat_sym_hash_qat_info sha512_config = { 254 .qshqi_algo_enc = HW_AUTH_ALGO_SHA512, 255 .qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE, 256 .qshqi_state1_len = HW_SHA512_STATE1_SZ, 257 .qshqi_state2_len = HW_SHA512_STATE2_SZ 258}; 259 260static const struct qat_sym_hash_qat_info aes_gcm_config = { 261 .qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128, 262 .qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE, 263 .qshqi_state1_len = HW_GALOIS_128_STATE1_SZ, 264 .qshqi_state2_len = 265 HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ, 266}; 267 268static const struct qat_sym_hash_def qat_sym_hash_defs[] = { 269 [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config }, 270 [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config }, 271 [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config }, 272 [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config }, 273 [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config }, 274}; 275 276static const struct qat_product *qat_lookup(device_t); 277static int qat_probe(device_t); 278static int qat_attach(device_t); 279static int qat_init(device_t); 280static int qat_start(device_t); 281static int qat_detach(device_t); 282 283static int qat_newsession(device_t dev, crypto_session_t cses, 284 const struct crypto_session_params *csp); 285static void qat_freesession(device_t dev, crypto_session_t cses); 286 287static int qat_setup_msix_intr(struct qat_softc *); 288 289static void qat_etr_init(struct qat_softc *); 290static void qat_etr_deinit(struct qat_softc *); 291static void qat_etr_bank_init(struct qat_softc *, int); 292static void qat_etr_bank_deinit(struct qat_softc *sc, int); 293 294static void qat_etr_ap_bank_init(struct qat_softc *); 295static void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int); 296static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *, 297 uint32_t, int); 298static void qat_etr_ap_bank_setup_ring(struct qat_softc *, 299 struct qat_ring *); 300static int qat_etr_verify_ring_size(uint32_t, uint32_t); 301 302static int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *, 303 struct qat_ring *); 304static void qat_etr_bank_intr(void *); 305 306static void qat_arb_update(struct qat_softc *, struct qat_bank *); 307 308static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie( 309 struct qat_crypto_bank *); 310static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *, 311 struct qat_sym_cookie *); 312static int qat_crypto_setup_ring(struct qat_softc *, 313 struct qat_crypto_bank *); 314static int qat_crypto_bank_init(struct qat_softc *, 315 struct qat_crypto_bank *); 316static int qat_crypto_init(struct qat_softc *); 317static void qat_crypto_deinit(struct qat_softc *); 318static int qat_crypto_start(struct qat_softc *); 319static void qat_crypto_stop(struct qat_softc *); 320static int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *); 321 322static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver"); 323 324static const struct qat_product * 325qat_lookup(device_t dev) 326{ 327 const struct qat_product *qatp; 328 329 for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) { 330 if (pci_get_vendor(dev) == qatp->qatp_vendor && 331 pci_get_device(dev) == qatp->qatp_product) 332 return qatp; 333 } 334 return NULL; 335} 336 337static int 338qat_probe(device_t dev) 339{ 340 const struct qat_product *prod; 341 342 prod = qat_lookup(dev); 343 if (prod != NULL) { 344 device_set_desc(dev, prod->qatp_name); 345 return BUS_PROBE_DEFAULT; 346 } 347 return ENXIO; 348} 349 350static int 351qat_attach(device_t dev) 352{ 353 struct qat_softc *sc = device_get_softc(dev); 354 const struct qat_product *qatp; 355 bus_size_t msixtbl_offset; 356 int bar, count, error, i, msixoff, msixtbl_bar; 357 358 sc->sc_dev = dev; 359 sc->sc_rev = pci_get_revid(dev); 360 sc->sc_crypto.qcy_cid = -1; 361 362 qatp = qat_lookup(dev); 363 memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw)); 364 365 /* Determine active accelerators and engines */ 366 sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc); 367 sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc); 368 369 sc->sc_accel_num = 0; 370 for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) { 371 if (sc->sc_accel_mask & (1 << i)) 372 sc->sc_accel_num++; 373 } 374 sc->sc_ae_num = 0; 375 for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) { 376 if (sc->sc_ae_mask & (1 << i)) 377 sc->sc_ae_num++; 378 } 379 380 if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) { 381 device_printf(sc->sc_dev, "couldn't find acceleration"); 382 goto fail; 383 } 384 385 MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL); 386 MPASS(sc->sc_ae_num <= MAX_NUM_AE); 387 388 /* Determine SKU and capabilities */ 389 sc->sc_sku = sc->sc_hw.qhw_get_sku(sc); 390 sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc); 391 sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc); 392 393 /* Map BARs */ 394 msixtbl_bar = 0; 395 msixtbl_offset = 0; 396 if (pci_find_cap(dev, PCIY_MSIX, &msixoff) == 0) { 397 uint32_t msixtbl; 398 msixtbl = pci_read_config(dev, msixoff + PCIR_MSIX_TABLE, 4); 399 msixtbl_offset = msixtbl & ~PCIM_MSIX_BIR_MASK; 400 msixtbl_bar = PCIR_BAR(msixtbl & PCIM_MSIX_BIR_MASK); 401 } 402 403 i = 0; 404 if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) { 405 MPASS(sc->sc_hw.qhw_sram_bar_id == 0); 406 uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4); 407 /* Skip SRAM BAR */ 408 i = (fusectl & FUSECTL_MASK) ? 1 : 0; 409 } 410 for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) { 411 uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4); 412 if (val == 0 || !PCI_BAR_MEM(val)) 413 continue; 414 415 sc->sc_rid[i] = PCIR_BAR(bar); 416 sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 417 &sc->sc_rid[i], RF_ACTIVE); 418 if (sc->sc_res[i] == NULL) { 419 device_printf(dev, "couldn't map BAR %d\n", bar); 420 goto fail; 421 } 422 423 sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]); 424 sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]); 425 426 i++; 427 if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) 428 bar++; 429 } 430 431 pci_enable_busmaster(dev); 432 433 count = sc->sc_hw.qhw_num_banks + 1; 434 if (pci_msix_count(dev) < count) { 435 device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n", 436 pci_msix_count(dev), count); 437 goto fail; 438 } 439 error = pci_alloc_msix(dev, &count); 440 if (error != 0) { 441 device_printf(dev, "failed to allocate MSI-X vectors\n"); 442 goto fail; 443 } 444 445 error = qat_init(dev); 446 if (error == 0) 447 return 0; 448 449fail: 450 qat_detach(dev); 451 return ENXIO; 452} 453 454static int 455qat_init(device_t dev) 456{ 457 struct qat_softc *sc = device_get_softc(dev); 458 int error; 459 460 qat_etr_init(sc); 461 462 if (sc->sc_hw.qhw_init_admin_comms != NULL && 463 (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) { 464 device_printf(sc->sc_dev, 465 "Could not initialize admin comms: %d\n", error); 466 return error; 467 } 468 469 if (sc->sc_hw.qhw_init_arb != NULL && 470 (error = sc->sc_hw.qhw_init_arb(sc)) != 0) { 471 device_printf(sc->sc_dev, 472 "Could not initialize hw arbiter: %d\n", error); 473 return error; 474 } 475 476 error = qat_ae_init(sc); 477 if (error) { 478 device_printf(sc->sc_dev, 479 "Could not initialize Acceleration Engine: %d\n", error); 480 return error; 481 } 482 483 error = qat_aefw_load(sc); 484 if (error) { 485 device_printf(sc->sc_dev, 486 "Could not load firmware: %d\n", error); 487 return error; 488 } 489 490 error = qat_setup_msix_intr(sc); 491 if (error) { 492 device_printf(sc->sc_dev, 493 "Could not setup interrupts: %d\n", error); 494 return error; 495 } 496 497 sc->sc_hw.qhw_enable_intr(sc); 498 499 error = qat_crypto_init(sc); 500 if (error) { 501 device_printf(sc->sc_dev, 502 "Could not initialize service: %d\n", error); 503 return error; 504 } 505 506 if (sc->sc_hw.qhw_enable_error_correction != NULL) 507 sc->sc_hw.qhw_enable_error_correction(sc); 508 509 if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL && 510 (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) { 511 device_printf(sc->sc_dev, 512 "Could not initialize watchdog timer: %d\n", error); 513 return error; 514 } 515 516 error = qat_start(dev); 517 if (error) { 518 device_printf(sc->sc_dev, 519 "Could not start: %d\n", error); 520 return error; 521 } 522 523 return 0; 524} 525 526static int 527qat_start(device_t dev) 528{ 529 struct qat_softc *sc = device_get_softc(dev); 530 int error; 531 532 error = qat_ae_start(sc); 533 if (error) 534 return error; 535 536 if (sc->sc_hw.qhw_send_admin_init != NULL && 537 (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) { 538 return error; 539 } 540 541 error = qat_crypto_start(sc); 542 if (error) 543 return error; 544 545 return 0; 546} 547 548static int 549qat_detach(device_t dev) 550{ 551 struct qat_softc *sc; 552 int bar, i; 553 554 sc = device_get_softc(dev); 555 556 qat_crypto_stop(sc); 557 qat_crypto_deinit(sc); 558 qat_aefw_unload(sc); 559 560 if (sc->sc_etr_banks != NULL) { 561 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) { 562 struct qat_bank *qb = &sc->sc_etr_banks[i]; 563 564 if (qb->qb_ih_cookie != NULL) 565 (void)bus_teardown_intr(dev, qb->qb_ih, 566 qb->qb_ih_cookie); 567 if (qb->qb_ih != NULL) 568 (void)bus_release_resource(dev, SYS_RES_IRQ, 569 i + 1, qb->qb_ih); 570 } 571 } 572 if (sc->sc_ih_cookie != NULL) { 573 (void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie); 574 sc->sc_ih_cookie = NULL; 575 } 576 if (sc->sc_ih != NULL) { 577 (void)bus_release_resource(dev, SYS_RES_IRQ, 578 sc->sc_hw.qhw_num_banks + 1, sc->sc_ih); 579 sc->sc_ih = NULL; 580 } 581 pci_release_msi(dev); 582 583 qat_etr_deinit(sc); 584 585 for (bar = 0; bar < MAX_BARS; bar++) { 586 if (sc->sc_res[bar] != NULL) { 587 (void)bus_release_resource(dev, SYS_RES_MEMORY, 588 sc->sc_rid[bar], sc->sc_res[bar]); 589 sc->sc_res[bar] = NULL; 590 } 591 } 592 593 return 0; 594} 595 596void * 597qat_alloc_mem(size_t size) 598{ 599 return (malloc(size, M_QAT, M_WAITOK | M_ZERO)); 600} 601 602void 603qat_free_mem(void *ptr) 604{ 605 free(ptr, M_QAT); 606} 607 608static void 609qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg, 610 int error) 611{ 612 struct qat_dmamem *qdm; 613 614 if (error != 0) 615 return; 616 617 KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg)); 618 qdm = arg; 619 qdm->qdm_dma_seg = segs[0]; 620} 621 622int 623qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm, 624 int nseg, bus_size_t size, bus_size_t alignment) 625{ 626 int error; 627 628 KASSERT(qdm->qdm_dma_vaddr == NULL, 629 ("%s: DMA memory descriptor in use", __func__)); 630 631 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 632 alignment, 0, /* alignment, boundary */ 633 BUS_SPACE_MAXADDR, /* lowaddr */ 634 BUS_SPACE_MAXADDR, /* highaddr */ 635 NULL, NULL, /* filter, filterarg */ 636 size, /* maxsize */ 637 nseg, /* nsegments */ 638 size, /* maxsegsize */ 639 BUS_DMA_COHERENT, /* flags */ 640 NULL, NULL, /* lockfunc, lockarg */ 641 &qdm->qdm_dma_tag); 642 if (error != 0) 643 return error; 644 645 error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr, 646 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 647 &qdm->qdm_dma_map); 648 if (error != 0) { 649 device_printf(sc->sc_dev, 650 "couldn't allocate dmamem, error = %d\n", error); 651 goto fail_0; 652 } 653 654 error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map, 655 qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm, 656 BUS_DMA_NOWAIT); 657 if (error) { 658 device_printf(sc->sc_dev, 659 "couldn't load dmamem map, error = %d\n", error); 660 goto fail_1; 661 } 662 663 return 0; 664fail_1: 665 bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map); 666fail_0: 667 bus_dma_tag_destroy(qdm->qdm_dma_tag); 668 return error; 669} 670 671void 672qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm) 673{ 674 if (qdm->qdm_dma_tag != NULL) { 675 bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map); 676 bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, 677 qdm->qdm_dma_map); 678 bus_dma_tag_destroy(qdm->qdm_dma_tag); 679 explicit_bzero(qdm, sizeof(*qdm)); 680 } 681} 682 683static int 684qat_setup_msix_intr(struct qat_softc *sc) 685{ 686 device_t dev; 687 int error, i, rid; 688 689 dev = sc->sc_dev; 690 691 for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) { 692 struct qat_bank *qb = &sc->sc_etr_banks[i - 1]; 693 694 rid = i; 695 qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 696 RF_ACTIVE); 697 if (qb->qb_ih == NULL) { 698 device_printf(dev, 699 "failed to allocate bank intr resource\n"); 700 return ENXIO; 701 } 702 error = bus_setup_intr(dev, qb->qb_ih, 703 INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb, 704 &qb->qb_ih_cookie); 705 if (error != 0) { 706 device_printf(dev, "failed to set up bank intr\n"); 707 return error; 708 } 709 error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus); 710 if (error != 0) 711 device_printf(dev, "failed to bind intr %d\n", i); 712 } 713 714 rid = i; 715 sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 716 RF_ACTIVE); 717 if (sc->sc_ih == NULL) 718 return ENXIO; 719 error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE, 720 NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie); 721 722 return error; 723} 724 725static void 726qat_etr_init(struct qat_softc *sc) 727{ 728 int i; 729 730 sc->sc_etr_banks = qat_alloc_mem( 731 sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks); 732 733 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) 734 qat_etr_bank_init(sc, i); 735 736 if (sc->sc_hw.qhw_num_ap_banks) { 737 sc->sc_etr_ap_banks = qat_alloc_mem( 738 sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks); 739 qat_etr_ap_bank_init(sc); 740 } 741} 742 743static void 744qat_etr_deinit(struct qat_softc *sc) 745{ 746 int i; 747 748 if (sc->sc_etr_banks != NULL) { 749 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) 750 qat_etr_bank_deinit(sc, i); 751 qat_free_mem(sc->sc_etr_banks); 752 sc->sc_etr_banks = NULL; 753 } 754 if (sc->sc_etr_ap_banks != NULL) { 755 qat_free_mem(sc->sc_etr_ap_banks); 756 sc->sc_etr_ap_banks = NULL; 757 } 758} 759 760static void 761qat_etr_bank_init(struct qat_softc *sc, int bank) 762{ 763 struct qat_bank *qb = &sc->sc_etr_banks[bank]; 764 int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap; 765 766 MPASS(bank < sc->sc_hw.qhw_num_banks); 767 768 mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF); 769 770 qb->qb_sc = sc; 771 qb->qb_bank = bank; 772 qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT; 773 774 /* Clean CSRs for all rings within the bank */ 775 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) { 776 struct qat_ring *qr = &qb->qb_et_rings[i]; 777 778 qat_etr_bank_ring_write_4(sc, bank, i, 779 ETR_RING_CONFIG, 0); 780 qat_etr_bank_ring_base_write_8(sc, bank, i, 0); 781 782 if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) { 783 qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t)); 784 } else if (sc->sc_hw.qhw_tx_rings_mask & 785 (1 << (i - tx_rx_gap))) { 786 /* Share inflight counter with rx and tx */ 787 qr->qr_inflight = 788 qb->qb_et_rings[i - tx_rx_gap].qr_inflight; 789 } 790 } 791 792 if (sc->sc_hw.qhw_init_etr_intr != NULL) { 793 sc->sc_hw.qhw_init_etr_intr(sc, bank); 794 } else { 795 /* common code in qat 1.7 */ 796 qat_etr_bank_write_4(sc, bank, ETR_INT_REG, 797 ETR_INT_REG_CLEAR_MASK); 798 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank / 799 ETR_RINGS_PER_INT_SRCSEL; i++) { 800 qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL + 801 (i * ETR_INT_SRCSEL_NEXT_OFFSET), 802 ETR_INT_SRCSEL_MASK); 803 } 804 } 805} 806 807static void 808qat_etr_bank_deinit(struct qat_softc *sc, int bank) 809{ 810 struct qat_bank *qb; 811 struct qat_ring *qr; 812 int i; 813 814 qb = &sc->sc_etr_banks[bank]; 815 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) { 816 if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) { 817 qr = &qb->qb_et_rings[i]; 818 qat_free_mem(qr->qr_inflight); 819 } 820 } 821} 822 823static void 824qat_etr_ap_bank_init(struct qat_softc *sc) 825{ 826 int ap_bank; 827 828 for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) { 829 struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank]; 830 831 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK, 832 ETR_AP_NF_MASK_INIT); 833 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0); 834 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK, 835 ETR_AP_NE_MASK_INIT); 836 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0); 837 838 memset(qab, 0, sizeof(*qab)); 839 } 840} 841 842static void 843qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask) 844{ 845 if (set_mask) 846 *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring)); 847 else 848 *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring)); 849} 850 851static void 852qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest, 853 uint32_t ring, int set_dest) 854{ 855 uint32_t ae_mask; 856 uint8_t mailbox, ae, nae; 857 uint8_t *dest = (uint8_t *)ap_dest; 858 859 mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring); 860 861 nae = 0; 862 ae_mask = sc->sc_ae_mask; 863 for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) { 864 if ((ae_mask & (1 << ae)) == 0) 865 continue; 866 867 if (set_dest) { 868 dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) | 869 __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) | 870 ETR_AP_DEST_ENABLE; 871 } else { 872 dest[nae] = 0; 873 } 874 nae++; 875 if (nae == ETR_MAX_AE_PER_MAILBOX) 876 break; 877 } 878} 879 880static void 881qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr) 882{ 883 struct qat_ap_bank *qab; 884 int ap_bank; 885 886 if (sc->sc_hw.qhw_num_ap_banks == 0) 887 return; 888 889 ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring); 890 MPASS(ap_bank < sc->sc_hw.qhw_num_ap_banks); 891 qab = &sc->sc_etr_ap_banks[ap_bank]; 892 893 if (qr->qr_cb == NULL) { 894 qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1); 895 if (!qab->qab_ne_dest) { 896 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest, 897 qr->qr_ring, 1); 898 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 899 qab->qab_ne_dest); 900 } 901 } else { 902 qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1); 903 if (!qab->qab_nf_dest) { 904 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest, 905 qr->qr_ring, 1); 906 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 907 qab->qab_nf_dest); 908 } 909 } 910} 911 912static int 913qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs) 914{ 915 int i = QAT_MIN_RING_SIZE; 916 917 for (; i <= QAT_MAX_RING_SIZE; i++) 918 if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i)) 919 return i; 920 921 return QAT_DEFAULT_RING_SIZE; 922} 923 924int 925qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring, 926 uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg, 927 const char *name, struct qat_ring **rqr) 928{ 929 struct qat_bank *qb; 930 struct qat_ring *qr = NULL; 931 int error; 932 uint32_t ring_size_bytes, ring_config; 933 uint64_t ring_base; 934 uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512; 935 uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0; 936 937 MPASS(bank < sc->sc_hw.qhw_num_banks); 938 939 /* Allocate a ring from specified bank */ 940 qb = &sc->sc_etr_banks[bank]; 941 942 if (ring >= sc->sc_hw.qhw_num_rings_per_bank) 943 return EINVAL; 944 if (qb->qb_allocated_rings & (1 << ring)) 945 return ENOENT; 946 qr = &qb->qb_et_rings[ring]; 947 qb->qb_allocated_rings |= 1 << ring; 948 949 /* Initialize allocated ring */ 950 qr->qr_ring = ring; 951 qr->qr_bank = bank; 952 qr->qr_name = name; 953 qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring; 954 qr->qr_ring_mask = (1 << ring); 955 qr->qr_cb = cb; 956 qr->qr_cb_arg = cb_arg; 957 958 /* Setup the shadow variables */ 959 qr->qr_head = 0; 960 qr->qr_tail = 0; 961 qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size); 962 qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs); 963 964 /* 965 * To make sure that ring is alligned to ring size allocate 966 * at least 4k and then tell the user it is smaller. 967 */ 968 ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size); 969 ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes); 970 error = qat_alloc_dmamem(sc, &qr->qr_dma, 1, ring_size_bytes, 971 ring_size_bytes); 972 if (error) 973 return error; 974 975 qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr; 976 qr->qr_ring_paddr = qr->qr_dma.qdm_dma_seg.ds_addr; 977 978 memset(qr->qr_ring_vaddr, QAT_RING_PATTERN, 979 qr->qr_dma.qdm_dma_seg.ds_len); 980 981 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, 982 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 983 984 if (cb == NULL) { 985 ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size); 986 } else { 987 ring_config = 988 ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne); 989 } 990 qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config); 991 992 ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size); 993 qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base); 994 995 if (sc->sc_hw.qhw_init_arb != NULL) 996 qat_arb_update(sc, qb); 997 998 mtx_init(&qr->qr_ring_mtx, "qr ring", NULL, MTX_DEF); 999 1000 qat_etr_ap_bank_setup_ring(sc, qr); 1001 1002 if (cb != NULL) { 1003 uint32_t intr_mask; 1004 1005 qb->qb_intr_mask |= qr->qr_ring_mask; 1006 intr_mask = qb->qb_intr_mask; 1007 1008 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, intr_mask); 1009 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL, 1010 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); 1011 } 1012 1013 *rqr = qr; 1014 1015 return 0; 1016} 1017 1018static inline u_int 1019qat_modulo(u_int data, u_int shift) 1020{ 1021 u_int div = data >> shift; 1022 u_int mult = div << shift; 1023 return data - mult; 1024} 1025 1026int 1027qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg) 1028{ 1029 uint32_t inflight; 1030 uint32_t *addr; 1031 1032 mtx_lock(&qr->qr_ring_mtx); 1033 1034 inflight = atomic_fetchadd_32(qr->qr_inflight, 1) + 1; 1035 if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) { 1036 atomic_subtract_32(qr->qr_inflight, 1); 1037 qr->qr_need_wakeup = true; 1038 mtx_unlock(&qr->qr_ring_mtx); 1039 counter_u64_add(sc->sc_ring_full_restarts, 1); 1040 return ERESTART; 1041 } 1042 1043 addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail); 1044 1045 memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size)); 1046 1047 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, 1048 BUS_DMASYNC_PREWRITE); 1049 1050 qr->qr_tail = qat_modulo(qr->qr_tail + 1051 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1052 QAT_RING_SIZE_MODULO(qr->qr_ring_size)); 1053 1054 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, 1055 ETR_RING_TAIL_OFFSET, qr->qr_tail); 1056 1057 mtx_unlock(&qr->qr_ring_mtx); 1058 1059 return 0; 1060} 1061 1062static int 1063qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb, 1064 struct qat_ring *qr) 1065{ 1066 uint32_t *msg, nmsg = 0; 1067 int handled = 0; 1068 bool blocked = false; 1069 1070 mtx_lock(&qr->qr_ring_mtx); 1071 1072 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); 1073 1074 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, 1075 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1076 1077 while (atomic_load_32(msg) != ETR_RING_EMPTY_ENTRY_SIG) { 1078 atomic_subtract_32(qr->qr_inflight, 1); 1079 1080 if (qr->qr_cb != NULL) { 1081 mtx_unlock(&qr->qr_ring_mtx); 1082 handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg); 1083 mtx_lock(&qr->qr_ring_mtx); 1084 } 1085 1086 atomic_store_32(msg, ETR_RING_EMPTY_ENTRY_SIG); 1087 1088 qr->qr_head = qat_modulo(qr->qr_head + 1089 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1090 QAT_RING_SIZE_MODULO(qr->qr_ring_size)); 1091 nmsg++; 1092 1093 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); 1094 } 1095 1096 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, 1097 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1098 1099 if (nmsg > 0) { 1100 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, 1101 ETR_RING_HEAD_OFFSET, qr->qr_head); 1102 if (qr->qr_need_wakeup) { 1103 blocked = true; 1104 qr->qr_need_wakeup = false; 1105 } 1106 } 1107 1108 mtx_unlock(&qr->qr_ring_mtx); 1109 1110 if (blocked) 1111 crypto_unblock(sc->sc_crypto.qcy_cid, CRYPTO_SYMQ); 1112 1113 return handled; 1114} 1115 1116static void 1117qat_etr_bank_intr(void *arg) 1118{ 1119 struct qat_bank *qb = arg; 1120 struct qat_softc *sc = qb->qb_sc; 1121 uint32_t estat; 1122 int i, handled = 0; 1123 1124 mtx_lock(&qb->qb_bank_mtx); 1125 1126 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0); 1127 1128 /* Now handle all the responses */ 1129 estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT); 1130 estat &= qb->qb_intr_mask; 1131 1132 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 1133 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); 1134 1135 mtx_unlock(&qb->qb_bank_mtx); 1136 1137 while ((i = ffs(estat)) != 0) { 1138 struct qat_ring *qr = &qb->qb_et_rings[--i]; 1139 estat &= ~(1 << i); 1140 handled |= qat_etr_ring_intr(sc, qb, qr); 1141 } 1142} 1143 1144void 1145qat_arb_update(struct qat_softc *sc, struct qat_bank *qb) 1146{ 1147 1148 qat_arb_ringsrvarben_write_4(sc, qb->qb_bank, 1149 qb->qb_allocated_rings & 0xff); 1150} 1151 1152static struct qat_sym_cookie * 1153qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb) 1154{ 1155 struct qat_sym_cookie *qsc; 1156 1157 mtx_lock(&qcb->qcb_bank_mtx); 1158 1159 if (qcb->qcb_symck_free_count == 0) { 1160 mtx_unlock(&qcb->qcb_bank_mtx); 1161 return NULL; 1162 } 1163 1164 qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count]; 1165 1166 mtx_unlock(&qcb->qcb_bank_mtx); 1167 1168 return qsc; 1169} 1170 1171static void 1172qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, 1173 struct qat_sym_cookie *qsc) 1174{ 1175 explicit_bzero(qsc->qsc_iv_buf, EALG_MAX_BLOCK_LEN); 1176 explicit_bzero(qsc->qsc_auth_res, QAT_SYM_HASH_BUFFER_LEN); 1177 1178 mtx_lock(&qcb->qcb_bank_mtx); 1179 qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc; 1180 mtx_unlock(&qcb->qcb_bank_mtx); 1181} 1182 1183void 1184qat_memcpy_htobe64(void *dst, const void *src, size_t len) 1185{ 1186 uint64_t *dst0 = dst; 1187 const uint64_t *src0 = src; 1188 size_t i; 1189 1190 MPASS(len % sizeof(*dst0) == 0); 1191 1192 for (i = 0; i < len / sizeof(*dst0); i++) 1193 *(dst0 + i) = htobe64(*(src0 + i)); 1194} 1195 1196void 1197qat_memcpy_htobe32(void *dst, const void *src, size_t len) 1198{ 1199 uint32_t *dst0 = dst; 1200 const uint32_t *src0 = src; 1201 size_t i; 1202 1203 MPASS(len % sizeof(*dst0) == 0); 1204 1205 for (i = 0; i < len / sizeof(*dst0); i++) 1206 *(dst0 + i) = htobe32(*(src0 + i)); 1207} 1208 1209void 1210qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte) 1211{ 1212 switch (wordbyte) { 1213 case 4: 1214 qat_memcpy_htobe32(dst, src, len); 1215 break; 1216 case 8: 1217 qat_memcpy_htobe64(dst, src, len); 1218 break; 1219 default: 1220 panic("invalid word size %u", wordbyte); 1221 } 1222} 1223 1224void 1225qat_crypto_gmac_precompute(const struct qat_crypto_desc *desc, 1226 const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def, 1227 uint8_t *state) 1228{ 1229 uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)]; 1230 char zeros[AES_BLOCK_LEN]; 1231 int rounds; 1232 1233 memset(zeros, 0, sizeof(zeros)); 1234 rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY); 1235 rijndaelEncrypt(ks, rounds, zeros, state); 1236 explicit_bzero(ks, sizeof(ks)); 1237} 1238 1239void 1240qat_crypto_hmac_precompute(const struct qat_crypto_desc *desc, 1241 const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def, 1242 uint8_t *state1, uint8_t *state2) 1243{ 1244 union authctx ctx; 1245 const struct auth_hash *sah = hash_def->qshd_alg->qshai_sah; 1246 uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset; 1247 uint32_t state_size = hash_def->qshd_alg->qshai_state_size; 1248 uint32_t state_word = hash_def->qshd_alg->qshai_state_word; 1249 1250 hmac_init_ipad(sah, key, klen, &ctx); 1251 qat_memcpy_htobe(state1, (uint8_t *)&ctx + state_offset, state_size, 1252 state_word); 1253 hmac_init_opad(sah, key, klen, &ctx); 1254 qat_memcpy_htobe(state2, (uint8_t *)&ctx + state_offset, state_size, 1255 state_word); 1256 explicit_bzero(&ctx, sizeof(ctx)); 1257} 1258 1259static enum hw_cipher_algo 1260qat_aes_cipher_algo(int klen) 1261{ 1262 switch (klen) { 1263 case HW_AES_128_KEY_SZ: 1264 return HW_CIPHER_ALGO_AES128; 1265 case HW_AES_192_KEY_SZ: 1266 return HW_CIPHER_ALGO_AES192; 1267 case HW_AES_256_KEY_SZ: 1268 return HW_CIPHER_ALGO_AES256; 1269 default: 1270 panic("invalid key length %d", klen); 1271 } 1272} 1273 1274uint16_t 1275qat_crypto_load_cipher_session(const struct qat_crypto_desc *desc, 1276 const struct qat_session *qs) 1277{ 1278 enum hw_cipher_algo algo; 1279 enum hw_cipher_dir dir; 1280 enum hw_cipher_convert key_convert; 1281 enum hw_cipher_mode mode; 1282 1283 dir = desc->qcd_cipher_dir; 1284 key_convert = HW_CIPHER_NO_CONVERT; 1285 mode = qs->qs_cipher_mode; 1286 switch (mode) { 1287 case HW_CIPHER_CBC_MODE: 1288 case HW_CIPHER_XTS_MODE: 1289 algo = qs->qs_cipher_algo; 1290 1291 /* 1292 * AES decrypt key needs to be reversed. 1293 * Instead of reversing the key at session registration, 1294 * it is instead reversed on-the-fly by setting the KEY_CONVERT 1295 * bit here. 1296 */ 1297 if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT) 1298 key_convert = HW_CIPHER_KEY_CONVERT; 1299 break; 1300 case HW_CIPHER_CTR_MODE: 1301 algo = qs->qs_cipher_algo; 1302 dir = HW_CIPHER_ENCRYPT; 1303 break; 1304 default: 1305 panic("unhandled cipher mode %d", mode); 1306 break; 1307 } 1308 1309 return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, dir); 1310} 1311 1312uint16_t 1313qat_crypto_load_auth_session(const struct qat_crypto_desc *desc, 1314 const struct qat_session *qs, const struct qat_sym_hash_def **hash_def) 1315{ 1316 enum qat_sym_hash_algorithm algo; 1317 1318 switch (qs->qs_auth_algo) { 1319 case HW_AUTH_ALGO_SHA1: 1320 algo = QAT_SYM_HASH_SHA1; 1321 break; 1322 case HW_AUTH_ALGO_SHA256: 1323 algo = QAT_SYM_HASH_SHA256; 1324 break; 1325 case HW_AUTH_ALGO_SHA384: 1326 algo = QAT_SYM_HASH_SHA384; 1327 break; 1328 case HW_AUTH_ALGO_SHA512: 1329 algo = QAT_SYM_HASH_SHA512; 1330 break; 1331 case HW_AUTH_ALGO_GALOIS_128: 1332 algo = QAT_SYM_HASH_AES_GCM; 1333 break; 1334 default: 1335 panic("unhandled auth algorithm %d", qs->qs_auth_algo); 1336 break; 1337 } 1338 *hash_def = &qat_sym_hash_defs[algo]; 1339 1340 return HW_AUTH_CONFIG_BUILD(qs->qs_auth_mode, 1341 (*hash_def)->qshd_qat->qshqi_algo_enc, 1342 (*hash_def)->qshd_alg->qshai_digest_len); 1343} 1344 1345struct qat_crypto_load_cb_arg { 1346 struct qat_session *qs; 1347 struct qat_sym_cookie *qsc; 1348 struct cryptop *crp; 1349 int error; 1350}; 1351 1352static int 1353qat_crypto_populate_buf_list(struct buffer_list_desc *buffers, 1354 bus_dma_segment_t *segs, int niseg, int noseg, int skip) 1355{ 1356 struct flat_buffer_desc *flatbuf; 1357 bus_addr_t addr; 1358 bus_size_t len; 1359 int iseg, oseg; 1360 1361 for (iseg = 0, oseg = noseg; iseg < niseg && oseg < QAT_MAXSEG; 1362 iseg++) { 1363 addr = segs[iseg].ds_addr; 1364 len = segs[iseg].ds_len; 1365 1366 if (skip > 0) { 1367 if (skip < len) { 1368 addr += skip; 1369 len -= skip; 1370 skip = 0; 1371 } else { 1372 skip -= len; 1373 continue; 1374 } 1375 } 1376 1377 flatbuf = &buffers->flat_bufs[oseg++]; 1378 flatbuf->data_len_in_bytes = (uint32_t)len; 1379 flatbuf->phy_buffer = (uint64_t)addr; 1380 } 1381 buffers->num_buffers = oseg; 1382 return iseg < niseg ? E2BIG : 0; 1383} 1384 1385static void 1386qat_crypto_load_aadbuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg, 1387 int error) 1388{ 1389 struct qat_crypto_load_cb_arg *arg; 1390 struct qat_sym_cookie *qsc; 1391 1392 arg = _arg; 1393 if (error != 0) { 1394 arg->error = error; 1395 return; 1396 } 1397 1398 qsc = arg->qsc; 1399 arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs, 1400 nseg, 0, 0); 1401} 1402 1403static void 1404qat_crypto_load_buf_cb(void *_arg, bus_dma_segment_t *segs, int nseg, 1405 int error) 1406{ 1407 struct cryptop *crp; 1408 struct qat_crypto_load_cb_arg *arg; 1409 struct qat_session *qs; 1410 struct qat_sym_cookie *qsc; 1411 int noseg, skip; 1412 1413 arg = _arg; 1414 if (error != 0) { 1415 arg->error = error; 1416 return; 1417 } 1418 1419 crp = arg->crp; 1420 qs = arg->qs; 1421 qsc = arg->qsc; 1422 1423 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { 1424 /* AAD was handled in qat_crypto_load(). */ 1425 skip = crp->crp_payload_start; 1426 noseg = 0; 1427 } else if (crp->crp_aad == NULL && crp->crp_aad_length > 0) { 1428 skip = crp->crp_aad_start; 1429 noseg = 0; 1430 } else { 1431 skip = crp->crp_payload_start; 1432 noseg = crp->crp_aad == NULL ? 1433 0 : qsc->qsc_buf_list.num_buffers; 1434 } 1435 arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs, 1436 nseg, noseg, skip); 1437} 1438 1439static void 1440qat_crypto_load_obuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg, 1441 int error) 1442{ 1443 struct buffer_list_desc *ibufs, *obufs; 1444 struct flat_buffer_desc *ibuf, *obuf; 1445 struct cryptop *crp; 1446 struct qat_crypto_load_cb_arg *arg; 1447 struct qat_session *qs; 1448 struct qat_sym_cookie *qsc; 1449 int buflen, osegs, tocopy; 1450 1451 arg = _arg; 1452 if (error != 0) { 1453 arg->error = error; 1454 return; 1455 } 1456 1457 crp = arg->crp; 1458 qs = arg->qs; 1459 qsc = arg->qsc; 1460 1461 /* 1462 * The payload must start at the same offset in the output SG list as in 1463 * the input SG list. Copy over SG entries from the input corresponding 1464 * to the AAD buffer. 1465 */ 1466 osegs = 0; 1467 if (qs->qs_auth_algo != HW_AUTH_ALGO_GALOIS_128 && 1468 crp->crp_aad_length > 0) { 1469 tocopy = crp->crp_aad == NULL ? 1470 crp->crp_payload_start - crp->crp_aad_start : 1471 crp->crp_aad_length; 1472 1473 ibufs = &qsc->qsc_buf_list; 1474 obufs = &qsc->qsc_obuf_list; 1475 for (; osegs < ibufs->num_buffers && tocopy > 0; osegs++) { 1476 ibuf = &ibufs->flat_bufs[osegs]; 1477 obuf = &obufs->flat_bufs[osegs]; 1478 1479 obuf->phy_buffer = ibuf->phy_buffer; 1480 buflen = imin(ibuf->data_len_in_bytes, tocopy); 1481 obuf->data_len_in_bytes = buflen; 1482 tocopy -= buflen; 1483 } 1484 } 1485 1486 arg->error = qat_crypto_populate_buf_list(&qsc->qsc_obuf_list, segs, 1487 nseg, osegs, crp->crp_payload_output_start); 1488} 1489 1490static int 1491qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc, 1492 struct qat_crypto_desc const *desc, struct cryptop *crp) 1493{ 1494 struct qat_crypto_load_cb_arg arg; 1495 int error; 1496 1497 crypto_read_iv(crp, qsc->qsc_iv_buf); 1498 1499 arg.crp = crp; 1500 arg.qs = qs; 1501 arg.qsc = qsc; 1502 arg.error = 0; 1503 1504 error = 0; 1505 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128 && 1506 crp->crp_aad_length > 0) { 1507 /* 1508 * The firmware expects AAD to be in a contiguous buffer and 1509 * padded to a multiple of 16 bytes. To satisfy these 1510 * constraints we bounce the AAD into a per-request buffer. 1511 * There is a small limit on the AAD size so this is not too 1512 * onerous. 1513 */ 1514 memset(qsc->qsc_gcm_aad, 0, QAT_GCM_AAD_SIZE_MAX); 1515 if (crp->crp_aad == NULL) { 1516 crypto_copydata(crp, crp->crp_aad_start, 1517 crp->crp_aad_length, qsc->qsc_gcm_aad); 1518 } else { 1519 memcpy(qsc->qsc_gcm_aad, crp->crp_aad, 1520 crp->crp_aad_length); 1521 } 1522 } else if (crp->crp_aad != NULL) { 1523 error = bus_dmamap_load( 1524 qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag, 1525 qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap, 1526 crp->crp_aad, crp->crp_aad_length, 1527 qat_crypto_load_aadbuf_cb, &arg, BUS_DMA_NOWAIT); 1528 if (error == 0) 1529 error = arg.error; 1530 } 1531 if (error == 0) { 1532 error = bus_dmamap_load_crp_buffer( 1533 qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag, 1534 qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap, 1535 &crp->crp_buf, qat_crypto_load_buf_cb, &arg, 1536 BUS_DMA_NOWAIT); 1537 if (error == 0) 1538 error = arg.error; 1539 } 1540 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) { 1541 error = bus_dmamap_load_crp_buffer( 1542 qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag, 1543 qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap, 1544 &crp->crp_obuf, qat_crypto_load_obuf_cb, &arg, 1545 BUS_DMA_NOWAIT); 1546 if (error == 0) 1547 error = arg.error; 1548 } 1549 return error; 1550} 1551 1552static inline struct qat_crypto_bank * 1553qat_crypto_select_bank(struct qat_crypto *qcy) 1554{ 1555 u_int cpuid = PCPU_GET(cpuid); 1556 1557 return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks]; 1558} 1559 1560static int 1561qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb) 1562{ 1563 char *name; 1564 int bank, curname, error, i, j; 1565 1566 bank = qcb->qcb_bank; 1567 curname = 0; 1568 1569 name = qcb->qcb_ring_names[curname++]; 1570 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank); 1571 error = qat_etr_setup_ring(sc, qcb->qcb_bank, 1572 sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size, 1573 NULL, NULL, name, &qcb->qcb_sym_tx); 1574 if (error) 1575 return error; 1576 1577 name = qcb->qcb_ring_names[curname++]; 1578 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank); 1579 error = qat_etr_setup_ring(sc, qcb->qcb_bank, 1580 sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size, 1581 qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx); 1582 if (error) 1583 return error; 1584 1585 for (i = 0; i < QAT_NSYMCOOKIE; i++) { 1586 struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i]; 1587 struct qat_sym_cookie *qsc; 1588 1589 error = qat_alloc_dmamem(sc, qdm, 1, 1590 sizeof(struct qat_sym_cookie), QAT_OPTIMAL_ALIGN); 1591 if (error) 1592 return error; 1593 1594 qsc = qdm->qdm_dma_vaddr; 1595 qsc->qsc_self_dmamap = qdm->qdm_dma_map; 1596 qsc->qsc_self_dma_tag = qdm->qdm_dma_tag; 1597 qsc->qsc_bulk_req_params_buf_paddr = 1598 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1599 qsc_bulk_cookie.qsbc_req_params_buf); 1600 qsc->qsc_buffer_list_desc_paddr = 1601 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1602 qsc_buf_list); 1603 qsc->qsc_obuffer_list_desc_paddr = 1604 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1605 qsc_obuf_list); 1606 qsc->qsc_obuffer_list_desc_paddr = 1607 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1608 qsc_obuf_list); 1609 qsc->qsc_iv_buf_paddr = 1610 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1611 qsc_iv_buf); 1612 qsc->qsc_auth_res_paddr = 1613 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1614 qsc_auth_res); 1615 qsc->qsc_gcm_aad_paddr = 1616 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1617 qsc_gcm_aad); 1618 qsc->qsc_content_desc_paddr = 1619 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1620 qsc_content_desc); 1621 qcb->qcb_symck_free[i] = qsc; 1622 qcb->qcb_symck_free_count++; 1623 1624 for (j = 0; j < QAT_SYM_DMA_COUNT; j++) { 1625 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1626 1, 0, /* alignment, boundary */ 1627 BUS_SPACE_MAXADDR, /* lowaddr */ 1628 BUS_SPACE_MAXADDR, /* highaddr */ 1629 NULL, NULL, /* filter, filterarg */ 1630 QAT_MAXLEN, /* maxsize */ 1631 QAT_MAXSEG, /* nsegments */ 1632 QAT_MAXLEN, /* maxsegsize */ 1633 BUS_DMA_COHERENT, /* flags */ 1634 NULL, NULL, /* lockfunc, lockarg */ 1635 &qsc->qsc_dma[j].qsd_dma_tag); 1636 if (error != 0) 1637 return error; 1638 error = bus_dmamap_create(qsc->qsc_dma[j].qsd_dma_tag, 1639 BUS_DMA_COHERENT, &qsc->qsc_dma[j].qsd_dmamap); 1640 if (error != 0) 1641 return error; 1642 } 1643 } 1644 1645 return 0; 1646} 1647 1648static int 1649qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb) 1650{ 1651 mtx_init(&qcb->qcb_bank_mtx, "qcb bank", NULL, MTX_DEF); 1652 1653 return qat_crypto_setup_ring(sc, qcb); 1654} 1655 1656static void 1657qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb) 1658{ 1659 struct qat_dmamem *qdm; 1660 struct qat_sym_cookie *qsc; 1661 int i, j; 1662 1663 for (i = 0; i < QAT_NSYMCOOKIE; i++) { 1664 qdm = &qcb->qcb_symck_dmamems[i]; 1665 qsc = qcb->qcb_symck_free[i]; 1666 for (j = 0; j < QAT_SYM_DMA_COUNT; j++) { 1667 bus_dmamap_destroy(qsc->qsc_dma[j].qsd_dma_tag, 1668 qsc->qsc_dma[j].qsd_dmamap); 1669 bus_dma_tag_destroy(qsc->qsc_dma[j].qsd_dma_tag); 1670 } 1671 qat_free_dmamem(sc, qdm); 1672 } 1673 qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma); 1674 qat_free_dmamem(sc, &qcb->qcb_sym_rx->qr_dma); 1675 1676 mtx_destroy(&qcb->qcb_bank_mtx); 1677} 1678 1679static int 1680qat_crypto_init(struct qat_softc *sc) 1681{ 1682 struct qat_crypto *qcy = &sc->sc_crypto; 1683 struct sysctl_ctx_list *ctx; 1684 struct sysctl_oid *oid; 1685 struct sysctl_oid_list *children; 1686 int bank, error, num_banks; 1687 1688 qcy->qcy_sc = sc; 1689 1690 if (sc->sc_hw.qhw_init_arb != NULL) 1691 num_banks = imin(mp_ncpus, sc->sc_hw.qhw_num_banks); 1692 else 1693 num_banks = sc->sc_ae_num; 1694 1695 qcy->qcy_num_banks = num_banks; 1696 1697 qcy->qcy_banks = 1698 qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks); 1699 1700 for (bank = 0; bank < num_banks; bank++) { 1701 struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank]; 1702 qcb->qcb_bank = bank; 1703 error = qat_crypto_bank_init(sc, qcb); 1704 if (error) 1705 return error; 1706 } 1707 1708 mtx_init(&qcy->qcy_crypto_mtx, "qcy crypto", NULL, MTX_DEF); 1709 1710 ctx = device_get_sysctl_ctx(sc->sc_dev); 1711 oid = device_get_sysctl_tree(sc->sc_dev); 1712 children = SYSCTL_CHILDREN(oid); 1713 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", 1714 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); 1715 children = SYSCTL_CHILDREN(oid); 1716 1717 sc->sc_gcm_aad_restarts = counter_u64_alloc(M_WAITOK); 1718 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_restarts", 1719 CTLFLAG_RD, &sc->sc_gcm_aad_restarts, 1720 "GCM requests deferred due to AAD size change"); 1721 sc->sc_gcm_aad_updates = counter_u64_alloc(M_WAITOK); 1722 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_updates", 1723 CTLFLAG_RD, &sc->sc_gcm_aad_updates, 1724 "GCM requests that required session state update"); 1725 sc->sc_ring_full_restarts = counter_u64_alloc(M_WAITOK); 1726 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ring_full", 1727 CTLFLAG_RD, &sc->sc_ring_full_restarts, 1728 "Requests deferred due to in-flight max reached"); 1729 sc->sc_sym_alloc_failures = counter_u64_alloc(M_WAITOK); 1730 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sym_alloc_failures", 1731 CTLFLAG_RD, &sc->sc_sym_alloc_failures, 1732 "Request allocation failures"); 1733 1734 return 0; 1735} 1736 1737static void 1738qat_crypto_deinit(struct qat_softc *sc) 1739{ 1740 struct qat_crypto *qcy = &sc->sc_crypto; 1741 struct qat_crypto_bank *qcb; 1742 int bank; 1743 1744 counter_u64_free(sc->sc_sym_alloc_failures); 1745 counter_u64_free(sc->sc_ring_full_restarts); 1746 counter_u64_free(sc->sc_gcm_aad_updates); 1747 counter_u64_free(sc->sc_gcm_aad_restarts); 1748 1749 if (qcy->qcy_banks != NULL) { 1750 for (bank = 0; bank < qcy->qcy_num_banks; bank++) { 1751 qcb = &qcy->qcy_banks[bank]; 1752 qat_crypto_bank_deinit(sc, qcb); 1753 } 1754 qat_free_mem(qcy->qcy_banks); 1755 mtx_destroy(&qcy->qcy_crypto_mtx); 1756 } 1757} 1758 1759static int 1760qat_crypto_start(struct qat_softc *sc) 1761{ 1762 struct qat_crypto *qcy; 1763 1764 qcy = &sc->sc_crypto; 1765 qcy->qcy_cid = crypto_get_driverid(sc->sc_dev, 1766 sizeof(struct qat_session), CRYPTOCAP_F_HARDWARE); 1767 if (qcy->qcy_cid < 0) { 1768 device_printf(sc->sc_dev, 1769 "could not get opencrypto driver id\n"); 1770 return ENOENT; 1771 } 1772 1773 return 0; 1774} 1775 1776static void 1777qat_crypto_stop(struct qat_softc *sc) 1778{ 1779 struct qat_crypto *qcy; 1780 1781 qcy = &sc->sc_crypto; 1782 if (qcy->qcy_cid >= 0) 1783 (void)crypto_unregister_all(qcy->qcy_cid); 1784} 1785 1786static void 1787qat_crypto_sym_dma_unload(struct qat_sym_cookie *qsc, enum qat_sym_dma i) 1788{ 1789 bus_dmamap_sync(qsc->qsc_dma[i].qsd_dma_tag, qsc->qsc_dma[i].qsd_dmamap, 1790 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1791 bus_dmamap_unload(qsc->qsc_dma[i].qsd_dma_tag, 1792 qsc->qsc_dma[i].qsd_dmamap); 1793} 1794 1795static int 1796qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg) 1797{ 1798 char icv[QAT_SYM_HASH_BUFFER_LEN]; 1799 struct qat_crypto_bank *qcb = arg; 1800 struct qat_crypto *qcy; 1801 struct qat_session *qs; 1802 struct qat_sym_cookie *qsc; 1803 struct qat_sym_bulk_cookie *qsbc; 1804 struct cryptop *crp; 1805 int error; 1806 uint16_t auth_sz; 1807 bool blocked; 1808 1809 qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset); 1810 1811 qsbc = &qsc->qsc_bulk_cookie; 1812 qcy = qsbc->qsbc_crypto; 1813 qs = qsbc->qsbc_session; 1814 crp = qsbc->qsbc_cb_tag; 1815 1816 bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap, 1817 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1818 1819 if (crp->crp_aad != NULL) 1820 qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_AADBUF); 1821 qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_BUF); 1822 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) 1823 qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_OBUF); 1824 1825 error = 0; 1826 if ((auth_sz = qs->qs_auth_mlen) != 0) { 1827 if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) { 1828 crypto_copydata(crp, crp->crp_digest_start, 1829 auth_sz, icv); 1830 if (timingsafe_bcmp(icv, qsc->qsc_auth_res, 1831 auth_sz) != 0) { 1832 error = EBADMSG; 1833 } 1834 } else { 1835 crypto_copyback(crp, crp->crp_digest_start, 1836 auth_sz, qsc->qsc_auth_res); 1837 } 1838 } 1839 1840 qat_crypto_free_sym_cookie(qcb, qsc); 1841 1842 blocked = false; 1843 mtx_lock(&qs->qs_session_mtx); 1844 MPASS(qs->qs_status & QAT_SESSION_STATUS_ACTIVE); 1845 qs->qs_inflight--; 1846 if (__predict_false(qs->qs_need_wakeup && qs->qs_inflight == 0)) { 1847 blocked = true; 1848 qs->qs_need_wakeup = false; 1849 } 1850 mtx_unlock(&qs->qs_session_mtx); 1851 1852 crp->crp_etype = error; 1853 crypto_done(crp); 1854 1855 if (blocked) 1856 crypto_unblock(qcy->qcy_cid, CRYPTO_SYMQ); 1857 1858 return 1; 1859} 1860 1861static int 1862qat_probesession(device_t dev, const struct crypto_session_params *csp) 1863{ 1864 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 1865 0) 1866 return EINVAL; 1867 1868 if (csp->csp_cipher_alg == CRYPTO_AES_XTS && 1869 qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) { 1870 /* 1871 * AES-XTS is not supported by the NanoQAT. 1872 */ 1873 return EINVAL; 1874 } 1875 1876 switch (csp->csp_mode) { 1877 case CSP_MODE_CIPHER: 1878 switch (csp->csp_cipher_alg) { 1879 case CRYPTO_AES_CBC: 1880 case CRYPTO_AES_ICM: 1881 if (csp->csp_ivlen != AES_BLOCK_LEN) 1882 return EINVAL; 1883 break; 1884 case CRYPTO_AES_XTS: 1885 if (csp->csp_ivlen != AES_XTS_IV_LEN) 1886 return EINVAL; 1887 break; 1888 default: 1889 return EINVAL; 1890 } 1891 break; 1892 case CSP_MODE_DIGEST: 1893 switch (csp->csp_auth_alg) { 1894 case CRYPTO_SHA1: 1895 case CRYPTO_SHA1_HMAC: 1896 case CRYPTO_SHA2_256: 1897 case CRYPTO_SHA2_256_HMAC: 1898 case CRYPTO_SHA2_384: 1899 case CRYPTO_SHA2_384_HMAC: 1900 case CRYPTO_SHA2_512: 1901 case CRYPTO_SHA2_512_HMAC: 1902 break; 1903 case CRYPTO_AES_NIST_GMAC: 1904 if (csp->csp_ivlen != AES_GCM_IV_LEN) 1905 return EINVAL; 1906 break; 1907 default: 1908 return EINVAL; 1909 } 1910 break; 1911 case CSP_MODE_AEAD: 1912 switch (csp->csp_cipher_alg) { 1913 case CRYPTO_AES_NIST_GCM_16: 1914 if (csp->csp_ivlen != AES_GCM_IV_LEN) 1915 return EINVAL; 1916 break; 1917 default: 1918 return EINVAL; 1919 } 1920 break; 1921 case CSP_MODE_ETA: 1922 switch (csp->csp_auth_alg) { 1923 case CRYPTO_SHA1_HMAC: 1924 case CRYPTO_SHA2_256_HMAC: 1925 case CRYPTO_SHA2_384_HMAC: 1926 case CRYPTO_SHA2_512_HMAC: 1927 switch (csp->csp_cipher_alg) { 1928 case CRYPTO_AES_CBC: 1929 case CRYPTO_AES_ICM: 1930 if (csp->csp_ivlen != AES_BLOCK_LEN) 1931 return EINVAL; 1932 break; 1933 case CRYPTO_AES_XTS: 1934 if (csp->csp_ivlen != AES_XTS_IV_LEN) 1935 return EINVAL; 1936 break; 1937 default: 1938 return EINVAL; 1939 } 1940 break; 1941 default: 1942 return EINVAL; 1943 } 1944 break; 1945 default: 1946 return EINVAL; 1947 } 1948 1949 return CRYPTODEV_PROBE_HARDWARE; 1950} 1951 1952static int 1953qat_newsession(device_t dev, crypto_session_t cses, 1954 const struct crypto_session_params *csp) 1955{ 1956 struct qat_crypto *qcy; 1957 struct qat_dmamem *qdm; 1958 struct qat_session *qs; 1959 struct qat_softc *sc; 1960 struct qat_crypto_desc *ddesc, *edesc; 1961 int error, slices; 1962 1963 sc = device_get_softc(dev); 1964 qs = crypto_get_driver_session(cses); 1965 qcy = &sc->sc_crypto; 1966 1967 qdm = &qs->qs_desc_mem; 1968 error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG, 1969 sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN); 1970 if (error != 0) 1971 return error; 1972 1973 mtx_init(&qs->qs_session_mtx, "qs session", NULL, MTX_DEF); 1974 qs->qs_aad_length = -1; 1975 1976 qs->qs_dec_desc = ddesc = qdm->qdm_dma_vaddr; 1977 qs->qs_enc_desc = edesc = ddesc + 1; 1978 1979 ddesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr; 1980 ddesc->qcd_hash_state_paddr = ddesc->qcd_desc_paddr + 1981 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); 1982 edesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr + 1983 sizeof(struct qat_crypto_desc); 1984 edesc->qcd_hash_state_paddr = edesc->qcd_desc_paddr + 1985 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); 1986 1987 qs->qs_status = QAT_SESSION_STATUS_ACTIVE; 1988 qs->qs_inflight = 0; 1989 1990 qs->qs_cipher_key = csp->csp_cipher_key; 1991 qs->qs_cipher_klen = csp->csp_cipher_klen; 1992 qs->qs_auth_key = csp->csp_auth_key; 1993 qs->qs_auth_klen = csp->csp_auth_klen; 1994 1995 switch (csp->csp_cipher_alg) { 1996 case CRYPTO_AES_CBC: 1997 qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); 1998 qs->qs_cipher_mode = HW_CIPHER_CBC_MODE; 1999 break; 2000 case CRYPTO_AES_ICM: 2001 qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); 2002 qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; 2003 break; 2004 case CRYPTO_AES_XTS: 2005 qs->qs_cipher_algo = 2006 qat_aes_cipher_algo(csp->csp_cipher_klen / 2); 2007 qs->qs_cipher_mode = HW_CIPHER_XTS_MODE; 2008 break; 2009 case CRYPTO_AES_NIST_GCM_16: 2010 qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); 2011 qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; 2012 qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128; 2013 qs->qs_auth_mode = HW_AUTH_MODE1; 2014 break; 2015 case 0: 2016 break; 2017 default: 2018 panic("%s: unhandled cipher algorithm %d", __func__, 2019 csp->csp_cipher_alg); 2020 } 2021 2022 switch (csp->csp_auth_alg) { 2023 case CRYPTO_SHA1_HMAC: 2024 qs->qs_auth_algo = HW_AUTH_ALGO_SHA1; 2025 qs->qs_auth_mode = HW_AUTH_MODE1; 2026 break; 2027 case CRYPTO_SHA1: 2028 qs->qs_auth_algo = HW_AUTH_ALGO_SHA1; 2029 qs->qs_auth_mode = HW_AUTH_MODE0; 2030 break; 2031 case CRYPTO_SHA2_256_HMAC: 2032 qs->qs_auth_algo = HW_AUTH_ALGO_SHA256; 2033 qs->qs_auth_mode = HW_AUTH_MODE1; 2034 break; 2035 case CRYPTO_SHA2_256: 2036 qs->qs_auth_algo = HW_AUTH_ALGO_SHA256; 2037 qs->qs_auth_mode = HW_AUTH_MODE0; 2038 break; 2039 case CRYPTO_SHA2_384_HMAC: 2040 qs->qs_auth_algo = HW_AUTH_ALGO_SHA384; 2041 qs->qs_auth_mode = HW_AUTH_MODE1; 2042 break; 2043 case CRYPTO_SHA2_384: 2044 qs->qs_auth_algo = HW_AUTH_ALGO_SHA384; 2045 qs->qs_auth_mode = HW_AUTH_MODE0; 2046 break; 2047 case CRYPTO_SHA2_512_HMAC: 2048 qs->qs_auth_algo = HW_AUTH_ALGO_SHA512; 2049 qs->qs_auth_mode = HW_AUTH_MODE1; 2050 break; 2051 case CRYPTO_SHA2_512: 2052 qs->qs_auth_algo = HW_AUTH_ALGO_SHA512; 2053 qs->qs_auth_mode = HW_AUTH_MODE0; 2054 break; 2055 case CRYPTO_AES_NIST_GMAC: 2056 qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen); 2057 qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; 2058 qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128; 2059 qs->qs_auth_mode = HW_AUTH_MODE1; 2060 2061 qs->qs_cipher_key = qs->qs_auth_key; 2062 qs->qs_cipher_klen = qs->qs_auth_klen; 2063 break; 2064 case 0: 2065 break; 2066 default: 2067 panic("%s: unhandled auth algorithm %d", __func__, 2068 csp->csp_auth_alg); 2069 } 2070 2071 slices = 0; 2072 switch (csp->csp_mode) { 2073 case CSP_MODE_AEAD: 2074 case CSP_MODE_ETA: 2075 /* auth then decrypt */ 2076 ddesc->qcd_slices[0] = FW_SLICE_AUTH; 2077 ddesc->qcd_slices[1] = FW_SLICE_CIPHER; 2078 ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; 2079 ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER; 2080 /* encrypt then auth */ 2081 edesc->qcd_slices[0] = FW_SLICE_CIPHER; 2082 edesc->qcd_slices[1] = FW_SLICE_AUTH; 2083 edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; 2084 edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH; 2085 slices = 2; 2086 break; 2087 case CSP_MODE_CIPHER: 2088 /* decrypt */ 2089 ddesc->qcd_slices[0] = FW_SLICE_CIPHER; 2090 ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; 2091 ddesc->qcd_cmd_id = FW_LA_CMD_CIPHER; 2092 /* encrypt */ 2093 edesc->qcd_slices[0] = FW_SLICE_CIPHER; 2094 edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; 2095 edesc->qcd_cmd_id = FW_LA_CMD_CIPHER; 2096 slices = 1; 2097 break; 2098 case CSP_MODE_DIGEST: 2099 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { 2100 /* auth then decrypt */ 2101 ddesc->qcd_slices[0] = FW_SLICE_AUTH; 2102 ddesc->qcd_slices[1] = FW_SLICE_CIPHER; 2103 ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; 2104 ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER; 2105 /* encrypt then auth */ 2106 edesc->qcd_slices[0] = FW_SLICE_CIPHER; 2107 edesc->qcd_slices[1] = FW_SLICE_AUTH; 2108 edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; 2109 edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH; 2110 slices = 2; 2111 } else { 2112 ddesc->qcd_slices[0] = FW_SLICE_AUTH; 2113 ddesc->qcd_cmd_id = FW_LA_CMD_AUTH; 2114 edesc->qcd_slices[0] = FW_SLICE_AUTH; 2115 edesc->qcd_cmd_id = FW_LA_CMD_AUTH; 2116 slices = 1; 2117 } 2118 break; 2119 default: 2120 panic("%s: unhandled crypto algorithm %d, %d", __func__, 2121 csp->csp_cipher_alg, csp->csp_auth_alg); 2122 } 2123 ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR; 2124 edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR; 2125 2126 qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc); 2127 qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc); 2128 2129 if (csp->csp_auth_mlen != 0) 2130 qs->qs_auth_mlen = csp->csp_auth_mlen; 2131 else 2132 qs->qs_auth_mlen = edesc->qcd_auth_sz; 2133 2134 /* Compute the GMAC by specifying a null cipher payload. */ 2135 if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) 2136 ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH; 2137 2138 return 0; 2139} 2140 2141static void 2142qat_crypto_clear_desc(struct qat_crypto_desc *desc) 2143{ 2144 explicit_bzero(desc->qcd_content_desc, sizeof(desc->qcd_content_desc)); 2145 explicit_bzero(desc->qcd_hash_state_prefix_buf, 2146 sizeof(desc->qcd_hash_state_prefix_buf)); 2147 explicit_bzero(desc->qcd_req_cache, sizeof(desc->qcd_req_cache)); 2148} 2149 2150static void 2151qat_freesession(device_t dev, crypto_session_t cses) 2152{ 2153 struct qat_session *qs; 2154 2155 qs = crypto_get_driver_session(cses); 2156 KASSERT(qs->qs_inflight == 0, 2157 ("%s: session %p has requests in flight", __func__, qs)); 2158 2159 qat_crypto_clear_desc(qs->qs_enc_desc); 2160 qat_crypto_clear_desc(qs->qs_dec_desc); 2161 qat_free_dmamem(device_get_softc(dev), &qs->qs_desc_mem); 2162 mtx_destroy(&qs->qs_session_mtx); 2163} 2164 2165static int 2166qat_process(device_t dev, struct cryptop *crp, int hint) 2167{ 2168 struct qat_crypto *qcy; 2169 struct qat_crypto_bank *qcb; 2170 struct qat_crypto_desc const *desc; 2171 struct qat_session *qs; 2172 struct qat_softc *sc; 2173 struct qat_sym_cookie *qsc; 2174 struct qat_sym_bulk_cookie *qsbc; 2175 int error; 2176 2177 sc = device_get_softc(dev); 2178 qcy = &sc->sc_crypto; 2179 qs = crypto_get_driver_session(crp->crp_session); 2180 qsc = NULL; 2181 2182 if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) { 2183 error = E2BIG; 2184 goto fail1; 2185 } 2186 2187 mtx_lock(&qs->qs_session_mtx); 2188 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { 2189 if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) { 2190 error = E2BIG; 2191 mtx_unlock(&qs->qs_session_mtx); 2192 goto fail1; 2193 } 2194 2195 /* 2196 * The firmware interface for GCM annoyingly requires the AAD 2197 * size to be stored in the session's content descriptor, which 2198 * is not really meant to be updated after session 2199 * initialization. For IPSec the AAD size is fixed so this is 2200 * not much of a problem in practice, but we have to catch AAD 2201 * size updates here so that the device code can safely update 2202 * the session's recorded AAD size. 2203 */ 2204 if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) { 2205 if (qs->qs_inflight == 0) { 2206 if (qs->qs_aad_length != -1) { 2207 counter_u64_add(sc->sc_gcm_aad_updates, 2208 1); 2209 } 2210 qs->qs_aad_length = crp->crp_aad_length; 2211 } else { 2212 qs->qs_need_wakeup = true; 2213 mtx_unlock(&qs->qs_session_mtx); 2214 counter_u64_add(sc->sc_gcm_aad_restarts, 1); 2215 error = ERESTART; 2216 goto fail1; 2217 } 2218 } 2219 } 2220 qs->qs_inflight++; 2221 mtx_unlock(&qs->qs_session_mtx); 2222 2223 qcb = qat_crypto_select_bank(qcy); 2224 2225 qsc = qat_crypto_alloc_sym_cookie(qcb); 2226 if (qsc == NULL) { 2227 counter_u64_add(sc->sc_sym_alloc_failures, 1); 2228 error = ENOBUFS; 2229 goto fail2; 2230 } 2231 2232 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) 2233 desc = qs->qs_enc_desc; 2234 else 2235 desc = qs->qs_dec_desc; 2236 2237 error = qat_crypto_load(qs, qsc, desc, crp); 2238 if (error != 0) 2239 goto fail2; 2240 2241 qsbc = &qsc->qsc_bulk_cookie; 2242 qsbc->qsbc_crypto = qcy; 2243 qsbc->qsbc_session = qs; 2244 qsbc->qsbc_cb_tag = crp; 2245 2246 sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp); 2247 2248 if (crp->crp_aad != NULL) { 2249 bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag, 2250 qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap, 2251 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2252 } 2253 bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag, 2254 qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap, 2255 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2256 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { 2257 bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag, 2258 qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap, 2259 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2260 } 2261 bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap, 2262 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2263 2264 error = qat_etr_put_msg(sc, qcb->qcb_sym_tx, 2265 (uint32_t *)qsbc->qsbc_msg); 2266 if (error) 2267 goto fail2; 2268 2269 return 0; 2270 2271fail2: 2272 if (qsc) 2273 qat_crypto_free_sym_cookie(qcb, qsc); 2274 mtx_lock(&qs->qs_session_mtx); 2275 qs->qs_inflight--; 2276 mtx_unlock(&qs->qs_session_mtx); 2277fail1: 2278 crp->crp_etype = error; 2279 crypto_done(crp); 2280 return 0; 2281} 2282 2283static device_method_t qat_methods[] = { 2284 /* Device interface */ 2285 DEVMETHOD(device_probe, qat_probe), 2286 DEVMETHOD(device_attach, qat_attach), 2287 DEVMETHOD(device_detach, qat_detach), 2288 2289 /* Cryptodev interface */ 2290 DEVMETHOD(cryptodev_probesession, qat_probesession), 2291 DEVMETHOD(cryptodev_newsession, qat_newsession), 2292 DEVMETHOD(cryptodev_freesession, qat_freesession), 2293 DEVMETHOD(cryptodev_process, qat_process), 2294 2295 DEVMETHOD_END 2296}; 2297 2298static devclass_t qat_devclass; 2299 2300static driver_t qat_driver = { 2301 .name = "qat", 2302 .methods = qat_methods, 2303 .size = sizeof(struct qat_softc), 2304}; 2305 2306DRIVER_MODULE(qat, pci, qat_driver, qat_devclass, 0, 0); 2307MODULE_VERSION(qat, 1); 2308MODULE_DEPEND(qat, crypto, 1, 1, 1); 2309MODULE_DEPEND(qat, pci, 1, 1, 1); 2310