Lines Matching defs:areq_ctx

55 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
58 cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
59 (skip - areq_ctx->req_authsize), skip, dir);
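The two fragments above (lines 55-59) back up the last req_authsize bytes of the source data before they are overwritten. A minimal sketch of what that copy amounts to with the generic scatterlist helpers; backup_mac_from_src() is a hypothetical name, and the driver itself routes this through cc_copy_sg_portion():

#include <linux/scatterlist.h>
#include <linux/types.h>

static void backup_mac_from_src(struct scatterlist *src, unsigned int nents,
				u8 *backup_mac, unsigned int authsize,
				unsigned int end)
{
	/* Copy the 'authsize' bytes that end at offset 'end' into the
	 * driver-private backup buffer, i.e. bytes [end - authsize, end). */
	sg_pcopy_to_buffer(src, nents, backup_mac, authsize, end - authsize);
}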
289 cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
295 sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
296 AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
297 if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
302 &sg_dma_address(&areq_ctx->ccm_adata_sg),
303 sg_page(&areq_ctx->ccm_adata_sg),
304 sg_virt(&areq_ctx->ccm_adata_sg),
305 areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
308 cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
309 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
315 static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
321 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
322 if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
327 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
328 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
329 areq_ctx->buff_sg->length);
330 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
331 areq_ctx->curr_sg = areq_ctx->buff_sg;
332 areq_ctx->in_nents = 0;
334 cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
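cc_set_aead_conf_buf() (lines 289-309) and cc_set_hash_buf() (lines 315-334) share one pattern: wrap a flat driver buffer in a single-entry scatterlist, DMA-map it, and hand it on for MLLI bookkeeping. A minimal sketch of that pattern, under the assumption that the cc_add_sg_entry() step is left to the caller; map_one_flat_buf() is hypothetical:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int map_one_flat_buf(struct device *dev, struct scatterlist *sg,
			    void *buf, unsigned int len)
{
	/* Describe the flat buffer with a one-entry scatterlist. */
	sg_init_one(sg, buf, len);

	/* dma_map_sg() returns the number of mapped entries; 0 on failure. */
	if (dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() of a single buffer failed\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "mapped: dma=%pad virt=%p off=%u len=%u\n",
		&sg_dma_address(sg), sg_virt(sg), sg->offset, sg->length);
	return 0;
}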
447 dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
459 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
460 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
464 if (areq_ctx->mac_buf_dma_addr) {
465 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
469 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
470 if (areq_ctx->hkey_dma_addr) {
471 dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
475 if (areq_ctx->gcm_block_len_dma_addr) {
476 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
480 if (areq_ctx->gcm_iv_inc1_dma_addr) {
481 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
485 if (areq_ctx->gcm_iv_inc2_dma_addr) {
486 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
491 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
492 if (areq_ctx->ccm_iv0_dma_addr) {
493 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
497 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
499 if (areq_ctx->gen_ctx.iv_dma_addr) {
500 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
502 kfree_sensitive(areq_ctx->gen_ctx.iv);
506 if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
507 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
508 (areq_ctx->mlli_params.mlli_virt_addr)) {
510 &areq_ctx->mlli_params.mlli_dma_addr,
511 areq_ctx->mlli_params.mlli_virt_addr);
512 dma_pool_free(areq_ctx->mlli_params.curr_pool,
513 areq_ctx->mlli_params.mlli_virt_addr,
514 areq_ctx->mlli_params.mlli_dma_addr);
517 dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
518 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
519 areq_ctx->assoclen, req->cryptlen);
521 dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
525 dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
528 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
549 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
550 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
556 areq_ctx->gen_ctx.iv_dma_addr = 0;
557 areq_ctx->gen_ctx.iv = NULL;
561 areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
562 if (!areq_ctx->gen_ctx.iv)
565 areq_ctx->gen_ctx.iv_dma_addr =
566 dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
568 if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
571 kfree_sensitive(areq_ctx->gen_ctx.iv);
572 areq_ctx->gen_ctx.iv = NULL;
578 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
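Lines 549-578 show the IV setup: the request IV is duplicated so the hardware never touches the caller's buffer, mapped with dma_map_single(), and wiped with kfree_sensitive() if the mapping fails. A condensed sketch under those assumptions; map_request_iv() is hypothetical and the DMA direction is an assumption, since the listed lines do not show it:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int map_request_iv(struct device *dev, const u8 *req_iv,
			  unsigned int hw_iv_size, gfp_t flags,
			  u8 **iv_out, dma_addr_t *iv_dma_out)
{
	u8 *iv;
	dma_addr_t dma;

	/* Private copy so the caller's IV buffer is never handed to the HW. */
	iv = kmemdup(req_iv, hw_iv_size, flags);
	if (!iv)
		return -ENOMEM;

	/* Direction is an assumption; the listing omits it. */
	dma = dma_map_single(dev, iv, hw_iv_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		/* The IV may be sensitive material: zeroize before freeing. */
		kfree_sensitive(iv);
		return -ENOMEM;
	}

	*iv_out = iv;
	*iv_dma_out = dma;
	return 0;
}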
589 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
599 if (areq_ctx->assoclen == 0) {
600 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
601 areq_ctx->assoc.nents = 0;
602 areq_ctx->assoc.mlli_nents = 0;
604 cc_dma_buf_type(areq_ctx->assoc_buff_type),
605 areq_ctx->assoc.nents);
609 mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
618 areq_ctx->assoc.nents = mapped_nents;
623 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
626 (areq_ctx->assoc.nents + 1),
633 if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
634 areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
636 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
638 if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
640 cc_dma_buf_type(areq_ctx->assoc_buff_type),
641 areq_ctx->assoc.nents);
642 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
643 areq_ctx->assoclen, 0, is_last,
644 &areq_ctx->assoc.mlli_nents);
645 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
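The association-data chaining in lines 589-645 reduces to one decision: a single mapped entry with no CCM A-data header can be described directly (DLLI), anything else goes through an MLLI table. A hedged restatement of that test; assoc_needs_mlli() is a hypothetical helper, not a function in the driver:

#include <linux/types.h>

static bool assoc_needs_mlli(unsigned int mapped_nents, bool has_ccm_header,
			     bool do_chain)
{
	/* DLLI only when exactly one entry was mapped, no CCM config block
	 * has to be prepended, and the caller did not force chaining. */
	return !(mapped_nents == 1 && !has_ccm_header) || do_chain;
}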
655 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
656 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
657 unsigned int authsize = areq_ctx->req_authsize;
661 areq_ctx->is_icv_fragmented = false;
664 sg = areq_ctx->src_sgl;
667 sg = areq_ctx->dst_sgl;
671 areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
672 areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
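In the contiguous (DLLI) case above, the ICV is simply the tail of the last mapped entry. A small sketch of that address computation; locate_icv() is hypothetical and last_entry_bytes stands in for the per-list byte counts the driver tracks:

#include <linux/scatterlist.h>

static void locate_icv(struct scatterlist *last_sg,
		       unsigned int last_entry_bytes, unsigned int authsize,
		       dma_addr_t *icv_dma, void **icv_virt)
{
	/* The ICV is the final 'authsize' bytes of the payload, so it starts
	 * 'authsize' bytes before the end of the data in the last entry. */
	unsigned int offset = last_entry_bytes - authsize;

	*icv_dma = sg_dma_address(last_sg) + offset;
	*icv_virt = sg_virt(last_sg) + offset;
}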
681 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
682 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
683 unsigned int authsize = areq_ctx->req_authsize;
689 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
690 areq_ctx->src_sgl, areq_ctx->cryptlen,
691 areq_ctx->src_offset, is_last_table,
692 &areq_ctx->src.mlli_nents);
694 areq_ctx->is_icv_fragmented =
695 cc_is_icv_frag(areq_ctx->src.nents, authsize,
698 if (areq_ctx->is_icv_fragmented) {
712 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
714 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
715 areq_ctx->icv_dma_addr =
716 areq_ctx->mac_buf_dma_addr;
719 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
721 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
723 areq_ctx->icv_virt_addr = sg_virt(sg) +
729 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
730 areq_ctx->src_sgl, areq_ctx->cryptlen,
731 areq_ctx->src_offset, is_last_table,
732 &areq_ctx->src.mlli_nents);
733 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
734 areq_ctx->dst_sgl, areq_ctx->cryptlen,
735 areq_ctx->dst_offset, is_last_table,
736 &areq_ctx->dst.mlli_nents);
738 areq_ctx->is_icv_fragmented =
739 cc_is_icv_frag(areq_ctx->src.nents, authsize,
746 if (areq_ctx->is_icv_fragmented) {
748 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
751 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
753 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
755 areq_ctx->icv_virt_addr = sg_virt(sg) +
761 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
762 areq_ctx->dst_sgl, areq_ctx->cryptlen,
763 areq_ctx->dst_offset, is_last_table,
764 &areq_ctx->dst.mlli_nents);
765 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
766 areq_ctx->src_sgl, areq_ctx->cryptlen,
767 areq_ctx->src_offset, is_last_table,
768 &areq_ctx->src.mlli_nents);
770 areq_ctx->is_icv_fragmented =
771 cc_is_icv_frag(areq_ctx->dst.nents, authsize,
774 if (!areq_ctx->is_icv_fragmented) {
775 sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
777 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
779 areq_ctx->icv_virt_addr = sg_virt(sg) +
782 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
783 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
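The MLLI paths in lines 681-783 branch on is_icv_fragmented, falling back to mac_buf or backup_mac when the ICV straddles scatterlist entries. The test itself is not in the listing; the following is an assumed reconstruction of what cc_is_icv_frag() checks, based only on how its result is used above:

#include <linux/types.h>

/* Assumption: the ICV counts as "fragmented" when the data list has more
 * than one entry and the last entry holds fewer bytes than the ICV itself,
 * i.e. the tag cannot be read or written as one contiguous run. */
static bool icv_is_fragmented(unsigned int sgl_nents, unsigned int authsize,
			      u32 last_entry_data_size)
{
	return sgl_nents > 1 && last_entry_data_size < authsize;
}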
793 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
795 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
796 unsigned int authsize = areq_ctx->req_authsize;
812 areq_ctx->src_sgl = req->src;
813 areq_ctx->dst_sgl = req->dst;
819 sg_index = areq_ctx->src_sgl->length;
823 offset -= areq_ctx->src_sgl->length;
824 sgl = sg_next(areq_ctx->src_sgl);
827 areq_ctx->src_sgl = sgl;
828 sg_index += areq_ctx->src_sgl->length;
836 areq_ctx->src.nents = src_mapped_nents;
838 areq_ctx->src_offset = offset;
849 &areq_ctx->dst.mapped_nents,
858 sg_index = areq_ctx->dst_sgl->length;
864 offset -= areq_ctx->dst_sgl->length;
865 sgl = sg_next(areq_ctx->dst_sgl);
868 areq_ctx->dst_sgl = sgl;
869 sg_index += areq_ctx->dst_sgl->length;
876 areq_ctx->dst.nents = dst_mapped_nents;
877 areq_ctx->dst_offset = offset;
881 areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
886 areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
898 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
901 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
902 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
903 curr_mlli_size = areq_ctx->assoc.mlli_nents *
907 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
910 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
911 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
913 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
914 if (!areq_ctx->is_single_pass)
915 areq_ctx->assoc.mlli_nents +=
916 areq_ctx->src.mlli_nents;
918 if (areq_ctx->gen_ctx.op_type ==
920 areq_ctx->src.sram_addr =
923 areq_ctx->dst.sram_addr =
924 areq_ctx->src.sram_addr +
925 areq_ctx->src.mlli_nents *
927 if (!areq_ctx->is_single_pass)
928 areq_ctx->assoc.mlli_nents +=
929 areq_ctx->src.mlli_nents;
931 areq_ctx->dst.sram_addr =
934 areq_ctx->src.sram_addr =
935 areq_ctx->dst.sram_addr +
936 areq_ctx->dst.mlli_nents *
938 if (!areq_ctx->is_single_pass)
939 areq_ctx->assoc.mlli_nents +=
940 areq_ctx->dst.mlli_nents;
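Lines 898-940 place the assoc, src and dst MLLI tables back to back in MLLI SRAM, with dst sharing the src table for in-place requests and the src/dst order depending on the cipher direction. A simplified sketch of the layout arithmetic only; mlli_table_layout() is hypothetical and ignores the direction-dependent ordering shown above:

#include <linux/types.h>

static void mlli_table_layout(u32 sram_base, u32 lli_entry_bytes,
			      u32 assoc_nents, u32 first_data_nents,
			      bool in_place, u32 *first_data_sram,
			      u32 *second_data_sram)
{
	/* The assoc-data table always sits at the start of the SRAM window. */
	u32 data_base = sram_base + assoc_nents * lli_entry_bytes;

	*first_data_sram = data_base;
	/* In-place requests reuse one data table; otherwise the second data
	 * table starts right after the first one. */
	*second_data_sram = in_place ? data_base
				     : data_base +
				       first_data_nents * lli_entry_bytes;
}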
948 struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
949 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
952 unsigned int authsize = areq_ctx->req_authsize;
967 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
972 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
977 dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
981 MAX_MAC_SIZE, areq_ctx->mac_buf);
985 areq_ctx->mac_buf_dma_addr = dma_addr;
987 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
988 void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
996 areq_ctx->ccm_iv0_dma_addr = 0;
1000 areq_ctx->ccm_iv0_dma_addr = dma_addr;
1002 rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1003 &sg_data, areq_ctx->assoclen);
1008 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1009 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1013 AES_BLOCK_SIZE, areq_ctx->hkey);
1017 areq_ctx->hkey_dma_addr = dma_addr;
1019 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1023 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1027 areq_ctx->gcm_block_len_dma_addr = dma_addr;
1029 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1034 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1035 areq_ctx->gcm_iv_inc1_dma_addr = 0;
1039 areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1041 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1046 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1047 areq_ctx->gcm_iv_inc2_dma_addr = 0;
1051 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1056 if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
1063 &areq_ctx->src.mapped_nents,
1070 if (areq_ctx->is_single_pass) {
1121 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1122 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1130 areq_ctx->assoc.mlli_nents);
1131 dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1132 dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1145 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1147 u8 *curr_buff = cc_hash_buf(areq_ctx);
1148 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1149 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1156 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1158 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1161 areq_ctx->in_nents = 0;
1170 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1178 &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1183 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1184 memcpy(areq_ctx->buff_sg, src,
1186 areq_ctx->buff_sg->length = nbytes;
1187 areq_ctx->curr_sg = areq_ctx->buff_sg;
1188 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1190 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1195 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1198 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1199 0, true, &areq_ctx->mlli_nents);
1205 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1206 dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1207 cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1211 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1215 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1224 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1226 u8 *curr_buff = cc_hash_buf(areq_ctx);
1227 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1228 u8 *next_buff = cc_next_buf(areq_ctx);
1229 u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1230 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1240 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1242 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1244 areq_ctx->curr_sg = NULL;
1246 areq_ctx->in_nents = 0;
1251 areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1252 sg_copy_to_buffer(src, areq_ctx->in_nents,
1279 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1289 DMA_TO_DEVICE, &areq_ctx->in_nents,
1295 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1297 memcpy(areq_ctx->buff_sg, src,
1299 areq_ctx->buff_sg->length = update_data_len;
1300 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1301 areq_ctx->curr_sg = areq_ctx->buff_sg;
1303 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1307 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1310 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1312 &areq_ctx->mlli_nents);
1317 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1322 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1326 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1334 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1335 u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1340 if (areq_ctx->mlli_params.curr_pool) {
1342 &areq_ctx->mlli_params.mlli_dma_addr,
1343 areq_ctx->mlli_params.mlli_virt_addr);
1344 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1345 areq_ctx->mlli_params.mlli_virt_addr,
1346 areq_ctx->mlli_params.mlli_dma_addr);
1349 if (src && areq_ctx->in_nents) {
1353 areq_ctx->in_nents, DMA_TO_DEVICE);
1357 dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1358 sg_virt(areq_ctx->buff_sg),
1359 &sg_dma_address(areq_ctx->buff_sg),
1360 sg_dma_len(areq_ctx->buff_sg));
1361 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1368 areq_ctx->buff_index ^= 1;
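The hash unmap fragments (lines 1334-1368) undo the map path in reverse: free the MLLI table if one was allocated from the DMA pool, then unmap the data and staging-buffer scatterlists. A condensed sketch of that order; unmap_hash_buffers() is a hypothetical wrapper, with the mlli_params fields passed individually so the example stands alone:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/scatterlist.h>

static void unmap_hash_buffers(struct device *dev, struct dma_pool *pool,
			       void *mlli_virt, dma_addr_t mlli_dma,
			       struct scatterlist *src, unsigned int in_nents,
			       struct scatterlist *buff_sg, bool buff_mapped)
{
	/* Return the MLLI table to its DMA pool only if one was built. */
	if (pool)
		dma_pool_free(pool, mlli_virt, mlli_dma);

	/* Then drop the data mapping and the one-entry staging buffer. */
	if (src && in_nents)
		dma_unmap_sg(dev, src, in_nents, DMA_TO_DEVICE);
	if (buff_mapped)
		dma_unmap_sg(dev, buff_sg, 1, DMA_TO_DEVICE);
}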