Lines matching refs: req_ctx

All of the matches below appear to come from the ccree driver's buffer manager (drivers/crypto/ccree/cc_buffer_mgr.c in the mainline kernel): the first block (source lines 343-365) is the teardown path, cc_unmap_cipher_request(), and the second (375-453) is the setup path, cc_map_cipher_request(). The number at the start of each match is its line in that file.

343 	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
345 	if (req_ctx->gen_ctx.iv_dma_addr) {
347 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
348 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
352 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
353 	    req_ctx->mlli_params.mlli_virt_addr) {
354 		dma_pool_free(req_ctx->mlli_params.curr_pool,
355 			      req_ctx->mlli_params.mlli_virt_addr,
356 			      req_ctx->mlli_params.mlli_dma_addr);
360 		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
361 		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
365 		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
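
Read together, the matches from 343-365 cover the whole teardown sequence: drop the IV's single-buffer mapping, return the MLLI table to its DMA pool, then unmap the data scatterlists. A minimal sketch of that flow, assuming the surrounding code matches the upstream driver; the src != dst branching and the IV unmap direction are inferred from context, while the scatterlist directions appear verbatim at 360-365:

	/* Teardown in reverse order of setup: IV, MLLI table, data SGLs. */
	if (req_ctx->gen_ctx.iv_dma_addr)
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);	/* direction assumed */

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr)
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);

	if (src != dst) {	/* out-of-place: two separate mappings */
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
	} else {		/* in-place: one bidirectional mapping */
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	}
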
375 	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
376 	struct mlli_params *mlli_params = &req_ctx->mlli_params;
384 	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
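
Line 384 is the key initialization: the request starts out as DLLI and is only promoted to MLLI once a mapping turns out to be non-contiguous (lines 410 and 428 below). The assumed meaning of the two constants, reconstructed from the driver's header and possibly differing from a given tree:

	/* Assumed definitions, reconstructed from cc_buffer_mgr.h: */
	enum cc_req_dma_buf_type {
		CC_DMA_BUF_NULL = 0,
		CC_DMA_BUF_DLLI,	/* direct LLI: one contiguous DMA segment,
					 * programmed straight into the descriptor */
		CC_DMA_BUF_MLLI,	/* MLLI: linked-list table of segments,
					 * built in a dma_pool, handed to the HW */
	};
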
391 		req_ctx->gen_ctx.iv_dma_addr =
393 		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
399 			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
401 		req_ctx->gen_ctx.iv_dma_addr = 0;
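
Lines 391-401 are the IV step. Note the dma_mapping_error() check at 393: a handle returned by dma_map_single() must be validated that way, never by comparing against zero. The zero stored at 401 is the driver's own "no IV" sentinel, which is exactly what line 345 tests during unmap. A sketch of the step with the non-matched lines filled in from context; the ivsize test, log strings, and -ENOMEM return are assumptions:

	if (ivsize) {
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;		/* error value assumed */
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;	/* "no IV" sentinel, line 401 */
	}
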
405 	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
410 		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
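
Line 405 maps the source scatterlist, capped at LLI_MAX_NUM_OF_DATA_ENTRIES entries, and line 410 applies the promotion rule. A sketch with the surrounding lines reconstructed; mapped_nents, dummy, and the cipher_exit label name are assumptions:

	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;		/* unwinds via line 453 */
	if (mapped_nents > 1)			/* not one contiguous segment */
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
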
414 		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
415 			req_ctx->out_nents = 0;
416 			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
418 					&req_ctx->in_mlli_nents);
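
Lines 414-418 sit inside the in-place branch (src == dst): there is no separate output mapping, so out_nents is zeroed at 415 and only the source list is entered into the MLLI table. A sketch; the enclosing src == dst condition and cc_add_sg_entry()'s length/offset/last-table arguments are assumptions:

	if (src == dst) {	/* in-place: dst shares src's mapping */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	}
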
423 		       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
428 			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
430 		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
431 			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
433 					&req_ctx->in_mlli_nents);
434 			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
436 					&req_ctx->out_mlli_nents);
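
Lines 423-436 are the mirror-image out-of-place branch: dst gets its own DMA_FROM_DEVICE mapping (matching the unmap direction at line 361), the same more-than-one-segment rule can force MLLI at 428, and then both lists contribute table entries. Continuing the sketch from the in-place branch above, with the non-matched argument values assumed:

	} else {		/* out-of-place: map dst on its own */
		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}
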
440 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
448 		cc_dma_buf_type(req_ctx->dma_buf_type));
453 	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
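
The tail ties it together: if anything promoted the request to MLLI, line 440's block actually allocates and fills the table from the DMA pool, and every failure above funnels through line 453, which reuses the teardown function so partially mapped state is released in one place. A sketch of that tail; the pool field, cc_generate_mlli() arguments, and the label name are reconstructed, not verbatim:

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;	/* field assumed */
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;

Using the unmap routine as the common error path is also why cc_unmap_cipher_request() tolerates partially initialized state: each field it tests (iv_dma_addr at 345, mlli_virt_addr at 353) doubles as a "was this mapped?" flag.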