Lines matching refs: sr (struct nitrox_softreq *, the driver's per-request software context), listed with their source line numbers

55 static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
57 struct nitrox_device *ndev = sr->ndev;
61 dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
63 dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
65 kfree(sr->in.sgcomp);
66 sr->in.sg = NULL;
67 sr->in.sgmap_cnt = 0;
69 dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
71 dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
73 kfree(sr->out.sgcomp);
74 sr->out.sg = NULL;
75 sr->out.sgmap_cnt = 0;
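
Read together, the softreq_unmap_sgbufs() matches above tear down both DMA views of a request. A sketch of the whole helper, filling in the pieces the refs filter drops; the DMA directions are assumptions, the DEV() lookup appears later in the listing:

static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);

	/* input side: unmap the caller's scatterlist, then the gather
	 * component array that was handed to the engine
	 */
	dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg), DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->in.sgcomp);
	sr->in.sg = NULL;
	sr->in.sgmap_cnt = 0;

	/* output side: identical teardown for the scatter component array */
	dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg), DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->out.sgcomp);
	sr->out.sg = NULL;
	sr->out.sgmap_cnt = 0;
}
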
78 static void softreq_destroy(struct nitrox_softreq *sr)
80 softreq_unmap_sgbufs(sr);
81 kfree(sr);
86 * @sr: Request structure
107 static int create_sg_component(struct nitrox_softreq *sr,
110 struct nitrox_device *ndev = sr->ndev;
121 sgcomp = kzalloc(sz_comp, sr->gfp);
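
Only three create_sg_component() lines reference sr, so the surrounding logic is missing. A hedged sketch of what plausibly happens around the kzalloc(): pack the already-mapped scatterlist into hardware SG component entries, then DMA-map that array for the engine. The nitrox_sgtable/nitrox_sgcomp layout (four 16-bit lengths plus four 64-bit DMA addresses per component) and the error handling are assumptions:

static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);
	struct nitrox_sgcomp *sgcomp;
	struct scatterlist *sg;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	/* each component describes up to four SG entries (layout assumed) */
	nr_sgcomp = DIV_ROUND_UP(map_nents, 4);
	sz_comp = nr_sgcomp * sizeof(*sgcomp);

	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;

	/* copy DMA length/address of every mapped entry into the components */
	sg = sgtbl->sg;
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4 && sg; j++) {
			sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
			sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
			sg = sg_next(sg);
		}
	}

	/* map the component array itself so the engine can fetch it */
	dma = dma_map_single(dev, sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->sgcomp_dma = dma;
	sgtbl->sgcomp_len = sz_comp;
	return 0;
}
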
153 * @sr: Request structure
158 static int dma_map_inbufs(struct nitrox_softreq *sr,
161 struct device *dev = DEV(sr->ndev);
171 sr->in.total_bytes += sg_dma_len(sg);
173 sr->in.sg = req->src;
174 sr->in.sgmap_cnt = nents;
175 ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
183 sr->in.sgmap_cnt = 0;
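
The input-side mapping keeps a running byte count that later feeds the instruction's total-length field (line 455). A sketch of the full flow; the DMA direction and the error unwinding are assumptions, the rest mirrors the matches above:

static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg;
	int i, nents, ret;

	nents = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	/* total payload size, used later for the instruction length field */
	for_each_sg(req->src, sg, nents, i)
		sr->in.total_bytes += sg_dma_len(sg);

	sr->in.sg = req->src;
	sr->in.sgmap_cnt = nents;

	ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
	if (ret) {
		/* undo the scatterlist mapping if the component list failed */
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
		sr->in.sgmap_cnt = 0;
	}
	return ret;
}
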
187 static int dma_map_outbufs(struct nitrox_softreq *sr,
190 struct device *dev = DEV(sr->ndev);
198 sr->out.sg = req->dst;
199 sr->out.sgmap_cnt = nents;
200 ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
208 sr->out.sgmap_cnt = 0;
209 sr->out.sg = NULL;
213 static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
218 ret = dma_map_inbufs(sr, creq);
222 ret = dma_map_outbufs(sr, creq);
224 softreq_unmap_sgbufs(sr);
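
Lines 218-224 show the mapping order: input first, then output, with one cleanup call if the output side fails. A minimal sketch of the wrapper:

static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);	/* tear down whatever was mapped */

	return ret;
}
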
229 static inline void backlog_list_add(struct nitrox_softreq *sr,
232 INIT_LIST_HEAD(&sr->backlog);
235 list_add_tail(&sr->backlog, &cmdq->backlog_head);
237 atomic_set(&sr->status, REQ_BACKLOG);
241 static inline void response_list_add(struct nitrox_softreq *sr,
244 INIT_LIST_HEAD(&sr->response);
247 list_add_tail(&sr->response, &cmdq->response_head);
251 static inline void response_list_del(struct nitrox_softreq *sr,
255 list_del(&sr->response);
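
The backlog and response lists are both per-command-queue; the helpers initialize the request's list node and splice it in under the queue lock. A sketch with the lock and counter names assumed (the listing only shows the list operations and the status update):

static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_qlock);	/* lock name assumed */
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_qlock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->resp_qlock);	/* lock name assumed */
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->resp_qlock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->resp_qlock);
}
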
281 * @sr: Request structure
287 static void post_se_instr(struct nitrox_softreq *sr,
290 struct nitrox_device *ndev = sr->ndev;
299 memcpy(ent, &sr->instr, cmdq->instr_size);
301 atomic_set(&sr->status, REQ_POSTED);
302 response_list_add(sr, cmdq);
303 sr->tstamp = jiffies;
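
post_se_instr() copies the prepared instruction into the next free slot of the hardware ring, marks the request as posted, and queues it on the response list with a timestamp before notifying the engine. A sketch; the ring index handling, lock name, and doorbell write are assumptions:

static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmd_qlock);		/* lock name assumed */

	/* copy the instruction into the next free ring slot */
	idx = cmdq->write_idx;			/* ring base/index names assumed */
	ent = cmdq->base + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	sr->tstamp = jiffies;

	/* advance the ring and tell the engine one more instruction is ready */
	cmdq->write_idx = (idx + 1) % ndev->qlen;
	writeq(1, cmdq->dbell_csr_addr);	/* doorbell register assumed */

	spin_unlock_bh(&cmdq->cmd_qlock);
}
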
321 struct nitrox_softreq *sr, *tmp;
329 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
336 list_del(&sr->backlog);
342 post_se_instr(sr, cmdq);
349 static int nitrox_enqueue_request(struct nitrox_softreq *sr)
351 struct nitrox_cmdq *cmdq = sr->cmdq;
352 struct nitrox_device *ndev = sr->ndev;
358 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
364 backlog_list_add(sr, cmdq);
367 post_se_instr(sr, cmdq);
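
The enqueue path decides between posting immediately and backlogging: requests without CRYPTO_TFM_REQ_MAY_BACKLOG are refused when the ring is full, everything else is parked on the backlog list for a later post_backlog_cmds() pass. A sketch; the queue-full test and the return codes are assumptions:

static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {	/* helper assumed */
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* caller cannot wait: refuse the request */
			return -ENOSPC;
		}
		/* park it; the backlog worker posts it once slots free up */
		backlog_list_add(sr, cmdq);
		return -EINPROGRESS;
	}

	post_se_instr(sr, cmdq);
	return -EINPROGRESS;
}
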
386 struct nitrox_softreq *sr;
393 sr = kzalloc(sizeof(*sr), req->gfp);
394 if (!sr)
397 sr->ndev = ndev;
398 sr->flags = req->flags;
399 sr->gfp = req->gfp;
400 sr->callback = callback;
401 sr->cb_arg = cb_arg;
403 atomic_set(&sr->status, REQ_NOT_POSTED);
405 sr->resp.orh = req->orh;
406 sr->resp.completion = req->comp;
408 ret = softreq_map_iobuf(sr, req);
410 kfree(sr);
427 sr->cmdq = &ndev->pkt_inq[qno];
447 sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);
450 sr->instr.ih.value = 0;
451 sr->instr.ih.s.g = 1;
452 sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
453 sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
454 sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
455 sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
456 sr->instr.ih.bev = cpu_to_be64(sr->instr.ih.value);
459 sr->instr.irh.value[0] = 0;
460 sr->instr.irh.s.uddl = MIN_UDD_LEN;
462 sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
464 sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
465 sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
466 sr->instr.irh.s.arg = req->ctrl.s.arg;
467 sr->instr.irh.s.opcode = req->opcode;
468 sr->instr.irh.bev[0] = cpu_to_be64(sr->instr.irh.value[0]);
471 sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);
474 sr->instr.slc.value[0] = 0;
475 sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
476 sr->instr.slc.bev[0] = cpu_to_be64(sr->instr.slc.value[0]);
479 sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);
486 sr->instr.fdata[0] = *((u64 *)&req->gph);
487 sr->instr.fdata[1] = 0;
489 ret = nitrox_enqueue_request(sr);
496 softreq_destroy(sr);
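
Lines 447-487 above assemble the 64-byte SE instruction word by word. A compact, comment-annotated restatement of that sequence; the field meanings in the comments are inferred from the names and should be treated as assumptions:

	/* DPTR: DMA address of the gather (input) SG component list */
	sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

	/* IH (instruction header): gather mode, gather/scatter entry counts,
	 * fixed-data size and total input length
	 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
	sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.bev = cpu_to_be64(sr->instr.ih.value);

	/* IRH (input request header): opcode, context length/handle, arg,
	 * and the destination port derived from the queue number
	 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.bev[0] = cpu_to_be64(sr->instr.irh.value[0]);
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* SLC (solicited completion header): scatter entry count plus RPTR,
	 * the DMA address of the scatter (output) SG component list
	 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
	sr->instr.slc.bev[0] = cpu_to_be64(sr->instr.slc.value[0]);
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

	/* fixed data: the caller's general-purpose header, zero padded */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
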
513 static bool sr_completed(struct nitrox_softreq *sr)
515 u64 orh = READ_ONCE(*sr->resp.orh);
521 while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
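
Completion is detected from two words the engine writes back into host memory: the ORH and a separate completion word, both pre-filled with PENDING_SIG. A sketch of the check; the short polling grace period is an assumption:

static bool sr_completed(struct nitrox_softreq *sr)
{
	u64 orh = READ_ONCE(*sr->resp.orh);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);	/* grace period assumed */

	/* the ORH is written first; still PENDING_SIG means not done */
	if (orh == PENDING_SIG)
		return false;

	/* the completion word follows shortly after the ORH */
	while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
		if (time_after(jiffies, deadline))
			return false;
		cpu_relax();
	}

	return true;
}
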
540 struct nitrox_softreq *sr;
549 sr = get_first_response_entry(cmdq);
550 if (!sr)
553 if (atomic_read(&sr->status) != REQ_POSTED)
557 if (!sr_completed(sr)) {
559 if (!cmd_timeout(sr->tstamp, ndev->timeout))
563 READ_ONCE(*sr->resp.orh));
570 response_list_del(sr, cmdq);
572 err = READ_ONCE(*sr->resp.orh) & 0xff;
573 callback = sr->callback;
574 cb_arg = sr->cb_arg;
575 softreq_destroy(sr);
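
The response-side processing walks the per-queue response list in submission order: the oldest posted request is checked for completion (or timeout), removed from the list, and its callback is invoked after the softreq has been destroyed. A sketch of that loop; the function name, callback signature, and the timeout reporting are assumptions, the body otherwise follows the matches above:

static void process_response_list(struct nitrox_cmdq *cmdq)	/* name assumed */
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	void (*callback)(void *arg, int err);	/* signature assumed */
	void *cb_arg;
	int err;

	while ((sr = get_first_response_entry(cmdq))) {
		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		if (!sr_completed(sr)) {
			/* oldest entry not done yet; stop unless it timed out */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(*sr->resp.orh));
		}

		response_list_del(sr, cmdq);

		/* low byte of the ORH carries the completion code */
		err = READ_ONCE(*sr->resp.orh) & 0xff;
		callback = sr->callback;
		cb_arg = sr->cb_arg;
		softreq_destroy(sr);

		if (callback)
			callback(cb_arg, err);
	}
}
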