/linux-master/drivers/infiniband/hw/mlx5/

wr.h:94: int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
wr.h:97: int *size, void **cur_edge, int nreq, __be32 general_id,
wr.h:101: u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
wr.h:102: void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,

wr.c:29: int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)  [argument]
wr.c:35: if (likely(cur + nreq < wq->max_post))
wr.c:43: return cur + nreq >= wq->max_post;
wr.c:726: int *size, void **cur_edge, int nreq, __be32 general_id,
wr.c:729: if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
wr.c:751: void **cur_edge, int nreq)
wr.c:753: return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
wr.c:760: u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)
wr.c:773: qp->sq.wqe_head[idx] = qp->sq.head + nreq;
wr.c:815: void **cur_edge, unsigned int *idx, int nreq,
wr.c:724: mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx, int *size, void **cur_edge, int nreq, __be32 general_id, bool send_signaled, bool solicited)  [argument]
wr.c:748: begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, const struct ib_send_wr *wr, unsigned int *idx, int *size, void **cur_edge, int nreq)  [argument]
wr.c:758: mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, void *seg, u8 size, void *cur_edge, unsigned int idx, u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)  [argument]
wr.c:812: handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, struct ib_sig_domain *domain, u32 psv_index, u8 next_fence)  [argument]
wr.c:843: handle_reg_mr_integrity(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence)  [argument]
wr.c:931: handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence, int *num_sge)  [argument]
wr.c:1025: mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq, struct mlx5_wqe_ctrl_seg *ctrl)  [argument]
wr.c:1066: int nreq;  [local]
wr.c:1216: int nreq;  [local]
[all...]

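The wr.c hits at lines 35 and 43 are the ring-occupancy test that recurs throughout this listing (mthca_qp.c:1575, mlx4's qp.c:3284 and hns_roce_qp.c:1515 below are the same check). A minimal user-space sketch of just the arithmetic, under hypothetical toy_* names; the real mlx5r_wq_overflow() also re-reads the consumer index under the send-CQ lock before declaring overflow:

#include <stdbool.h>

/* Toy model of the send-queue counters; the real struct mlx5_ib_wq
 * carries much more state. */
struct toy_wq {
	unsigned int head;	/* producer: WQEs posted so far */
	unsigned int tail;	/* consumer: WQEs completed so far */
	unsigned int max_post;	/* ring capacity */
};

/* Overflow if the WQEs still in flight (head - tail) plus the nreq
 * WQEs this post call wants to add would fill the ring; the unsigned
 * subtraction stays correct across counter wraparound. */
static bool toy_wq_overflow(const struct toy_wq *wq, int nreq)
{
	unsigned int cur = wq->head - wq->tail;

	return cur + nreq >= wq->max_post;
}
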
srq.c:412: int nreq;  [local]
srq.c:423: for (nreq = 0; wr; nreq++, wr = wr->next) {
srq.c:455: if (likely(nreq)) {
srq.c:456: srq->wqe_ctr += nreq;

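srq.c:423-456 shows the batching idiom used by every post path in this listing: count accepted requests in nreq while walking the chained work requests, and publish the producer counter once, after the loop. A compilable sketch under hypothetical toy_* names:

struct toy_recv_wr {
	struct toy_recv_wr *next;	/* work requests arrive as a chain */
	unsigned long wr_id;
};

struct toy_srq {
	unsigned int wqe_ctr;		/* producer counter the device reads */
};

/* Stand-in for copying one WQE into the ring; always succeeds here. */
static int toy_build_wqe(struct toy_srq *srq, const struct toy_recv_wr *wr)
{
	(void)srq;
	(void)wr;
	return 0;
}

/* Loop shape of srq.c:423-456: count accepted requests in nreq, stop at
 * the first failure, and publish the counter once after the walk, so
 * the WQEs built before an error still reach the hardware. */
static int toy_post_recv(struct toy_srq *srq, const struct toy_recv_wr *wr,
			 const struct toy_recv_wr **bad_wr)
{
	int err = 0;
	int nreq;

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		err = toy_build_wqe(srq, wr);
		if (err) {
			*bad_wr = wr;
			break;
		}
	}

	if (nreq)
		srq->wqe_ctr += nreq;	/* real drivers add a write barrier
					 * and a doorbell here */

	return err;
}
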
/linux-master/crypto/

echainiv.c:45: SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
echainiv.c:47: skcipher_request_set_sync_tfm(nreq, ctx->sknull);
echainiv.c:48: skcipher_request_set_callback(nreq, req->base.flags,
echainiv.c:50: skcipher_request_set_crypt(nreq, req->src, req->dst,
echainiv.c:54: err = crypto_skcipher_encrypt(nreq);

seqiv.c:68: SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
seqiv.c:70: skcipher_request_set_sync_tfm(nreq, ctx->sknull);
seqiv.c:71: skcipher_request_set_callback(nreq, req->base.flags,
seqiv.c:73: skcipher_request_set_crypt(nreq, req->src, req->dst,
seqiv.c:77: err = crypto_skcipher_encrypt(nreq);

gcm.c:962: SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
gcm.c:964: skcipher_request_set_sync_tfm(nreq, ctx->null);
gcm.c:965: skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
gcm.c:966: skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
gcm.c:968: return crypto_skcipher_encrypt(nreq);

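All three crypto hits are one idiom: "encrypt" through the null cipher to copy data between scatterlists without writing a copy loop. A kernel-style sketch; it assumes null_tfm is an "ecb(cipher_null)" sync tfm, which is what the callers' ctx->sknull / ctx->null handles hold (in-tree they share one via crypto_get_default_null_skcipher()):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Copy nbytes from src to dst through the null cipher, as in
 * echainiv.c:45-54, seqiv.c:68-77 and gcm.c:962-968.  cipher_null's
 * "encryption" is a plain copy, so this memcpy()s through the
 * scatterlists without a hand-rolled loop. */
static int null_copy(struct crypto_sync_skcipher *null_tfm,
		     struct scatterlist *src, struct scatterlist *dst,
		     unsigned int nbytes, u32 flags)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, null_tfm);

	skcipher_request_set_sync_tfm(nreq, null_tfm);
	skcipher_request_set_callback(nreq, flags, NULL, NULL);
	skcipher_request_set_crypt(nreq, src, dst, nbytes, NULL);

	return crypto_skcipher_encrypt(nreq);
}
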
/linux-master/net/ipv4/

inet_connection_sock.c:917: struct request_sock *nreq;  [local]
inet_connection_sock.c:919: nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
inet_connection_sock.c:920: if (!nreq) {
inet_connection_sock.c:929: nreq_sk = req_to_sk(nreq);
inet_connection_sock.c:944: nreq->rsk_listener = sk;
inet_connection_sock.c:949: if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
inet_connection_sock.c:950: rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
inet_connection_sock.c:952: return nreq;
inet_connection_sock.c:1015: struct request_sock *nreq  [local]
inet_connection_sock.c:1357: struct request_sock *nreq;  [local]
inet_connection_sock.c:1412: struct request_sock *nreq;  [local]
[all...]

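These hits are a request-socket clone used when a pending SYN-ACK request migrates to a new listener. A deliberately simplified sketch: the real code copies via req_to_sk() in carefully ordered pieces, fixes reference counts and hash linkage, and patches the TCP fastopen back-pointer shown at line 950; only the allocate-copy-rebind skeleton is kept here, and the use of rsk_ops->obj_size as the copy length is my simplification:

#include <linux/slab.h>
#include <linux/string.h>
#include <net/request_sock.h>

/* Hypothetical, heavily simplified clone in the spirit of
 * inet_connection_sock.c:919-952: allocate a twin request from the same
 * slab, copy the protocol-specific object wholesale, and rebind it to
 * the new listener.  All refcounting and RCU handling omitted. */
static struct request_sock *toy_reqsk_clone(struct request_sock *req,
					    struct sock *listener)
{
	struct request_sock *nreq;

	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!nreq)
		return NULL;

	memcpy(nreq, req, req->rsk_ops->obj_size);
	nreq->rsk_listener = listener;	/* rebind to the new listener */
	return nreq;
}
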
/linux-master/drivers/infiniband/hw/mthca/

mthca_srq.c:493: int nreq;  [local]
mthca_srq.c:502: for (nreq = 0; wr; wr = wr->next) {
mthca_srq.c:543: ++nreq;
mthca_srq.c:544: if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
mthca_srq.c:545: nreq = 0;
mthca_srq.c:561: if (likely(nreq)) {
mthca_srq.c:568: mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
mthca_srq.c:586: int nreq;  [local]
mthca_srq.c:592: for (nreq = 0; wr; ++nreq, w
[all...]

mthca_qp.c:1568: static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,  [argument]
mthca_qp.c:1575: if (likely(cur + nreq < wq->max))
mthca_qp.c:1583: return cur + nreq >= wq->max;
mthca_qp.c:1634: int nreq;  [local]
mthca_qp.c:1638: * f0 and size0 are only used if nreq != 0, and they will
mthca_qp.c:1640: * before nreq is incremented. So nreq cannot become non-zero
mthca_qp.c:1655: for (nreq = 0; wr; ++nreq, wr = wr->next) {
mthca_qp.c:1656: if (mthca_wq_overflow(&qp->sq, nreq, q
mthca_qp.c:1830: int nreq;  [local]
mthca_qp.c:1938: int nreq;  [local]
mthca_qp.c:2169: int nreq;  [local]
[all...]

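mthca_srq.c differs from the other post loops by ringing the doorbell mid-loop: the WQE count travels in the low byte of the doorbell word ((srq->srqn << 8) | nreq at line 568), so nreq is flushed and reset whenever it reaches MTHCA_TAVOR_MAX_WQES_PER_RECV_DB. Schematic, with hypothetical toy_* names and an assumed batch limit:

#define TOY_MAX_WQES_PER_DB 256	/* stand-in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

struct toy_wr {
	struct toy_wr *next;
};

struct toy_srq_db {
	int doorbells;			/* just for observation */
};

/* Stand-in for the mthca_write64() doorbell at mthca_srq.c:568. */
static void toy_ring_db(struct toy_srq_db *srq, int count)
{
	(void)count;
	srq->doorbells++;
}

/* Loop shape of mthca_srq.c:502-545: the doorbell word has limited room
 * for the WQE count, so a full batch is flushed mid-loop and nreq
 * restarts from zero; any remainder is flushed after the loop. */
static void toy_post(struct toy_srq_db *srq, const struct toy_wr *wr)
{
	int nreq;

	for (nreq = 0; wr; wr = wr->next) {
		/* ... build one WQE ... */
		++nreq;
		if (nreq == TOY_MAX_WQES_PER_DB) {
			toy_ring_db(srq, nreq);
			nreq = 0;
		}
	}

	if (nreq)
		toy_ring_db(srq, nreq);
}
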
/linux-master/drivers/usb/gadget/function/

uvc_queue.c:48: unsigned int nreq;  [local]
uvc_queue.c:64: nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);
uvc_queue.c:65: nreq = clamp(nreq, 4U, 64U);
uvc_queue.c:66: video->uvc_num_requests = nreq;

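The arithmetic at uvc_queue.c:64-65 sizes the isochronous request pool from the buffer size, apparently covering half of one buffer's worth of data per batch, never fewer than 4 nor more than 64 requests. Worked through with illustrative numbers (not from the source): a 640x480 YUYV buffer has sizes[0] = 614400 bytes, so with req_size = 3072 this gives DIV_ROUND_UP(DIV_ROUND_UP(614400, 2), 3072) = DIV_ROUND_UP(307200, 3072) = 100, which the clamp then caps at 64.
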
/linux-master/drivers/infiniband/hw/mlx4/

srq.c:315: int nreq;  [local]
srq.c:326: for (nreq = 0; wr; ++nreq, wr = wr->next) {
srq.c:358: if (likely(nreq)) {
srq.c:359: srq->wqe_ctr += nreq;

qp.c:3278: static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)  [argument]
qp.c:3284: if (likely(cur + nreq < wq->max_post))
qp.c:3292: return cur + nreq >= wq->max_post;
qp.c:3530: int nreq;  [local]
qp.c:3566: nreq = 0;
qp.c:3572: for (nreq = 0; wr; ++nreq, wr = wr->next) {
qp.c:3576: if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
qp.c:3589: qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
qp.c:3815: if (likely(nreq)) {
qp.c:3850: int nreq;  [local]
[all...]

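The wrid indexing at qp.c:3589 (and hns_roce_hw_v2.c:721 below) leans on wqe_cnt being a power of two, so modulo reduces to a mask:

/* (head + nreq) % wqe_cnt without a division; correct only when
 * wqe_cnt is a power of two, which these drivers enforce when sizing
 * their queues. */
static inline unsigned int ring_slot(unsigned int head, unsigned int nreq,
				     unsigned int wqe_cnt)
{
	return (head + nreq) & (wqe_cnt - 1);
}
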
/linux-master/fs/nfs/

pnfs_nfs.c:459: unsigned int nreq = 0;  [local]
pnfs_nfs.c:471: nreq++;
pnfs_nfs.c:475: return nreq;
pnfs_nfs.c:480: return nreq;
pnfs_nfs.c:515: unsigned int nreq = 0;  [local]
pnfs_nfs.c:526: nreq++;
pnfs_nfs.c:529: nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);
pnfs_nfs.c:530: if (nreq == 0)

/linux-master/drivers/crypto/inside-secure/

safexcel.c:824: int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;  [local]
safexcel.c:864: nreq++;
safexcel.c:875: if (!nreq)
safexcel.c:880: priv->ring[ring].requests += nreq;
safexcel.c:1016: int ret, i, nreq, ndesc, tot_descs, handled = 0;  [local]
safexcel.c:1022: nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
safexcel.c:1023: nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
safexcel.c:1024: nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
safexcel.c:1025: if (!nreq)
safexcel.c:1028: for (i = 0; i < nreq;
[all...]

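The second safexcel cluster reads how many packets the result ring has processed straight out of a packed status register. A condensed restatement of lines 1022-1024, using the driver's own EIP197 register names from the snippet (meaningful only inside that driver):

/* Read the result ring's PROC_COUNT register once, then shift and mask
 * to isolate the processed-packet count, as at safexcel.c:1022-1024. */
static u32 toy_proc_pkt_count(struct safexcel_crypto_priv *priv, int ring)
{
	u32 reg = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	return (reg >> EIP197_xDR_PROC_xD_PKT_OFFSET) &
	       EIP197_xDR_PROC_xD_PKT_MASK;
}
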
/linux-master/drivers/dma/

bcm-sba-raid.c:289: struct sba_request *nreq;  [local]
bcm-sba-raid.c:295: list_for_each_entry(nreq, &req->next, next)
bcm-sba-raid.c:296: _sba_free_request(sba, nreq);
bcm-sba-raid.c:412: struct sba_request *nreq, *first = req->first;  [local]
bcm-sba-raid.c:434: list_for_each_entry(nreq, &first->next, next)
bcm-sba-raid.c:435: _sba_free_request(sba, nreq);
bcm-sba-raid.c:520: struct sba_request *req, *nreq;  [local]
bcm-sba-raid.c:532: list_for_each_entry(nreq, &req->next, next)
bcm-sba-raid.c:533: _sba_pending_request(sba, nreq);

/linux-master/fs/nilfs2/

btree.c:1741: union nilfs_bmap_ptr_req *nreq,
btree.c:1768: if (nreq != NULL) {
btree.c:1769: nreq->bpr_ptr = dreq->bpr_ptr + 1;
btree.c:1770: ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
btree.c:1774: ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
btree.c:1787: nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);
btree.c:1801: union nilfs_bmap_ptr_req *nreq,
btree.c:1819: if (nreq != NULL) {
btree.c:1821: nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
btree.c:1837: tmpptr = nreq
btree.c:1739: nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats)  [argument]
btree.c:1796: nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh)  [argument]
btree.c:1873: union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;  [local]
[all...]

/linux-master/drivers/crypto/intel/qat/qat_common/

qat_algs.c:1070: struct skcipher_request *nreq = skcipher_request_ctx(req);  [local]
qat_algs.c:1076: memcpy(nreq, req, sizeof(*req));
qat_algs.c:1077: skcipher_request_set_tfm(nreq, ctx->ftfm);
qat_algs.c:1078: return crypto_skcipher_encrypt(nreq);
qat_algs.c:1138: struct skcipher_request *nreq = skcipher_request_ctx(req);  [local]
qat_algs.c:1144: memcpy(nreq, req, sizeof(*req));
qat_algs.c:1145: skcipher_request_set_tfm(nreq, ctx->ftfm);
qat_algs.c:1146: return crypto_skcipher_decrypt(nreq);

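Both qat hits are the same fallback idiom: the driver sized its request context large enough to hold a second skcipher_request, so unsupported inputs can be re-dispatched to a software tfm. A condensed sketch (hypothetical wrapper name; assumes the reqsize was set to accommodate the fallback request, as the driver must have done):

#include <crypto/skcipher.h>
#include <linux/string.h>

/* Shape of qat_algs.c:1070-1078: reuse the request's context memory as
 * a request aimed at the fallback tfm and resubmit.  The memcpy copies
 * only the request header (completion callback, src/dst, lengths, IV);
 * skcipher_request_set_tfm() then repoints it at the fallback. */
static int toy_fallback_encrypt(struct skcipher_request *req,
				struct crypto_skcipher *ftfm)
{
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	memcpy(nreq, req, sizeof(*req));
	skcipher_request_set_tfm(nreq, ftfm);
	return crypto_skcipher_encrypt(nreq);
}
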
/linux-master/drivers/crypto/rockchip/

rk3288_crypto.h:223: unsigned long nreq;  [member in struct rk_crypto_info]

rk3288_crypto_ahash.c:282: rkc->nreq++;

rk3288_crypto.c:203: dd->nreq);

rk3288_crypto_skcipher.c:322: rkc->nreq++;

/linux-master/drivers/infiniband/hw/hns/

hns_roce_hw_v2.c:700: u32 nreq;  [local]
hns_roce_hw_v2.c:708: nreq = 0;
hns_roce_hw_v2.c:714: for (nreq = 0; wr; ++nreq, wr = wr->next) {
hns_roce_hw_v2.c:715: if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
hns_roce_hw_v2.c:721: wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
hns_roce_hw_v2.c:734: ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
hns_roce_hw_v2.c:749: if (likely(nreq)) {
hns_roce_hw_v2.c:750: qp->sq.head += nreq;
hns_roce_hw_v2.c:753: if (nreq
hns_roce_hw_v2.c:820: u32 wqe_idx, nreq, max_sge;  [local]
hns_roce_hw_v2.c:966: u32 nreq;  [local]
[all...]

hns_roce_qp.c:1508: bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,  [argument]
hns_roce_qp.c:1515: if (likely(cur + nreq < hr_wq->wqe_cnt))
hns_roce_qp.c:1523: return cur + nreq >= hr_wq->wqe_cnt;

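The expression at hns_roce_hw_v2.c:734 derives the WQE owner bit: head + nreq counts WQEs ever posted, so shifting out ilog2(wqe_cnt) bits gives the number of complete laps around the ring, and the inverted low bit flips each lap. A restatement (the full-width inversion is masked down to a single bit when stored into the WQE):

#include <linux/log2.h>
#include <linux/types.h>

/* Owner flag for the WQE at absolute position head + nreq: hardware
 * compares it against what it finds in the ring to tell this lap's
 * fresh entries from stale ones left over from the previous lap. */
static inline u32 toy_owner_bit(u32 head, u32 nreq, u32 wqe_cnt)
{
	return ~(((head + nreq) >> ilog2(wqe_cnt)) & 0x1);
}
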
/linux-master/drivers/net/ethernet/mellanox/mlx4/

main.c:2968: int nreq = min3(dev->caps.num_ports *  [local]
main.c:2974: nreq = min_t(int, nreq, msi_x);
main.c:2976: entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
main.c:2980: for (i = 0; i < nreq; ++i)
main.c:2983: nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
main.c:2984: nreq);
main.c:2986: if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
main.c:2991: dev->caps.num_comp_vectors = nreq
[all...]

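This mlx4 block negotiates an MSI-X vector count: compute a desired nreq, allocate an msix_entry table, and let pci_enable_msix_range() grant anything between 2 and nreq. A simplified, hypothetical wrapper; the minimum of two appears to match the MLX4_EQ_ASYNC check above (one async event queue plus at least one completion queue):

#include <linux/pci.h>
#include <linux/slab.h>

/* Ask for up to nreq MSI-X vectors, accepting as few as two.
 * pci_enable_msix_range() returns the granted count or a negative
 * errno if even the minimum could not be satisfied. */
static int toy_enable_msix(struct pci_dev *pdev, int nreq)
{
	struct msix_entry *entries;
	int i, granted;

	entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < nreq; ++i)
		entries[i].entry = i;

	granted = pci_enable_msix_range(pdev, entries, 2, nreq);

	/* A real driver records entries[i].vector for each granted
	 * vector before freeing the scratch table. */
	kfree(entries);
	return granted;
}
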
/linux-master/drivers/nvme/host/

apple.c:789: struct nvme_request *nreq = nvme_req(req);  [local]
apple.c:792: nreq->ctrl = &anv->ctrl;
apple.c:793: nreq->cmd = &iod->cmd;