Lines matching refs:inbox

Each entry below is a source line, prefixed with its line number in the searched file, that references the inbox command mailbox (struct mlx4_cmd_mailbox). The function names and fixed offsets are consistent with the SR-IOV resource tracker of the Linux mlx4 driver (drivers/net/ethernet/mellanox/mlx4/resource_tracker.c).

692 			      struct mlx4_cmd_mailbox *inbox)
694 u8 sched = *(u8 *)(inbox->buf + 64);
695 u8 orig_index = *(u8 *)(inbox->buf + 35);
703 *(u8 *)(inbox->buf + 35) = new_index;
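
Lines 692-703 are the body of a P_Key paravirtualization helper: byte 64 of the mailbox is the schedule-queue field, whose bit 6 encodes the port, and byte 35 is the guest's P_Key index, overwritten in place with the physical one. A minimal user-space model of that fixed-offset rewrite, with the lookup table's dimensions invented for illustration:

#include <stdint.h>

struct mailbox { void *buf; };	/* stand-in for struct mlx4_cmd_mailbox */

/* Table modeled on the driver's priv->virt2phys_pkey[slave][port][index];
 * the bounds here are illustrative, not the real capacities. */
extern uint8_t virt2phys_pkey[64][2][128];

static void model_update_pkey_index(struct mailbox *inbox, int slave)
{
	uint8_t sched = *((uint8_t *)inbox->buf + 64);	/* sched_queue byte */
	uint8_t orig  = *((uint8_t *)inbox->buf + 35);	/* guest's P_Key index */
	int port = ((sched >> 6) & 1) + 1;		/* bit 6 selects port 1 or 2 */

	/* Rewrite in place: the firmware must see the physical index. */
	*((uint8_t *)inbox->buf + 35) = virt2phys_pkey[slave][port - 1][orig];
}
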
706 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
709 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
710 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
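
update_gid() (line 706) exposes the modify-QP mailbox layout used throughout this file: the first big-endian dword is the optional-parameter mask, and the QP context itself begins 8 bytes in. A sketch of recovering both, assuming only that layout:

#include <arpa/inet.h>	/* ntohl() stands in for the kernel's be32_to_cpu() */
#include <stdint.h>

struct qp_context_model { uint8_t raw[248]; };	/* stand-in for struct mlx4_qp_context */

static void parse_modify_qp_mailbox(void *buf)
{
	uint32_t optpar = ntohl(*(uint32_t *)buf);	/* dword 0: optional-parameter mask */
	struct qp_context_model *ctx =
		(struct qp_context_model *)((char *)buf + 8);	/* context starts at +8 */

	(void)optpar;
	(void)ctx;
	/* A hook like update_gid() rewrites per-slave fields in *ctx,
	 * guarded by the bits set in optpar. */
}
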
750 struct mlx4_cmd_mailbox *inbox,
753 struct mlx4_qp_context *qpc = inbox->buf + 8;
779 *(__be32 *)inbox->buf =
780 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
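
Lines 779-780 are a read-modify-write of that leading optpar dword: decode from big-endian, OR in a flag, re-encode. The same round trip in portable C, with a placeholder bit standing in for the real MLX4_QP_OPTPAR_* constant:

#include <arpa/inet.h>	/* htonl()/ntohl() model cpu_to_be32()/be32_to_cpu() */
#include <stdint.h>

#define OPTPAR_VLAN_STRIPPING (1u << 25)	/* placeholder bit, not the real constant */

static void set_optpar_flag(void *inbox_buf)
{
	uint32_t *dw0 = inbox_buf;

	/* Decode, OR in the flag, re-encode: the dword stays big-endian in
	 * the mailbox throughout. */
	*dw0 = htonl(ntohl(*dw0) | OPTPAR_VLAN_STRIPPING);
}
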
2296 struct mlx4_cmd_mailbox *inbox,
2610 struct mlx4_cmd_mailbox *inbox,
2757 struct mlx4_cmd_mailbox *inbox,
2765 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2777 if (!mr_is_region(inbox->buf)) {
2783 pd = mr_get_pd(inbox->buf);
2790 if (mr_is_fmr(inbox->buf)) {
2792 if (mr_is_bind_enabled(inbox->buf)) {
2797 if (!mr_is_region(inbox->buf)) {
2803 phys = mr_phys_mpt(inbox->buf);
2810 mr_get_mtt_size(inbox->buf), mtt);
2817 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
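
Lines 2765-2810 query the memory-region entry the guest wrote into the inbox through a family of tiny accessors (mr_get_mtt_addr, mr_is_region, mr_get_pd, ...), so the PF can validate ownership before forwarding the command at line 2817. A sketch of that accessor style over a hypothetical two-field layout; the real MPT entry has many more fields:

#include <arpa/inet.h>
#include <stdint.h>

/* Hypothetical on-the-wire layout with just the two fields this sketch reads. */
struct mpt_entry_model {
	uint32_t flags;		/* big-endian; bit 0 = "is region" in this model */
	uint32_t mtt_addr;	/* big-endian; low 3 bits reserved */
};

static int mpt_is_region(const void *mpt)
{
	return ntohl(((const struct mpt_entry_model *)mpt)->flags) & 1;
}

static uint32_t mpt_get_mtt_addr(const void *mpt)
{
	/* Mask reserved low bits; dividing by the MTT entry size then yields
	 * an mtt_base index as at line 2765. */
	return ntohl(((const struct mpt_entry_model *)mpt)->mtt_addr) & ~7u;
}
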
2840 struct mlx4_cmd_mailbox *inbox,
2854 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2872 struct mlx4_cmd_mailbox *inbox,
2908 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2950 struct mlx4_cmd_mailbox *inbox);
2954 struct mlx4_cmd_mailbox *inbox,
2962 struct mlx4_qp_context *qpc = inbox->buf + 8;
2974 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3017 update_pkey_index(dev, slave, inbox);
3018 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
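
Lines 2962-3018 show the wrapper shape most of the remaining hits follow: translate the guest's view of the mailbox (scheduling queue, P_Key index), do resource-tracker bookkeeping, then pass the command through mlx4_DMA_wrapper. Reduced to stubs, under the assumption that the helper names merely mirror the kernel's; none of this is verbatim driver code:

struct dev;				/* stand-in for struct mlx4_dev */
struct mailbox { void *buf; };		/* stand-in for struct mlx4_cmd_mailbox */

int adjust_sched_queue(struct dev *d, int slave, void *qpc, struct mailbox *in);
void translate_pkey_index(struct dev *d, int slave, struct mailbox *in);
int forward_dma(struct dev *d, int slave, struct mailbox *in);

static int rst2init_wrapper_model(struct dev *d, int slave, struct mailbox *in)
{
	void *qpc = (char *)in->buf + 8;	/* QP context at mailbox offset +8 */
	int err = adjust_sched_queue(d, slave, qpc, in);	/* remap the port */

	if (err)
		return err;
	/* ...resource-tracker bookkeeping for the QP and its MTTs... */
	translate_pkey_index(d, slave, in);	/* guest index -> physical index */
	return forward_dma(d, slave, in);	/* hand the fixed-up inbox to FW */
}
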
3095 struct mlx4_cmd_mailbox *inbox,
3102 struct mlx4_eq_context *eqc = inbox->buf;
3123 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
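
Note the contrast at line 3102: for SW2HW commands on EQs (and on CQs and SRQs at lines 3442, 3531 and 3620 below), the hardware context sits at offset 0 of the inbox, so the cast is direct, with no optpar dword in front:

/* Stand-ins for the hardware context structs. */
struct eq_context_model { unsigned char raw[64]; };
struct qp_context_model { unsigned char raw[248]; };

static struct eq_context_model *sw2hw_ctx(void *inbox_buf)
{
	return (struct eq_context_model *)inbox_buf;	/* context at offset 0 */
}

static struct qp_context_model *modify_qp_ctx(void *inbox_buf)
{
	/* modify-QP mailboxes carry the optpar dword plus padding first */
	return (struct qp_context_model *)((char *)inbox_buf + 8);
}
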
3144 struct mlx4_cmd_mailbox *inbox,
3154 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3185 struct mlx4_cmd_mailbox *inbox,
3195 qp_ctx = inbox->buf + 8;
3197 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3265 struct mlx4_cmd_mailbox *inbox,
3270 __be64 *page_list = inbox->buf;
3284 * - Translate inbox contents to simple addresses in host endianness */
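
Lines 3265-3284 are the WRITE_MTT path, where the inbox is not a context struct but an array of big-endian 64-bit entries; per the quoted comment, they are converted in place to host-endian addresses before the software implementation consumes them. A portable model, assuming entry 0 carries the starting MTT index, payload entries begin at index 2, and bit 0 is a present flag:

#include <stdint.h>

static uint64_t be64_to_host(const void *p)	/* portable be64_to_cpu() */
{
	const uint8_t *b = p;
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

static void translate_page_list(uint64_t *page_list, int npages)
{
	/* In place: each big-endian entry becomes a host-endian address, and
	 * bit 0 (the present flag in this model) is masked off. */
	for (int i = 0; i < npages; i++)
		page_list[i + 2] = be64_to_host(&page_list[i + 2]) & ~1ULL;
}
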
3303 struct mlx4_cmd_mailbox *inbox,
3320 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3409 struct mlx4_cmd_mailbox *inbox,
3427 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3436 struct mlx4_cmd_mailbox *inbox,
3442 struct mlx4_cq_context *cqc = inbox->buf;
3456 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3474 struct mlx4_cmd_mailbox *inbox,
3485 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3499 struct mlx4_cmd_mailbox *inbox,
3514 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3523 struct mlx4_cmd_mailbox *inbox,
3531 struct mlx4_cq_context *cqc = inbox->buf;
3550 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3571 struct mlx4_cmd_mailbox *inbox,
3587 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3591 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3612 struct mlx4_cmd_mailbox *inbox,
3620 struct mlx4_srq_context *srqc = inbox->buf;
3637 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3657 struct mlx4_cmd_mailbox *inbox,
3668 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3686 struct mlx4_cmd_mailbox *inbox,
3701 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3709 struct mlx4_cmd_mailbox *inbox,
3726 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3734 struct mlx4_cmd_mailbox *inbox,
3750 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3758 struct mlx4_cmd_mailbox *inbox,
3762 struct mlx4_qp_context *context = inbox->buf + 8;
3764 update_pkey_index(dev, slave, inbox);
3765 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3770 struct mlx4_cmd_mailbox *inbox)
3772 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3803 struct mlx4_cmd_mailbox *inbox)
3808 u8 sched = *(u8 *)(inbox->buf + 64);
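
Line 3808 reads the same sched byte as line 694: bit 6 packs the port number, and the sched-queue adjustment seen at lines 2974, 3837 and onward rewrites that bit after converting the slave's virtual port to a physical one. A sketch of the bit surgery, with the port conversion left as an assumed helper:

#include <stdint.h>

int convert_slave_port(int slave, int virt_port);	/* assumed helper: returns the physical port */

static uint8_t remap_sched_queue(uint8_t sched_queue, int slave)
{
	int virt_port = ((sched_queue >> 6) & 1) + 1;	/* bit 6: 0 -> port 1, 1 -> port 2 */
	int phys_port = convert_slave_port(slave, virt_port);

	/* Clear bit 6, then splice the physical port back in (0-based). */
	return (uint8_t)((sched_queue & ~(1 << 6)) | ((phys_port - 1) << 6));
}
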
3822 struct mlx4_cmd_mailbox *inbox,
3827 struct mlx4_qp_context *qpc = inbox->buf + 8;
3837 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3840 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3844 if (roce_verify_mac(dev, slave, qpc, inbox))
3847 update_pkey_index(dev, slave, inbox);
3848 update_gid(dev, inbox, (u8)slave);
3860 err = update_vport_qp_param(dev, inbox, slave, qpn);
3864 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
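
INIT2RTR (lines 3822-3864) is where the address path goes live, so this wrapper stacks every fix-up: scheduling-queue remap, parameter verification, MAC check, then the P_Key and GID rewrites, before forwarding at line 3864. The GID rewrite is the characteristic one; a sketch modeled on re-basing the guest's index into its own window of the physical GID table (the mask and helper are assumptions, not quoted code):

#include <stdint.h>

int slave_gid_base(int slave, int port);	/* assumed helper: slave's first slot in the physical GID table */

static void rewrite_gid_index(uint8_t *mgid_index, int slave, int port)
{
	int guest_ix = *mgid_index & 0x7f;	/* low 7 bits carry the index */

	/* Re-base the guest's index into its own window of the physical GID
	 * table, so one guest cannot address another guest's GIDs. */
	*mgid_index = (uint8_t)((slave_gid_base(slave, port) + guest_ix) & 0x7f);
}
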
3884 struct mlx4_cmd_mailbox *inbox,
3889 struct mlx4_qp_context *context = inbox->buf + 8;
3891 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3894 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3898 update_pkey_index(dev, slave, inbox);
3899 update_gid(dev, inbox, (u8)slave);
3901 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3906 struct mlx4_cmd_mailbox *inbox,
3911 struct mlx4_qp_context *context = inbox->buf + 8;
3913 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3916 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3920 update_pkey_index(dev, slave, inbox);
3921 update_gid(dev, inbox, (u8)slave);
3923 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3929 struct mlx4_cmd_mailbox *inbox,
3933 struct mlx4_qp_context *context = inbox->buf + 8;
3934 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3938 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3943 struct mlx4_cmd_mailbox *inbox,
3948 struct mlx4_qp_context *context = inbox->buf + 8;
3950 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3953 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3958 update_gid(dev, inbox, (u8)slave);
3959 update_pkey_index(dev, slave, inbox);
3960 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3965 struct mlx4_cmd_mailbox *inbox,
3970 struct mlx4_qp_context *context = inbox->buf + 8;
3972 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3975 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3980 update_gid(dev, inbox, (u8)slave);
3981 update_pkey_index(dev, slave, inbox);
3982 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3987 struct mlx4_cmd_mailbox *inbox,
3998 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4140 struct mlx4_cmd_mailbox *inbox,
4145 u8 *gid = inbox->buf;
4226 struct mlx4_cmd_mailbox *inbox,
4238 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4242 /* Clear a space in the inbox for eth header */
4286 struct mlx4_cmd_mailbox *inbox,
4299 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4336 err = mlx4_cmd(dev, inbox->dma,
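
Line 4336 shows why the in-place fix-ups above work at all: the wrapper posts the command with inbox->dma, the device-side address of the very buffer the CPU has been editing through inbox->buf. A model of that dual view; post_command() is an invented stand-in for the real command interface:

#include <stdint.h>

/* Two views of one command mailbox, modeled on struct mlx4_cmd_mailbox:
 * the CPU edits through buf, the device reads through dma. */
struct cmd_mailbox {
	void	*buf;	/* kernel-virtual address of the 4 KB buffer */
	uint64_t dma;	/* bus address handed to the firmware command */
};

int post_command(uint64_t in_param, uint16_t opcode);	/* assumed doorbell helper */

static int issue_from_mailbox(struct cmd_mailbox *inbox, uint16_t opcode)
{
	/* Every fix-up made through inbox->buf above is visible to the
	 * device, since buf and dma name the same coherent allocation. */
	return post_command(inbox->dma, opcode);
}
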
4368 struct mlx4_cmd_mailbox *inbox,
4389 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4419 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4432 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4450 mbox_size = qp_attach_mbox_size(inbox->buf);
4458 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
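
Lines 4450-4458 size the live portion of the attach mailbox and keep a private copy (mirr_mbox), presumably so the flow-steering rule can be replayed on the mirror port later. A user-space model of that snapshot, with attach_mbox_size() assumed:

#include <stdlib.h>
#include <string.h>

size_t attach_mbox_size(const void *buf);	/* assumed: live bytes, derived from the rule header */

static void *save_mirror_mbox(const void *inbox_buf, size_t *out_size)
{
	size_t n = attach_mbox_size(inbox_buf);
	void *copy = malloc(n);

	if (copy) {
		memcpy(copy, inbox_buf, n);	/* snapshot before the inbox is recycled */
		*out_size = n;
	}
	return copy;
}
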
4502 struct mlx4_cmd_mailbox *inbox,
4566 struct mlx4_cmd_mailbox *inbox,
4577 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);