Lines matching refs: inbox

181 	u32 *inbox;
192 inbox = mailbox->buf;
194 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
195 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
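
The MLX4_PUT() calls at 194-195 copy a host-order value into the command mailbox buffer at a fixed byte offset, converting multi-byte values to big endian as the firmware expects; the real macro picks the store width from sizeof() of its source argument. A minimal sketch of that pattern, for illustration only: the helper names here are hypothetical, and the MOD_STAT_CFG_*_OFFSET constants are the driver's own.

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Store a 32-bit value big-endian at (buf + offset), roughly what a
	 * 4-byte MLX4_PUT() does into mailbox->buf.
	 */
	static inline void put_be32_at(void *buf, u32 val, int offset)
	{
		*(__be32 *)((u8 *)buf + offset) = cpu_to_be32(val);
	}

	/* Single bytes, such as the log page size fields above, need no swap. */
	static inline void put_u8_at(void *buf, u8 val, int offset)
	{
		*((u8 *)buf + offset) = val;
	}
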
327 struct mlx4_cmd_mailbox *inbox,
1274 struct mlx4_cmd_mailbox *inbox,
1408 struct mlx4_cmd_mailbox *inbox,
1745 struct mlx4_cmd_mailbox *inbox,
1836 __be32 *inbox;
1899 inbox = mailbox->buf;
1907 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1909 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1914 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1918 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1922 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1926 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1930 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
1934 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1943 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1956 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1965 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1968 u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4);
1976 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
1977 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
1978 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
1979 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
1980 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
1981 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
1982 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
1983 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
1984 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
1985 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
1986 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET);
1987 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1988 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1993 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1997 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1998 MLX4_PUT(inbox, param->log_mc_entry_sz,
2000 MLX4_PUT(inbox, param->log_mc_table_sz,
2007 MLX4_PUT(inbox,
2010 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
2015 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
2017 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
2022 MLX4_PUT(inbox,
2027 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
2028 MLX4_PUT(inbox, param->log_mc_entry_sz,
2030 MLX4_PUT(inbox, param->log_mc_hash_sz,
2032 MLX4_PUT(inbox, param->log_mc_table_sz,
2035 MLX4_PUT(inbox, (u8) (1 << 3),
2041 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
2042 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
2043 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
2044 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
2045 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
2049 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
2050 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
2055 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
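
Lines 1907-1965 build the INIT_HCA flags word by OR-ing individual feature bits into the big-endian dword at INIT_HCA_FLAGS_OFFSET (inbox is a __be32 * here, so the byte offset is divided by 4), and lines 1976-2055 then lay the ICM base addresses and log sizes into their fixed offsets with MLX4_PUT. A hedged sketch of the flag-bit pattern; the helper name is made up and INIT_HCA_FLAGS_OFFSET is the driver's private define.

	/* Set one feature bit in the big-endian flags dword inside the
	 * INIT_HCA inbox. 'inbox' is the __be32 * view of mailbox->buf
	 * (line 1899); dividing the byte offset by 4 indexes it in dwords.
	 */
	static void set_init_hca_flag(__be32 *inbox, int bit)
	{
		inbox[INIT_HCA_FLAGS_OFFSET / 4] |= cpu_to_be32(1U << bit);
	}

	/* e.g. set_init_hca_flag(inbox, 3) mirrors the store at line 1918. */
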
2255 struct mlx4_cmd_mailbox *inbox,
2299 u32 *inbox;
2322 inbox = mailbox->buf;
2327 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
2330 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
2332 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
2334 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
2353 struct mlx4_cmd_mailbox *inbox,
2919 struct mlx4_cmd_mailbox *inbox, *outbox;
2923 inbox = mlx4_alloc_cmd_mailbox(dev);
2924 if (IS_ERR(inbox))
2925 return PTR_ERR(inbox);
2929 mlx4_free_cmd_mailbox(dev, inbox);
2933 inbuf = inbox->buf;
2947 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2963 mlx4_free_cmd_mailbox(dev, inbox);
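
Lines 2919-2963 show the usual mailbox lifecycle around a firmware command: allocate an inbox and an outbox, fill inbox->buf, issue mlx4_cmd_box() with both DMA addresses, and free the mailboxes afterwards. A compressed sketch of that flow, assuming the mlx4 command constants (MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE) and eliding the request/reply payload handling.

	#include <linux/err.h>
	#include <linux/mlx4/cmd.h>
	#include <linux/mlx4/device.h>

	static int access_reg_sketch(struct mlx4_dev *dev)
	{
		struct mlx4_cmd_mailbox *inbox, *outbox;
		int err;

		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox))
			return PTR_ERR(inbox);

		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			mlx4_free_cmd_mailbox(dev, inbox);
			return PTR_ERR(outbox);
		}

		/* Build the request in inbox->buf here. */

		/* Firmware reads the request from inbox->dma and writes the
		 * reply to outbox->dma, as at line 2947.
		 */
		err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
				   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
				   MLX4_CMD_NATIVE);

		/* On success, copy the reply out of outbox->buf before freeing. */

		mlx4_free_cmd_mailbox(dev, outbox);
		mlx4_free_cmd_mailbox(dev, inbox);
		return err;
	}
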
2995 struct mlx4_cmd_mailbox *inbox,
2999 struct mlx4_access_reg *inbuf = inbox->buf;
3016 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,