Searched refs:mw (Results 1 - 25 of 39) sorted by relevance


/linux-master/drivers/infiniband/sw/rxe/
rxe_mw.c:18 struct rxe_mw *mw = to_rmw(ibmw); local
25 ret = rxe_add_to_pool(&rxe->mw_pool, mw);
31 mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
32 mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
34 spin_lock_init(&mw->lock);
36 rxe_finalize(mw);
43 struct rxe_mw *mw = to_rmw(ibmw); local
45 rxe_cleanup(mw);
50 rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr, int access) argument
135 rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr, int access) argument
167 struct rxe_mw *mw; local
224 rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw) argument
236 rxe_do_invalidate_mw(struct rxe_mw *mw) argument
261 struct rxe_mw *mw; local
294 struct rxe_mw *mw; local
314 struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); local
[all...]
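
The rkey assignment at line 31 packs the MW's pool index into the upper bits and an 8-bit variant key into the low byte. A minimal sketch of that key layout, with a hypothetical next_key() standing in for rxe_get_next_key():

#include <stdint.h>

/* Hypothetical stand-in for rxe_get_next_key(-1): an 8-bit rolling key. */
static uint8_t next_key(void)
{
        static uint8_t key;

        return ++key;
}

/* Compose an rkey the way rxe_mw.c does: pool index in the upper 24 bits,
 * variant key in the low 8 bits. */
static uint32_t compose_rkey(uint32_t pool_index)
{
        return (pool_index << 8) | next_key();
}
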
rxe.h:58 #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
59 "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
79 #define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
80 "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
100 #define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)
[all...]
rxe_verbs.h:453 static inline struct rxe_mw *to_rmw(struct ib_mw *mw) argument
455 return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
468 static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw) argument
470 return to_rpd(mw->ibmw.pd);
rxe_resp.c:431 struct rxe_mw *mw = NULL; local
483 mw = rxe_lookup_mw(qp, access, rkey);
484 if (!mw) {
490 mr = mw->mr;
497 if (mw->access & IB_ZERO_BASED)
498 qp->resp.offset = mw->addr;
501 rxe_put(mw);
502 mw = NULL;
556 if (mw)
557 rxe_put(mw);
830 struct rxe_mw *mw; local
[all...]
/linux-master/arch/mips/txx9/generic/
mem_tx4927.c:46 unsigned int mw = 0; local
61 mw = 8 >> sdccr_mw;
64 return rs * cs * mw * bs;
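
The size returned at line 64 is simply rows x columns x bus width x banks, where mw = 8 >> sdccr_mw converts the encoded width field into bytes. A hedged worked example of that arithmetic with assumed field values:

/* Sketch of the tx4927 SDRAM size calculation with assumed example
 * values, not taken from a real SDCCR register dump. */
static unsigned int tx4927_sdram_size_example(void)
{
        unsigned int rs = 8192;         /* rows */
        unsigned int cs = 1024;         /* columns */
        unsigned int mw = 8 >> 0;       /* 8-byte data path for sdccr_mw == 0 */
        unsigned int bs = 4;            /* banks */

        return rs * cs * mw * bs;       /* 8192 * 1024 * 8 * 4 = 256 MiB */
}
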
/linux-master/net/netfilter/ipvs/
ip_vs_wrr.c:28 * - mw: maximum weight
31 * As result, all weights are in the [di..mw] range with a step=di.
33 * First, we start with cw = mw and select dests with weight >= cw.
35 * Last pass should be with cw = di. We have mw/di passes in total:
47 * So, we modify how mw is calculated, now it is reduced with (di - 1),
63 int mw; /* maximum weight */ member in struct:ip_vs_wrr_mark
119 mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
120 mark->cw = mark->mw;
146 mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
147 if (mark->cw > mark->mw || !mar
[all...]
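
The comments above describe the scheduler's pass structure: cw starts at the (already reduced) maximum weight mw and drops by di, the gcd of all weights, on each pass; a destination is eligible whenever its weight >= cw. A minimal user-space sketch of those passes, assuming a plain array of weights instead of the IPVS destination list:

#include <stdio.h>

/* Hedged sketch of the weighted round-robin passes described in the
 * ip_vs_wrr.c comments; di (gcd of weights) and mw (maximum weight)
 * are assumed to be precomputed. */
static void wrr_passes(const int *weight, int n, int di, int mw)
{
        for (int cw = mw; cw >= di; cw -= di)   /* mw/di passes in total */
                for (int i = 0; i < n; i++)
                        if (weight[i] >= cw)
                                printf("pick dest %d (weight %d, cw %d)\n",
                                       i, weight[i], cw);
}
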
ip_vs_mh.c:347 int mw, shift; local
365 mw = weight / gcd;
368 shift = fls(mw) - IP_VS_MH_TAB_BITS;
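
At line 368 the largest gcd-reduced weight is compared against the lookup-table size: fls(mw) minus the table's bit width gives how far every weight must be shifted down to fit. A hedged sketch of that scaling, with an assumed table width and a portable fls() stand-in:

/* Hedged sketch of the ip_vs_mh.c weight scaling; the 8-bit table width
 * is an assumption for illustration, and fls_approx() mimics the
 * kernel's fls(). */
#define MH_TAB_BITS_EXAMPLE 8

static int fls_approx(unsigned int x)
{
        int bits = 0;

        while (x) {
                bits++;
                x >>= 1;
        }
        return bits;    /* position of the highest set bit, 0 if x == 0 */
}

static int mh_weight_shift(unsigned int max_weight, unsigned int gcd)
{
        unsigned int mw = max_weight / gcd;
        int shift = fls_approx(mw) - MH_TAB_BITS_EXAMPLE;

        return shift > 0 ? shift : 0;   /* right-shift applied to every weight */
}
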
/linux-master/drivers/ntb/
ntb_transport.c:617 struct ntb_transport_mw *mw; local
629 mw = &nt->mw_vec[mw_num];
631 if (!mw->virt_addr)
639 rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
640 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
793 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; local
796 if (!mw->virt_addr)
800 dma_free_coherent(&pdev->dev, mw->alloc_size,
801 mw->alloc_addr, mw
809 ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, struct device *dma_dev, size_t align) argument
856 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; local
1251 struct ntb_transport_mw *mw; local
[all...]
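
Lines 639-640 split one translated memory window evenly among the queue pairs that share it; each qp's receive buffer is an offset into the window's virtual mapping. A hedged sketch of that carve-up using plain pointers in place of struct ntb_transport_mw:

#include <stddef.h>

/* Hedged sketch of the ntb_transport rx-buffer carve-up: one memory
 * window of xlat_size bytes shared by num_qps_mw queue pairs, with
 * mw_count windows in total. */
static void *qp_rx_buff(void *mw_virt, size_t xlat_size,
                        unsigned int num_qps_mw, unsigned int mw_count,
                        unsigned int qp_num)
{
        size_t rx_size = xlat_size / num_qps_mw;

        /* qp_num / mw_count selects which slice of this window the qp owns. */
        return (char *)mw_virt + rx_size * (qp_num / mw_count);
}
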
/linux-master/drivers/net/ethernet/mellanox/mlx4/
mr.c:815 struct mlx4_mw *mw)
829 mw->key = hw_index_to_key(index);
830 mw->pd = pd;
831 mw->type = type;
832 mw->enabled = MLX4_MPT_DISABLED;
838 int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) argument
844 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
858 mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
859 mpt_entry->pd_flags = cpu_to_be32(mw->pd);
860 if (mw
814 mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, struct mlx4_mw *mw) argument
888 mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) argument
[all...]
/linux-master/fs/ocfs2/
dlmglue.c:434 struct ocfs2_mask_waiter *mw, int ret)
447 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
474 struct ocfs2_mask_waiter *mw; local
481 mw = list_first_entry(&lockres->l_mask_waiters,
484 ktime_to_us(ktime_mono_to_real(mw->mw_lock_start));
487 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) argument
489 mw->mw_lock_start = ktime_get();
496 int level, struct ocfs2_mask_waiter *mw, int ret)
505 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) argument
891 struct ocfs2_mask_waiter *mw, *tm local
433 ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) argument
495 ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, struct ocfs2_mask_waiter *mw, int ret) argument
1389 ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw) argument
1396 ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw) argument
1404 lockres_add_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw, unsigned long mask, unsigned long goal) argument
1421 __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw) argument
1439 lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres, struct ocfs2_mask_waiter *mw) argument
1453 ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw, struct ocfs2_lock_res *lockres) argument
1476 struct ocfs2_mask_waiter mw; local
1911 struct ocfs2_mask_waiter mw; local
1981 struct ocfs2_mask_waiter mw; local
2077 struct ocfs2_mask_waiter mw; local
3511 struct ocfs2_mask_waiter mw; local
[all...]
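
The mask-waiter helpers above follow one pattern: a waiter records a (mask, goal) pair plus a start timestamp, and is completed once the lock resource's flags, masked, equal the goal. A hedged sketch of that check outside of OCFS2, with illustrative field names:

/* Hedged sketch of the ocfs2 mask-waiter idea; the field names here are
 * illustrative, not the real struct ocfs2_mask_waiter layout. */
struct mask_waiter_example {
        unsigned long mask;
        unsigned long goal;
        int done;
};

static void check_waiter(unsigned long lockres_flags,
                         struct mask_waiter_example *mw)
{
        if ((lockres_flags & mw->mask) == mw->goal)
                mw->done = 1;   /* ocfs2 would complete the waiter here */
}
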
/linux-master/drivers/clk/rockchip/
clk.h:550 #define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
561 .mux_width = mw, \
571 #define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \
582 .mux_width = mw, \
630 #define COMPOSITE_NODIV(_id, cname, pnames, f, mo, ms, mw, mf, \
641 .mux_width = mw, \
648 #define COMPOSITE_NOGATE(_id, cname, pnames, f, mo, ms, mw, mf, \
659 .mux_width = mw, \
668 mw, mf, ds, dw, df, dt) \
678 .mux_width = mw, \
[all...]
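
In the COMPOSITE* macros, ms and mw are the shift and bit width of the parent-mux field inside the clock's control register. A hedged sketch of how a selector of that shape would be read back:

#include <stdint.h>

/* Hedged sketch: extract a mux selector of width mw located at bit
 * shift ms from a composite clock's control register value. */
static uint32_t mux_get_parent(uint32_t reg_val, uint32_t ms, uint32_t mw)
{
        return (reg_val >> ms) & ((1u << mw) - 1);
}
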
/linux-master/drivers/infiniband/hw/hns/
hns_roce_mr.c:480 struct hns_roce_mw *mw)
485 if (mw->enabled) {
487 key_to_hw_index(mw->rkey) &
493 key_to_hw_index(mw->rkey));
497 (int)key_to_hw_index(mw->rkey));
501 struct hns_roce_mw *mw)
506 unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
520 ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
533 mw->enabled = 1;
553 struct hns_roce_mw *mw local
479 hns_roce_mw_free(struct hns_roce_dev *hr_dev, struct hns_roce_mw *mw) argument
500 hns_roce_mw_enable(struct hns_roce_dev *hr_dev, struct hns_roce_mw *mw) argument
587 struct hns_roce_mw *mw = to_hr_mw(ibmw); local
[all...]
/linux-master/include/linux/usb/
pd.h:250 #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
375 #define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT)
376 #define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT)
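
The battery PDO/RDO macros above encode power in 250 mW units before masking and shifting the result into the data object. A hedged worked example with placeholder mask and shift values (the real ones are defined alongside these macros in pd.h):

/* Hedged sketch of the 250 mW encoding used by PDO_BATT_MAX_POWER();
 * the mask and shift below are placeholders, not the pd.h values. */
#define PWR_MASK_EXAMPLE   0x3ff
#define PWR_SHIFT_EXAMPLE  0

static unsigned int encode_batt_power_mw(unsigned int mw)
{
        /* e.g. 45000 mW / 250 = 180, so a 45 W battery PDO carries 180 */
        return ((mw / 250) & PWR_MASK_EXAMPLE) << PWR_SHIFT_EXAMPLE;
}
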
/linux-master/drivers/net/ethernet/marvell/octeontx2/af/
rvu.c:33 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2129 struct mbox_wq_info *mw; local
2134 mw = &rvu->afpf_wq_info;
2137 mw = &rvu->afvf_wq_info;
2143 devid = mwork - mw->mbox_wrk;
2144 mbox = &mw->mbox;
2149 if (mw->mbox_wrk[devid].num_msgs == 0)
2154 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2187 mw->mbox_wrk[devid].num_msgs = 0;
2219 struct mbox_wq_info *mw; local
2355 rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) argument
2474 rvu_mbox_destroy(struct mbox_wq_info *mw) argument
2495 rvu_queue_work(struct mbox_wq_info *mw, int first, int mdevs, u64 intr) argument
[all...]
/linux-master/drivers/infiniband/hw/mlx4/
mr.c:616 struct mlx4_ib_mw *mw = to_mmw(ibmw); local
620 to_mlx4_type(ibmw->type), &mw->mmw);
624 err = mlx4_mw_enable(dev->dev, &mw->mmw);
628 ibmw->rkey = mw->mmw.key;
632 mlx4_mw_free(dev->dev, &mw->mmw);
638 struct mlx4_ib_mw *mw = to_mmw(ibmw); local
640 mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
/linux-master/drivers/infiniband/hw/bnxt_re/
ib_verbs.h:53 struct ib_mw *mw; member in struct:bnxt_re_fence_data
238 int bnxt_re_dealloc_mw(struct ib_mw *mw);
ib_verbs.c:424 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
465 if (fence->mw) {
466 bnxt_re_dealloc_mw(fence->mw);
467 fence->mw = NULL;
493 struct ib_mw *mw; local
540 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
541 if (IS_ERR(mw)) {
544 rc = PTR_ERR(mw);
547 fence->mw = mw;
3989 struct bnxt_re_mw *mw; local
4021 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); local
[all...]
/linux-master/drivers/mtd/maps/
physmap-core.c:147 map_word mw; local
155 mw.x[0] = word;
156 return mw;
181 static void physmap_addr_gpios_write(struct map_info *map, map_word mw, argument
192 word = mw.x[0];
/linux-master/drivers/pci/endpoint/functions/
pci-epf-ntb.c:180 * @mw: Index of the memory window (either 0, 1, 2 or 3)
236 enum pci_epc_interface_type type, u32 mw)
252 peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
259 if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
262 if (size > ntb->mws_size[mw]) {
265 pci_epc_interface_string(type), mw, size,
266 ntb->mws_size[mw]);
278 pci_epc_interface_string(type), mw);
289 * @mw: Index of the memory window (either 0, 1, 2 or 3)
295 enum pci_epc_interface_type type, u32 mw)
235 epf_ntb_configure_mw(struct epf_ntb *ntb, enum pci_epc_interface_type type, u32 mw) argument
294 epf_ntb_teardown_mw(struct epf_ntb *ntb, enum pci_epc_interface_type type, u32 mw) argument
[all...]
/linux-master/include/uapi/rdma/
rdma_user_rxe.h:120 } mw; member in union:rxe_send_wr::__anon1274
/linux-master/scripts/dtc/include-prefixes/dt-bindings/usb/
pd.h:44 #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
/linux-master/include/dt-bindings/usb/
pd.h:44 #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
/linux-master/drivers/infiniband/core/
uverbs.h:245 int uverbs_dealloc_mw(struct ib_mw *mw);
/linux-master/drivers/media/platform/nxp/dw100/
dw100.c:382 u32 sw, sh, mw, mh, idx; local
389 mw = ctrl->dims[0];
394 qdx = qsw / (mw - 1);
397 ctx->map_width = mw;
399 ctx->map_size = mh * mw * sizeof(u32);
402 qy = min_t(u32, (idx / mw) * qdy, qsh);
403 qx = min_t(u32, (idx % mw) * qdx, qsw);
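
Lines 394-403 build the default dewarping map: an mw-by-mh grid whose entries advance by qdx and qdy across the fixed-point source width and height, clamped at the far edges. A hedged sketch of that grid generation with plain integers and an assumed coordinate packing:

#include <stdint.h>
#include <stddef.h>

/* Hedged sketch of an identity dewarping map in the spirit of dw100.c;
 * the (qy << 16 | qx) packing is an assumption, not the real DW100
 * fixed-point layout. Assumes mw, mh >= 2. */
static void build_identity_map(uint32_t *map, uint32_t mw, uint32_t mh,
                               uint32_t qsw, uint32_t qsh)
{
        uint32_t qdx = qsw / (mw - 1);
        uint32_t qdy = qsh / (mh - 1);

        for (size_t idx = 0; idx < (size_t)mw * mh; idx++) {
                uint32_t qy = (idx / mw) * qdy;
                uint32_t qx = (idx % mw) * qdx;

                if (qy > qsh)
                        qy = qsh;
                if (qx > qsw)
                        qx = qsw;
                map[idx] = (qy << 16) | qx;
        }
}
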
/linux-master/drivers/infiniband/hw/mlx5/
mr.c:2169 struct mlx5_ib_mw *mw = to_mmw(ibmw); local
2209 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
2213 mw->mmkey.type = MLX5_MKEY_MW;
2214 ibmw->rkey = mw->mmkey.key;
2215 mw->mmkey.ndescs = ndescs;
2226 err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
2235 mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
2241 int mlx5_ib_dealloc_mw(struct ib_mw *mw) argument
2243 struct mlx5_ib_dev *dev = to_mdev(mw->device);
2244 struct mlx5_ib_mw *mmw = to_mmw(mw);
[all...]

Completed in 347 milliseconds
