Searched refs:wr (Results 1 - 25 of 262) sorted by relevance


/linux-master/drivers/media/dvb-frontends/
dib3000mb.c
148 wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
153 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
157 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
169 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
173 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);
177 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);
181 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);
193 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
200 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);
209 wr(DIB3000MB_REG_QA
[all...]
/linux-master/tools/bpf/bpftool/
json_writer.c
311 json_writer_t *wr = jsonw_new(stdout); local
313 jsonw_start_object(wr);
314 jsonw_pretty(wr, true);
315 jsonw_name(wr, "Vyatta");
316 jsonw_start_object(wr);
317 jsonw_string_field(wr, "url", "http://vyatta.com");
318 jsonw_uint_field(wr, "downloads", 2000000ul);
319 jsonw_float_field(wr, "stock", 8.16);
321 jsonw_name(wr, "ARGV");
322 jsonw_start_array(wr);
[all...]
/linux-master/tools/testing/selftests/bpf/
json_writer.c
311 json_writer_t *wr = jsonw_new(stdout); local
313 jsonw_start_object(wr);
314 jsonw_pretty(wr, true);
315 jsonw_name(wr, "Vyatta");
316 jsonw_start_object(wr);
317 jsonw_string_field(wr, "url", "http://vyatta.com");
318 jsonw_uint_field(wr, "downloads", 2000000ul);
319 jsonw_float_field(wr, "stock", 8.16);
321 jsonw_name(wr, "ARGV");
322 jsonw_start_array(wr);
[all...]
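
The bpftool and selftests copies of json_writer.c above are the same file; the matched lines come from its built-in self test. A minimal standalone sketch of that jsonw_* usage, assuming the json_writer.h header that ships alongside those files, could look like this:

#include <stdio.h>
#include "json_writer.h"	/* assumption: header from the same directory */

int main(void)
{
	json_writer_t *wr = jsonw_new(stdout);	/* stream JSON to stdout */

	jsonw_start_object(wr);			/* opening { */
	jsonw_pretty(wr, true);			/* indent nested output */
	jsonw_name(wr, "Vyatta");
	jsonw_start_object(wr);
	jsonw_string_field(wr, "url", "http://vyatta.com");
	jsonw_uint_field(wr, "downloads", 2000000ul);
	jsonw_float_field(wr, "stock", 8.16);
	jsonw_end_object(wr);			/* close the nested object */
	jsonw_end_object(wr);			/* close the outer object */
	jsonw_destroy(&wr);			/* flush output, free the writer */
	return 0;
}
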
/linux-master/lib/
decompress_unlzma.c
292 static inline size_t INIT get_pos(struct writer *wr) argument
295 wr->global_pos + wr->buffer_pos;
298 static inline uint8_t INIT peek_old_byte(struct writer *wr, argument
301 if (!wr->flush) {
303 while (offs > wr->header->dict_size)
304 offs -= wr->header->dict_size;
305 pos = wr->buffer_pos - offs;
306 return wr->buffer[pos];
308 uint32_t pos = wr
316 write_byte(struct writer *wr, uint8_t byte) argument
330 copy_byte(struct writer *wr, uint32_t offs) argument
335 copy_bytes(struct writer *wr, uint32_t rep0, int len) argument
347 process_bit0(struct writer *wr, struct rc *rc, struct cstate *cst, uint16_t *p, int pos_state, uint16_t *prob, int lc, uint32_t literal_pos_mask) argument
391 process_bit1(struct writer *wr, struct rc *rc, struct cstate *cst, uint16_t *p, int pos_state, uint16_t *prob) argument
553 struct writer wr; local
[all...]
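
The decompress_unlzma.c matches above belong to the LZMA output writer: back-references are resolved against bytes already written, and when no flush callback is installed the offset simply wraps modulo the dictionary size. A rough sketch of that look-back, using an illustrative struct rather than the kernel's, and assuming a valid back-reference (offs no larger than the bytes written so far):

#include <stddef.h>
#include <stdint.h>

/* Illustrative writer state; the kernel keeps dict_size inside the
 * parsed LZMA header instead. */
struct writer_sketch {
	uint8_t *buffer;	/* output buffer doubling as the dictionary */
	size_t buffer_pos;	/* next byte to be written */
	uint32_t dict_size;	/* dictionary window size */
};

static uint8_t peek_old_byte(const struct writer_sketch *wr, uint32_t offs)
{
	/* fold the back-reference offset into the dictionary window */
	while (offs > wr->dict_size)
		offs -= wr->dict_size;
	return wr->buffer[wr->buffer_pos - offs];
}
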
/linux-master/tools/testing/selftests/breakpoints/
breakpoint_test_arm64.c
33 static void child(int size, int wr) argument
35 volatile uint8_t *addr = &var[32 + wr];
112 static bool run_test(int wr_size, int wp_size, int wr, int wp) argument
125 child(wr_size, wr);
204 int wr, wp, size; local
215 for (wr = 0; wr <= 32; wr = wr + size) {
216 for (wp = wr
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
wr.h
104 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
106 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
110 const struct ib_send_wr *wr,
113 return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
117 const struct ib_send_wr *wr,
120 return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
124 const struct ib_recv_wr *wr,
127 return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
131 const struct ib_recv_wr *wr,
134 return mlx5_ib_post_recv(ibqp, wr, bad_w
109 mlx5_ib_post_send_nodrain(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
116 mlx5_ib_post_send_drain(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
123 mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
130 mlx5_ib_post_recv_drain(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
[all...]
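
The wr.h matches above are thin inline wrappers: the nodrain/drain variants forward to the same mlx5_ib_post_send()/mlx5_ib_post_recv() with a boolean drain flag. A hedged caller-side sketch of posting one send WR through the nodrain wrapper; the queue pair, DMA address, length and lkey (ibqp, dma_addr, len, mr_lkey) are assumed to be set up elsewhere:

struct ib_sge sge = {
	.addr	= dma_addr,		/* assumption: buffer already DMA-mapped */
	.length	= len,
	.lkey	= mr_lkey,
};
struct ib_send_wr swr = {
	.opcode		= IB_WR_SEND,
	.send_flags	= IB_SEND_SIGNALED,	/* request a completion */
	.sg_list	= &sge,
	.num_sge	= 1,
};
const struct ib_send_wr *bad_wr;

int err = mlx5_ib_post_send_nodrain(ibqp, &swr, &bad_wr);
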
wr.c
9 #include "wr.h"
54 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp, argument
61 if (wr->send_flags & IB_SEND_IP_CSUM)
65 if (wr->opcode == IB_WR_LSO) {
66 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
103 const struct ib_send_wr *wr)
105 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
107 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
108 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)
102 set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, const struct ib_send_wr *wr) argument
228 send_ieth(const struct ib_send_wr *wr) argument
260 set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, void **wqe, int *wqe_sz, void **cur_edge) argument
561 const struct ib_reg_wr *wr = reg_wr(send_wr); local
635 set_reg_wr(struct mlx5_ib_qp *qp, const struct ib_reg_wr *wr, void **seg, int *size, void **cur_edge, bool check_not_free) argument
748 begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, const struct ib_send_wr *wr, unsigned int *idx, int *size, void **cur_edge, int nreq) argument
787 handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size) argument
794 handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int idx) argument
803 handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int idx) argument
812 handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, struct ib_sig_domain *domain, u32 psv_index, u8 next_fence) argument
843 handle_reg_mr_integrity(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence) argument
931 handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence, int *num_sge) argument
982 handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size) argument
994 handle_qpt_hw_gsi(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, void **seg, int *size, void **cur_edge) argument
1004 handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, void **seg, int *size, void **cur_edge) argument
1051 mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr, bool drain) argument
1206 mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr, bool drain) argument
[all...]
gsi.c
51 struct mlx5_ib_gsi_wr *wr; local
56 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
58 if (!wr->completed)
61 WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
62 wr->completed = false;
71 struct mlx5_ib_gsi_wr *wr = local
78 wr->completed = true;
79 wr_id = wr->wc.wr_id;
80 wr->wc = *wc;
81 wr
359 mlx5_ib_add_outstanding_wr(struct mlx5_ib_qp *mqp, struct ib_ud_wr *wr, struct ib_wc *wc) argument
391 mlx5_ib_gsi_silent_drop(struct mlx5_ib_qp *mqp, struct ib_ud_wr *wr) argument
411 get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr) argument
429 mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
474 mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
[all...]
/linux-master/fs/orangefs/
inode.c
23 struct orangefs_write_range *wr = NULL; local
34 wr = (struct orangefs_write_range *)page_private(page);
35 WARN_ON(wr->pos >= len);
36 off = wr->pos;
37 if (off + wr->len > len)
40 wlen = wr->len;
57 len, wr, NULL, NULL);
92 struct orangefs_write_range *wrp, wr; local
116 wr.uid = ow->uid;
117 wr
155 struct orangefs_write_range *wr = folio->private; local
321 struct orangefs_write_range *wr; local
347 struct orangefs_write_range *wr; local
412 struct orangefs_write_range *wr = folio_get_private(folio); local
640 struct orangefs_write_range *wr; local
[all...]
/linux-master/drivers/infiniband/ulp/iser/
iser_memory.c
257 struct ib_reg_wr *wr = &tx_desc->reg_wr; local
268 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
280 memset(wr, 0, sizeof(*wr));
281 wr->wr.next = &tx_desc->send_wr;
282 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
283 wr
313 struct ib_reg_wr *wr = &tx_desc->reg_wr; local
[all...]
/linux-master/drivers/infiniband/sw/rdmavt/
qp.h
20 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
22 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
24 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
/linux-master/drivers/infiniband/sw/rxe/
rxe_mw.c
83 if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
117 if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
123 if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
124 ((wqe->wr.wr.mw.addr + wqe->wr.wr
[all...]
rxe_verbs.c
488 static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, argument
497 while (wr) {
498 err = post_one_recv(&srq->rq, wr);
501 wr = wr->next;
507 *bad_wr = wr;
662 /* send wr */
678 rxe_err_qp(qp, "bad wr opcode for qp type\n");
726 static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, argument
729 wr
914 rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
1003 rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
[all...]
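
rxe_post_srq_recv() above follows the usual verbs posting convention: walk the caller's singly linked WR chain, stop at the first failure, and hand the failing WR back through *bad_wr so the caller knows where to resume. A sketch of that loop, with the rq type and post_one_recv() taken as stand-ins from the excerpt:

static int post_recv_list(struct rxe_rq *rq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	int err = 0;

	while (wr) {
		err = post_one_recv(rq, wr);	/* stand-in: one-WR helper */
		if (err)
			break;
		wr = wr->next;
	}

	if (err)
		*bad_wr = wr;	/* caller can resubmit from this WR */

	return err;
}
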
/linux-master/drivers/infiniband/hw/qedr/
qedr_roce_cm.h
49 int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
51 int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
/linux-master/include/trace/events/
ib_mad.h
24 TP_PROTO(struct ib_mad_send_wr_private *wr,
26 TP_ARGS(wr, qp_info),
55 __entry->dev_index = wr->mad_agent_priv->agent.device->index;
56 __entry->port_num = wr->mad_agent_priv->agent.port_num;
57 __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
58 __entry->agent_priv = wr->mad_agent_priv;
59 __entry->wrtid = wr->tid;
60 __entry->max_retries = wr->max_retries;
61 __entry->retries_left = wr->retries_left;
62 __entry->retry = wr
[all...]
/linux-master/include/linux/
hdlcdrv.h
28 unsigned rd, wr; member in struct:hdlcdrv_hdlcbuffer
35 unsigned int wr; member in struct:hdlcdrv_bitbuffer
49 buf->buffer[buf->wr] = buf->shreg;
50 buf->wr = (buf->wr+1) % sizeof(buf->buffer);
58 buf->buffer[buf->wr] = bits & 0xff;
59 buf->wr = (buf->wr+1) % sizeof(buf->buffer);
60 buf->buffer[buf->wr] = (bits >> 8) & 0xff;
61 buf->wr
[all...]
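
The hdlcdrv.h matches show a simple producer index: wr points at the next free slot and wraps with a modulo on the buffer size. An illustrative standalone version of that bit-buffer append (the struct and its size are made up for the sketch, and overrun against the rd index is not checked here):

#include <stdint.h>

struct byte_ring {
	unsigned int rd, wr;		/* consumer / producer indices */
	unsigned char buffer[48];	/* size is arbitrary for the sketch */
};

static void ring_put16(struct byte_ring *buf, uint16_t bits)
{
	buf->buffer[buf->wr] = bits & 0xff;		/* low byte first */
	buf->wr = (buf->wr + 1) % sizeof(buf->buffer);
	buf->buffer[buf->wr] = (bits >> 8) & 0xff;	/* then high byte */
	buf->wr = (buf->wr + 1) % sizeof(buf->buffer);
}
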
/linux-master/arch/sparc/include/asm/
visasm.h
23 297: wr %g0, FPRS_FEF, %fprs; \
26 wr %g0, 0, %fprs;
43 297: wr %o5, FPRS_FEF, %fprs;
46 wr %o5, 0, %fprs;
58 " 298: wr %%g0, 0, %%fprs\n"
/linux-master/arch/mips/include/uapi/asm/
ucontext.h
28 * @wr: the most significant 64 bits of each MSA vector register
39 unsigned long long wr[32]; member in struct:msa_extcontext
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
647 const struct ib_reg_wr *wr)
649 struct pvrdma_user_mr *mr = to_vmr(wr->mr);
651 wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
652 wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
653 wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
654 wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
655 wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
656 wqe_hdr->wr.fast_reg.access_flags = wr->access;
657 wqe_hdr->wr
646 set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, const struct ib_reg_wr *wr) argument
671 pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
873 pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
[all...]
/linux-master/drivers/infiniband/core/
rw.c
81 reg->inv_wr.next = &reg->reg_wr.wr;
112 reg->reg_wr.wr.opcode = IB_WR_REG_MR;
152 prev->wr.wr.next = &reg->inv_wr;
154 prev->wr.wr.next = &reg->reg_wr.wr;
157 reg->reg_wr.wr.next = &reg->wr.wr;
[all...]
/linux-master/drivers/infiniband/ulp/rtrs/
rtrs.c
75 struct ib_recv_wr wr; local
87 wr = (struct ib_recv_wr) {
93 return ib_post_recv(con->qp, &wr, NULL);
99 struct ib_recv_wr wr; local
101 wr = (struct ib_recv_wr) {
105 return ib_post_recv(con->qp, &wr, NULL);
110 struct ib_send_wr *wr, struct ib_send_wr *tail)
117 next->next = wr;
119 head = wr;
123 wr
109 rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head, struct ib_send_wr *wr, struct ib_send_wr *tail) argument
132 struct ib_send_wr wr; local
161 struct ib_rdma_wr wr; local
192 struct ib_rdma_wr wr; local
[all...]
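
rtrs.c above builds an ib_recv_wr with a designated initializer and hands it straight to ib_post_recv(), passing NULL for bad_wr since only a single WR is posted. A hedged sketch of that pattern; cqe, sge and con are assumed to be initialized elsewhere:

struct ib_recv_wr wr = {
	.wr_cqe	 = cqe,		/* completion handler context */
	.sg_list = &sge,	/* single receive buffer */
	.num_sge = 1,
};

return ib_post_recv(con->qp, &wr, NULL);	/* NULL: don't report bad_wr */
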
/linux-master/arch/sparc/kernel/
trampoline_32.S
47 wr %g1, 0x0, %psr ! traps off though
52 wr %g1, 0x0, %wim
56 wr %g3, 0x0, %tbr
71 wr %g1, PSR_ET, %psr ! traps on
101 wr %g1, 0x0, %psr ! traps off though
106 wr %g1, 0x0, %wim
111 wr %g1, 0x0, %tbr
132 wr %g1, PSR_ET, %psr ! traps on
160 wr %g1, 0x0, %psr ! traps off though
165 wr
[all...]
/linux-master/drivers/infiniband/sw/siw/
siw_verbs.c
675 static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, argument
680 while (wr) {
683 switch (wr->opcode) {
713 sqe.id = wr->wr_id;
719 *bad_wr = wr;
722 wr = wr->next;
728 static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr, argument
734 while (wr) {
735 rqe.id = wr
756 siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
996 siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
1753 siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
[all...]
/linux-master/drivers/infiniband/hw/mthca/
mthca_qp.c
1501 const struct ib_ud_wr *wr,
1511 mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,
1514 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
1525 switch (wr->wr.opcode) {
1533 sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
1542 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
1547 ib_get_cached_pkey(&dev->ib_dev, qp->port, wr
1500 build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind, const struct ib_ud_wr *wr, struct mthca_mlx_seg *mlx, struct mthca_data_seg *data) argument
1594 set_atomic_seg(struct mthca_atomic_seg *aseg, const struct ib_atomic_wr *wr) argument
1607 set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, const struct ib_ud_wr *wr) argument
1617 set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, const struct ib_ud_wr *wr) argument
1625 mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
1823 mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
1928 mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr) argument
2162 mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) argument
[all...]
/linux-master/arch/mips/mm/
tlbex.c
1996 struct work_registers wr = build_get_work_registers(p); local
1999 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
2001 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
2010 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
2013 UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
2014 UASM_i_LW(p, wr.r2, 0, wr
2059 struct work_registers wr; local
2241 struct work_registers wr; local
2297 struct work_registers wr; local
[all...]

Completed in 252 milliseconds
