Searched refs:rcd (Results 1 - 25 of 67) sorted by relevance

/linux-master/drivers/infiniband/hw/hfi1/
exp_rcv.c
20 * hfi1_exp_tid_group_init - initialize rcd expected receive
21 * @rcd: the rcd
23 void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd) argument
25 hfi1_exp_tid_set_init(&rcd->tid_group_list);
26 hfi1_exp_tid_set_init(&rcd->tid_used_list);
27 hfi1_exp_tid_set_init(&rcd->tid_full_list);
32 * @rcd: the context to add the groupings to
34 int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) argument
36 struct hfi1_devdata *dd = rcd
71 hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) argument
[all...]
aspm.h
22 void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd);
26 static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd) argument
29 if (likely(!rcd->aspm_intr_supported))
32 __aspm_ctx_disable(rcd);
aspm.c
129 void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd) argument
136 spin_lock_irqsave(&rcd->aspm_lock, flags);
138 if (!rcd->aspm_intr_enable)
141 prev = rcd->aspm_ts_last_intr;
143 rcd->aspm_ts_last_intr = now;
149 restart_timer = ktime_to_ns(ktime_sub(now, rcd->aspm_ts_timer_sched)) >
154 if (rcd->aspm_enabled && close_interrupts) {
155 aspm_disable_inc(rcd->dd);
156 rcd->aspm_enabled = false;
161 mod_timer(&rcd
172 struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer); local
187 struct hfi1_ctxtdata *rcd; local
209 struct hfi1_ctxtdata *rcd; local
230 aspm_ctx_init(struct hfi1_ctxtdata *rcd) argument
241 struct hfi1_ctxtdata *rcd; local
[all...]
init.c
88 struct hfi1_ctxtdata *rcd; local
94 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
105 rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
111 if (rcd->ctxt == HFI1_CTRL_CTXT)
112 rcd->flags |= HFI1_CAP_DMA_RTAIL;
113 rcd->fast_handler = get_dma_rtail_setting(rcd) ?
117 hfi1_set_seq_cnt(rcd, 1);
119 rcd->sc = sc_alloc(dd, SC_ACK, rcd
162 hfi1_rcd_init(struct hfi1_ctxtdata *rcd) argument
175 struct hfi1_ctxtdata *rcd = local
193 hfi1_rcd_put(struct hfi1_ctxtdata *rcd) argument
210 hfi1_rcd_get(struct hfi1_ctxtdata *rcd) argument
225 allocate_rcd_index(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, u16 *index) argument
287 struct hfi1_ctxtdata *rcd = NULL; local
308 struct hfi1_ctxtdata *rcd; local
471 hfi1_free_ctxt(struct hfi1_ctxtdata *rcd) argument
680 struct hfi1_ctxtdata *rcd; local
702 struct hfi1_ctxtdata *rcd; local
848 struct hfi1_ctxtdata *rcd; local
1007 struct hfi1_ctxtdata *rcd; local
1085 hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) argument
1513 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; local
1758 hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) argument
1813 hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) argument
[all...]
msix.h
16 int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
22 int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd);
driver.c
156 static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf, argument
161 *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
162 return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
166 static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd, argument
171 return (void *)(rhf_addr - rcd->rhf_offset + offset);
174 static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd, argument
177 return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
181 *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd, argument
184 return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
207 static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struc argument
368 init_packet(struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet) argument
493 struct hfi1_ctxtdata *rcd; member in struct:ps_mdata
504 struct hfi1_ctxtdata *rcd = packet->rcd; local
523 ps_done(struct ps_mdata *mdata, u64 rhf, struct hfi1_ctxtdata *rcd) argument
531 ps_skip(struct ps_mdata *mdata, u64 rhf, struct hfi1_ctxtdata *rcd) argument
544 update_ps_mdata(struct ps_mdata *mdata, struct hfi1_ctxtdata *rcd) argument
572 struct hfi1_ctxtdata *rcd = packet->rcd; local
638 struct hfi1_ctxtdata *rcd = packet->rcd; local
828 handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget) argument
852 handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread) argument
878 handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread) argument
907 set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) argument
933 struct hfi1_ctxtdata *rcd; local
1001 handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) argument
1107 handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget) argument
1177 struct hfi1_ctxtdata *rcd; local
1534 struct hfi1_ctxtdata *rcd = packet->rcd; local
1607 struct hfi1_ctxtdata *rcd = packet->rcd; local
1625 struct hfi1_ctxtdata *rcd = packet->rcd; local
1636 struct hfi1_ctxtdata *rcd = packet->rcd; local
1788 struct hfi1_ctxtdata *rcd = packet->rcd; local
1805 struct hfi1_ctxtdata *rcd = packet->rcd; local
1823 seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd) argument
[all...]
msix.c
126 static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd, argument
131 int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
132 rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
141 rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
142 rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
143 rcd->msix_intr = nr;
144 remap_intr(rcd
154 msix_request_rcd_irq(struct hfi1_ctxtdata *rcd) argument
170 msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd) argument
268 struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i); local
341 struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i); local
[all...]
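
A side note on the index/mask arithmetic visible in the msix.c hit above: the context's "receive available" interrupt source number is split into a 64-bit status register index (ireg) and a bit within that register (imask). Below is a minimal user-space sketch of that calculation; the IS_RCVAVAIL_START value and the context number are made up for illustration, not taken from the hfi1 chip headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed base of the "receive available" interrupt sources; the real
 * constant lives in the hfi1 chip headers and is only a stand-in here. */
#define IS_RCVAVAIL_START 64

int main(void)
{
	unsigned int ctxt = 5;                        /* example receive context */
	unsigned int src = IS_RCVAVAIL_START + ctxt;  /* interrupt source number */

	unsigned int ireg = src / 64;                 /* which 64-bit register */
	uint64_t imask = (uint64_t)1 << (src % 64);   /* bit within that register */

	printf("ctxt %u -> ireg %u, imask 0x%016llx\n",
	       ctxt, ireg, (unsigned long long)imask);
	return 0;
}
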
intr.c
202 void handle_user_interrupt(struct hfi1_ctxtdata *rcd) argument
204 struct hfi1_devdata *dd = rcd->dd;
208 if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS))
211 if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
212 wake_up_interruptible(&rcd->wait);
213 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd);
215 &rcd->event_flags)) {
216 rcd->urgent++;
217 wake_up_interruptible(&rcd->wait);
trace_rx.h
27 TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd)
36 TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd);
38 __entry->ctxt = packet->rcd->ctxt;
59 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
60 TP_ARGS(dd, rcd),
67 __entry->ctxt = rcd->ctxt;
68 __entry->slow_path = hfi1_is_slowpath(rcd);
69 __entry->dma_rtail = get_dma_rtail_setting(rcd);
netdev_rx.c
205 rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
209 hfi1_rcd_get(rxq->rcd);
211 rxq->rcd->napi = &rxq->napi;
213 i, rxq->rcd->ctxt);
220 rc = msix_netdev_request_rcd_irq(rxq->rcd);
232 if (rxq->rcd) {
233 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
234 hfi1_rcd_put(rxq->rcd);
235 rxq->rcd = NULL;
253 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
[all...]
rc.h
25 static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd, argument
31 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
55 struct hfi1_ctxtdata *rcd);
exp_rcv.h
147 * @rcd - the receive context
151 hfi1_tid_group_to_idx(struct hfi1_ctxtdata *rcd, struct tid_group *grp) argument
153 return grp - &rcd->groups[0];
158 * @rcd - the receive context
162 hfi1_idx_to_tid_group(struct hfi1_ctxtdata *rcd, u16 idx) argument
164 return &rcd->groups[idx];
167 int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
168 void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
169 void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd);
hfi.h
161 typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
198 /* verbs rx_stats per rcd */
315 * @rcd: the receive context
320 static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd) argument
322 return PAGE_ALIGN(rcd->rcvhdrq_cnt *
323 rcd->rcvhdrqentsize * sizeof(u32));
337 struct hfi1_ctxtdata *rcd; member in struct:hfi1_packet
1127 spinlock_t uctxt_lock; /* protect rcd changes */
1309 struct hfi1_ctxtdata **rcd; member in struct:hfi1_devdata
1422 void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
1463 hfi1_rcd_head(struct hfi1_ctxtdata *rcd) argument
1473 hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head) argument
1479 get_rhf_addr(struct hfi1_ctxtdata *rcd) argument
1485 get_dma_rtail_setting(struct hfi1_ctxtdata *rcd) argument
1509 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd) argument
1520 hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt) argument
1532 last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq) argument
1544 hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq) argument
1554 get_hdrqentsize(struct hfi1_ctxtdata *rcd) argument
1563 get_hdrq_cnt(struct hfi1_ctxtdata *rcd) argument
1572 hfi1_is_slowpath(struct hfi1_ctxtdata *rcd) argument
1581 hfi1_is_fastpath(struct hfi1_ctxtdata *rcd) argument
1593 hfi1_set_fast(struct hfi1_ctxtdata *rcd) argument
1936 rcd_to_iport(struct hfi1_ctxtdata *rcd) argument
2107 hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd) argument
2112 clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd) argument
2120 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd) argument
2129 hfi1_packet_present(struct hfi1_ctxtdata *rcd) argument
[all...]
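
The rcvhdrq_size() helper shown in the hfi.h hit computes the receive header queue footprint as entry count times entry size (in 32-bit words), rounded up to a page boundary. A small sketch of the same arithmetic, assuming a 4 KiB page and example queue geometry chosen to show the rounding; the real values come from context setup.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                                  /* assumed page size */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int rcvhdrq_cnt = 510;       /* example: header queue entries */
	unsigned int rcvhdrqentsize = 32;     /* example: entry size in 32-bit words */

	unsigned int bytes = rcvhdrq_cnt * rcvhdrqentsize * (unsigned int)sizeof(uint32_t);

	printf("raw %u bytes, page aligned %u bytes\n", bytes, PAGE_ALIGN(bytes));
	return 0;
}
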
tid_rdma.c
133 struct hfi1_ctxtdata *rcd,
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
199 p->jkey = priv->rcd->jkey;
203 p->urg = is_urg_masked(priv->rcd);
298 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit) argument
305 rcd->jkey = TID_RDMA_JKEY;
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
307 return hfi1_alloc_ctxt_rcv_groups(rcd);
735 kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation, u32 flow_idx) argument
780 hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) argument
820 hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) argument
848 hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd) argument
1204 kern_add_tid_node(struct tid_rdma_flow *flow, struct hfi1_ctxtdata *rcd, char *s, struct tid_group *grp, u8 cnt) argument
1239 struct hfi1_ctxtdata *rcd = flow->req->rcd; local
1303 struct hfi1_ctxtdata *rcd = flow->req->rcd; local
1370 struct hfi1_ctxtdata *rcd = flow->req->rcd; local
1399 struct hfi1_ctxtdata *rcd = flow->req->rcd; local
1466 struct hfi1_ctxtdata *rcd = req->rcd; variable in typeref:struct:hfi1_ctxtdata
1557 struct hfi1_ctxtdata *rcd = req->rcd; variable in typeref:struct:hfi1_ctxtdata
1990 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd; local
2230 struct hfi1_ctxtdata *rcd = packet->rcd; local
2454 struct hfi1_ctxtdata *rcd = packet->rcd; local
2629 restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, struct rvt_swqe *wqe) argument
2845 hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, struct hfi1_packet *packet) argument
3468 struct hfi1_ctxtdata *rcd = qpriv->rcd; local
3659 struct hfi1_ctxtdata *rcd = packet->rcd; local
4043 struct hfi1_ctxtdata *rcd = packet->rcd; local
4273 struct hfi1_ctxtdata *rcd = priv->rcd; local
4874 struct hfi1_ctxtdata *rcd = qpriv->rcd; local
5516 update_r_next_psn_fecn(struct hfi1_packet *packet, struct hfi1_qp_priv *priv, struct hfi1_ctxtdata *rcd, struct tid_rdma_flow *flow, bool fecn) argument
[all...]
netdev.h
20 * @rcd: ptr to receive context data
25 struct hfi1_ctxtdata *rcd; member in struct:hfi1_netdev_rxq
73 return dd->netdev_rx->rxq[ctxt].rcd;
trace_misc.h
73 TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->ppd->dd)
81 TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->ppd->dd);
83 __entry->ctxt = packet->rcd->ctxt;
/linux-master/arch/x86/kernel/cpu/mce/
apei.c
146 struct cper_mce_record rcd; local
148 memset(&rcd, 0, sizeof(rcd));
149 memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
150 rcd.hdr.revision = CPER_RECORD_REV;
151 rcd.hdr.signature_end = CPER_SIG_END;
152 rcd.hdr.section_count = 1;
153 rcd.hdr.error_severity = CPER_SEV_FATAL;
155 rcd.hdr.validation_bits = 0;
156 rcd
178 struct cper_mce_record rcd; local
[all...]
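
The apei.c hit zeroes a CPER record and fills in its header fields before persisting a fatal MCE. A hedged sketch of that initialize-header pattern follows; the struct layout, constants, and revision value below are stand-ins for illustration, not the kernel's CPER definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the CPER record header; field names mirror the snippet
 * above, but the layout is assumed, not copied from the kernel. */
struct cper_hdr_sketch {
	char     signature[4];
	uint16_t revision;
	uint32_t signature_end;
	uint16_t section_count;
	uint32_t error_severity;
	uint32_t validation_bits;
};

#define SIG_RECORD "CPER"        /* analogous to CPER_SIG_RECORD */
#define SIG_SIZE   4
#define SIG_END    0xffffffffu   /* analogous to CPER_SIG_END */
#define SEV_FATAL  1u            /* analogous to CPER_SEV_FATAL */

int main(void)
{
	struct cper_hdr_sketch hdr;

	/* Same zero-then-fill pattern as the snippet above. */
	memset(&hdr, 0, sizeof(hdr));
	memcpy(hdr.signature, SIG_RECORD, SIG_SIZE);
	hdr.revision = 0x0100;           /* placeholder revision */
	hdr.signature_end = SIG_END;
	hdr.section_count = 1;           /* a single MCE section follows */
	hdr.error_severity = SEV_FATAL;
	hdr.validation_bits = 0;         /* no optional header fields valid */

	printf("header signature: %.4s, sections: %u\n",
	       hdr.signature, (unsigned)hdr.section_count);
	return 0;
}
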
/linux-master/drivers/infiniband/hw/qib/
qib_file_ops.c
101 struct qib_ctxtdata *rcd = ctxt_fp(fp); local
104 struct qib_devdata *dd = rcd->dd;
105 struct qib_pportdata *ppd = rcd->ppd;
110 subctxt_cnt = rcd->subctxt_cnt;
135 ret = dd->f_get_base_info(rcd, kinfo);
141 kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
147 rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
148 kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
150 rcd
284 qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, const struct qib_tid_info *ti) argument
483 qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, const struct qib_tid_info *ti) argument
572 qib_set_part_key(struct qib_ctxtdata *rcd, u16 key) argument
652 qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt, int start_stop) argument
681 qib_clean_part_key(struct qib_ctxtdata *rcd, struct qib_devdata *dd) argument
708 qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, unsigned len, void *kvaddr, u32 write_ok, char *what) argument
781 mmap_piobufs(struct vm_area_struct *vma, struct qib_devdata *dd, struct qib_ctxtdata *rcd, unsigned piobufs, unsigned piocnt) argument
826 mmap_rcvegrbufs(struct vm_area_struct *vma, struct qib_ctxtdata *rcd) argument
892 mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, struct qib_ctxtdata *rcd, unsigned subctxt) argument
976 struct qib_ctxtdata *rcd; local
1080 qib_poll_urgent(struct qib_ctxtdata *rcd, struct file *fp, struct poll_table_struct *pt) argument
1102 qib_poll_next(struct qib_ctxtdata *rcd, struct file *fp, struct poll_table_struct *pt) argument
1125 struct qib_ctxtdata *rcd; local
1214 init_subctxts(struct qib_devdata *dd, struct qib_ctxtdata *rcd, const struct qib_user_info *uinfo) argument
1288 struct qib_ctxtdata *rcd; local
1489 struct qib_ctxtdata *rcd = dd->rcd[i]; local
1564 struct qib_ctxtdata *rcd = fd->rcd; local
1652 struct qib_ctxtdata *rcd = ctxt_fp(fp); local
1767 unlock_expected_tids(struct qib_ctxtdata *rcd) argument
1792 struct qib_ctxtdata *rcd; local
1882 struct qib_ctxtdata *rcd = ctxt_fp(fp); local
1940 disarm_req_delay(struct qib_ctxtdata *rcd) argument
1977 struct qib_ctxtdata *rcd; local
2015 qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt, unsigned long events) argument
2036 struct qib_ctxtdata *rcd; local
2244 struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp); local
[all...]
qib_init.c
134 dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
135 if (!dd->rcd)
141 struct qib_ctxtdata *rcd; local
148 rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
149 if (!rcd) {
152 kfree(dd->rcd);
153 dd->rcd = NULL;
156 rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
157 rcd
169 struct qib_ctxtdata *rcd; local
487 struct qib_ctxtdata *rcd = dd->rcd[i]; local
637 struct qib_ctxtdata *rcd; local
905 qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) argument
1362 struct qib_ctxtdata *rcd = tmp[ctxt]; local
1542 qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) argument
1615 qib_setup_eagerbufs(struct qib_ctxtdata *rcd) argument
[all...]
qib_tx.c
80 int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd) argument
82 struct qib_devdata *dd = rcd->dd;
86 last = rcd->pio_base + rcd->piocnt;
92 if (rcd->user_event_mask) {
97 clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
98 for (i = 1; i < rcd->subctxt_cnt; i++)
100 &rcd->user_event_mask[i]);
103 for (i = rcd->pio_base; i < last; i++) {
105 dd->f_sendctrl(rcd
131 struct qib_ctxtdata *rcd; local
379 qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start, unsigned len, u32 avail, struct qib_ctxtdata *rcd) argument
454 struct qib_ctxtdata *rcd; local
[all...]
qib_intr.c
191 struct qib_ctxtdata *rcd; local
196 for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
199 rcd = dd->rcd[i];
200 if (!rcd || !rcd->cnt)
203 if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
204 wake_up_interruptible(&rcd->wait);
205 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
206 rcd
[all...]
qib_driver.c
280 static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) argument
282 const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
283 const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
285 return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
292 static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, argument
406 &rcd->qp_wait_list);
432 * @rcd: the qlogic_ib context
441 u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) argument
443 struct qib_devdata *dd = rcd
[all...]
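
qib_get_egrbuf() in the hit above turns an eager tail index into a buffer address with shift/mask arithmetic: the high bits select a chunk, the low bits select a buffer within the chunk, and the buffer-size shift converts that to a byte offset. A standalone sketch of that addressing with made-up geometry; the real shifts come from rcvegrbufs_perchunk_shift and dd->rcvegrbufsize_shift.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int perchunk_shift = 3;            /* example: 8 buffers per chunk */
	const unsigned int perchunk = 1u << perchunk_shift;
	const unsigned int bufsize_shift = 11;            /* example: 2048-byte buffers */

	unsigned int etail = 21;                          /* example eager tail index */
	unsigned int chunk = etail >> perchunk_shift;     /* which chunk */
	unsigned int idx = etail & (perchunk - 1);        /* buffer within the chunk */
	unsigned int offset = idx << bufsize_shift;       /* byte offset inside the chunk */

	printf("etail %u -> chunk %u, index %u, offset %u bytes\n",
	       etail, chunk, idx, offset);
	return 0;
}
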
qib_debugfs.c
103 if (!dd->rcd[j])
105 n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
106 n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
166 if (!dd->rcd[i])
169 for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
170 n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
/linux-master/drivers/acpi/apei/
erst.c
1074 struct cper_pstore_record *rcd; local
1075 size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
1080 rcd = kmalloc(rcd_len, GFP_KERNEL);
1081 if (!rcd) {
1096 len = erst_read_record(record_id, &rcd->hdr, rcd_len, sizeof(*rcd),
1109 memcpy(record->buf, rcd->data, len - sizeof(*rcd));
1113 if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG_Z)) {
1116 } else if (guid_equal(&rcd
1136 struct cper_pstore_record *rcd = (struct cper_pstore_record *) local
[all...]
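
The erst.c hit reads each pstore record into a buffer sized for a fixed header plus a variable payload, then copies out only the bytes past the header (len - sizeof(*rcd)). A hedged sketch of that header-plus-payload pattern; the struct layout and sizes here are invented for illustration and do not reproduce struct cper_pstore_record.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical fixed header followed by a variable-length payload. */
struct record_hdr {
	uint32_t magic;
	uint32_t type;
	uint8_t  data[];                 /* payload bytes follow the header */
};

int main(void)
{
	/* Allocate header + payload in one block, as the snippet does with
	 * sizeof(*rcd) + bufsize. */
	size_t payload_sz = 5;
	size_t len = sizeof(struct record_hdr) + payload_sz;
	struct record_hdr *rcd = calloc(1, len);
	char out[16];

	if (!rcd)
		return 1;
	memcpy(rcd->data, "hello", payload_sz);

	/* Only the bytes past the fixed header are payload: len - sizeof(*rcd). */
	size_t payload = len - sizeof(*rcd);
	memcpy(out, rcd->data, payload);
	out[payload] = '\0';

	printf("payload of %zu bytes: \"%s\"\n", payload, out);
	free(rcd);
	return 0;
}
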
/linux-master/drivers/net/vmxnet3/
vmxnet3_xdp.h
31 struct Vmxnet3_RxCompDesc *rcd,
