Searched refs:ring (Results 1 - 25 of 804) sorted by path


/linux-master/drivers/gpu/drm/radeon/
uvd_v3_1.c 33 * @ring: radeon_ring pointer
37 * Emit a semaphore command (either wait or signal) to the UVD ring.
40 struct radeon_ring *ring,
46 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
47 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
49 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
50 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
52 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
53 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
39 uvd_v3_1_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) argument
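The excerpt above splits an 8-byte-aligned semaphore address across two 20-bit register fields: UVD_SEMA_ADDR_LOW receives bits 22:3 and UVD_SEMA_ADDR_HIGH bits 42:23. A standalone sketch of that packing (the example address is made up; this is not the radeon driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* assumed example value; must be 8-byte aligned like a real semaphore address */
        uint64_t addr = 0x0000001234567840ULL;

        uint32_t lo = (addr >> 3) & 0x000FFFFF;   /* bits 22:3,  what goes to UVD_SEMA_ADDR_LOW  */
        uint32_t hi = (addr >> 23) & 0x000FFFFF;  /* bits 42:23, what goes to UVD_SEMA_ADDR_HIGH */

        /* reassemble to confirm no addressable bits are lost for addresses below 2^43 */
        uint64_t back = ((uint64_t)hi << 23) | ((uint64_t)lo << 3);

        printf("addr=%#llx lo=%#x hi=%#x reassembled=%#llx\n",
               (unsigned long long)addr, lo, hi, (unsigned long long)back);
        return 0;
}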
/linux-master/drivers/gpu/drm/xen/
xen_drm_front_evtchnl.h 17 #include <xen/interface/io/ring.h>
52 struct xen_displif_front_ring ring; member in struct:xen_drm_front_evtchnl::__anon3771::__anon3772
/linux-master/drivers/net/ethernet/apm/xgene-v2/
Makefile 6 xgene-enet-v2-objs := main.o mac.o enet.o ring.o mdio.o ethtool.o
ring.c 13 void xge_setup_desc(struct xge_desc_ring *ring) argument
21 raw_desc = &ring->raw_desc[i];
24 next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
36 struct xge_desc_ring *ring = pdata->tx_ring; local
37 dma_addr_t dma_addr = ring->dma_addr;
42 ring->head = 0;
43 ring->tail = 0;
48 struct xge_desc_ring *ring = pdata->rx_ring; local
49 dma_addr_t dma_addr = ring->dma_addr;
54 ring
[all...]
ring.h 72 /* software context of a descriptor ring */
101 void xge_setup_desc(struct xge_desc_ring *ring);
/linux-master/drivers/net/ethernet/apm/xgene/
xgene_enet_hw.h 422 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
xgene_enet_ring2.c 12 static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) argument
14 u32 *ring_cfg = ring->state;
15 u64 addr = ring->dma;
17 if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
18 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
27 ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
34 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) argument
36 u32 *ring_cfg = ring->state;
40 is_bufpool = xgene_enet_is_bufpool(ring->id);
47 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) argument
55 xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, u32 offset, u32 data) argument
63 xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) argument
75 xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) argument
81 xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring) argument
95 xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) argument
118 xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) argument
127 xgene_enet_setup_ring( struct xgene_enet_desc_ring *ring) argument
152 xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring) argument
158 xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count) argument
171 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) argument
182 xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) argument
[all...]
/linux-master/drivers/net/ethernet/neterion/
s2io-regs.h 246 #define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
559 /* Per-ring controller regs */
/linux-master/drivers/net/ethernet/pasemi/
pasemi_mac.h 39 u64 *buffers; /* RX interface buffer ring */
64 int bufsz; /* RX ring buffer size */
101 #define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \
102 & ((ring)->size - 1))
103 #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
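The RING_USED/RING_AVAIL macros above rely on the ring size being a power of two: the fill and clean indices run freely and only their masked difference matters, so the occupancy stays correct even across 32-bit wrap-around. A minimal standalone sketch of that arithmetic (the struct and the index values are illustrative, not pasemi_mac's):

#include <stdio.h>

struct demo_ring {
        unsigned int size;            /* must be a power of two */
        unsigned int next_to_fill;    /* producer index, never masked on update */
        unsigned int next_to_clean;   /* consumer index */
};

/* same arithmetic as the pasemi_mac.h macros above */
#define RING_USED(ring)  (((ring)->next_to_fill - (ring)->next_to_clean) & \
                          ((ring)->size - 1))
#define RING_AVAIL(ring) ((ring)->size - RING_USED(ring))

int main(void)
{
        struct demo_ring r = { .size = 256 };

        r.next_to_clean = 0xFFFFFFF0u;  /* consumer lags ...             */
        r.next_to_fill  = 0xFFFFFFFEu;  /* ... producer is about to wrap */

        /* prints used=14 avail=242 despite the counters nearing overflow */
        printf("used=%u avail=%u\n", RING_USED(&r), RING_AVAIL(&r));
        return 0;
}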
/linux-master/drivers/net/wireless/ath/carl9170/
debug.h 124 struct carl9170_debug_mem_rbe ring[CARL9170_DEBUG_RING_SIZE]; member in struct:carl9170_debug
/linux-master/drivers/net/wireless/broadcom/b43/
dma.h 199 struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring,
203 void (*fill_descriptor) (struct b43_dmaring * ring,
207 void (*poke_tx) (struct b43_dmaring * ring, int slot);
208 void (*tx_suspend) (struct b43_dmaring * ring);
209 void (*tx_resume) (struct b43_dmaring * ring);
210 int (*get_current_rxslot) (struct b43_dmaring * ring);
211 void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
229 /* Kernel virtual base address of the ring memory. */
235 * This is NULL for an RX ring.
238 /* (Unadjusted) DMA base bus-address of the ring memory. */
278 b43_dma_read(struct b43_dmaring *ring, u16 offset) argument
283 b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value) argument
[all...]
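The b43 dma.h excerpt lists a table of function pointers (idx2desc, fill_descriptor, poke_tx, tx_suspend, ...) so 32-bit and 64-bit DMA engines can share the same ring code. A toy sketch of that ops-table pattern, with made-up names and a single dummy backend, not the b43 driver itself:

#include <stdio.h>

struct demo_ring;

struct demo_ring_ops {
        void (*poke_tx)(struct demo_ring *ring, int slot);
        int  (*get_current_rxslot)(struct demo_ring *ring);
};

struct demo_ring {
        int current_rxslot;
        const struct demo_ring_ops *ops;   /* selected once, e.g. per DMA type */
};

/* dummy "32-bit engine" backend */
static void poke_tx_32(struct demo_ring *ring, int slot)
{
        printf("32-bit backend: kick TX slot %d\n", slot);
}

static int get_current_rxslot_32(struct demo_ring *ring)
{
        return ring->current_rxslot;
}

static const struct demo_ring_ops ops32 = {
        .poke_tx            = poke_tx_32,
        .get_current_rxslot = get_current_rxslot_32,
};

int main(void)
{
        struct demo_ring ring = { .current_rxslot = 7, .ops = &ops32 };

        /* shared ring code calls through the table, never branching on the engine type */
        ring.ops->poke_tx(&ring, 3);
        printf("rx slot = %d\n", ring.ops->get_current_rxslot(&ring));
        return 0;
}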
/linux-master/drivers/net/wireless/broadcom/b43legacy/
dma.h 123 /* Kernel virtual base address of the ring memory. */
129 * This is NULL for an RX ring.
132 /* (Unadjusted) DMA base bus-address of the ring memory. */
134 /* Number of descriptor slots in the ring. */
138 /* Currently used slot in the ring. */
148 /* Boolean. Is this a TX ring? */
152 /* Boolean. Is this ring stopped at ieee80211 level? */
154 /* The QOS priority assigned to this ring. Only used for TX rings.
161 /* Last time we injected a ring overflow. */
168 u32 b43legacy_dma_read(struct b43legacy_dmaring *ring, argument
175 b43legacy_dma_write(struct b43legacy_dmaring *ring, u16 offset, u32 value) argument
219 b43legacy_dma_rx(struct b43legacy_dmaring *ring) argument
[all...]
/linux-master/drivers/scsi/ibmvscsi_tgt/
libsrp.c 25 struct srp_buf **ring)
42 iue->sbuf = ring[i];
61 struct srp_buf **ring; local
64 ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
65 if (!ring)
69 ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
70 if (!ring[i])
72 ring[i]->buf = dma_alloc_coherent(dev, size, &ring[
24 srp_iu_pool_alloc(struct srp_queue *q, size_t max, struct srp_buf **ring) argument
92 srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max, size_t size) argument
[all...]
/linux-master/include/linux/
skb_array.h 26 struct ptr_ring ring; member in struct:skb_array
34 return __ptr_ring_full(&a->ring);
39 return ptr_ring_full(&a->ring);
44 return ptr_ring_produce(&a->ring, skb);
49 return ptr_ring_produce_irq(&a->ring, skb);
54 return ptr_ring_produce_bh(&a->ring, skb);
59 return ptr_ring_produce_any(&a->ring, skb);
68 return __ptr_ring_empty(&a->ring);
73 return __ptr_ring_peek(&a->ring);
78 return ptr_ring_empty(&a->ring);
[all...]
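skb_array, as shown above, is a thin typed wrapper: it embeds a ptr_ring and forwards produce/consume-style calls to it. The user-space sketch below mirrors that wrapper pattern with a toy pointer ring; it is not the kernel ptr_ring API and every name in it is illustrative.

#include <stdio.h>
#include <stdlib.h>

/* toy generic pointer ring (single producer/consumer, no locking) */
struct ptr_ring_demo {
        void **queue;
        unsigned int size, head, tail;
};

static int ring_produce(struct ptr_ring_demo *r, void *ptr)
{
        unsigned int next = (r->head + 1) % r->size;

        if (next == r->tail)
                return -1;              /* full */
        r->queue[r->head] = ptr;
        r->head = next;
        return 0;
}

static void *ring_consume(struct ptr_ring_demo *r)
{
        void *ptr;

        if (r->tail == r->head)
                return NULL;            /* empty */
        ptr = r->queue[r->tail];
        r->tail = (r->tail + 1) % r->size;
        return ptr;
}

/* the skb_array-style wrapper: one embedded ring member plus typed helpers */
struct msg { int id; };

struct msg_array {
        struct ptr_ring_demo ring;
};

static int msg_array_produce(struct msg_array *a, struct msg *m)
{
        return ring_produce(&a->ring, m);
}

static struct msg *msg_array_consume(struct msg_array *a)
{
        return ring_consume(&a->ring);
}

int main(void)
{
        struct msg_array a = { .ring = { .size = 8 } };
        struct msg m = { .id = 42 };

        a.ring.queue = calloc(a.ring.size, sizeof(void *));
        msg_array_produce(&a, &m);
        printf("consumed id %d\n", msg_array_consume(&a)->id);
        free(a.ring.queue);
        return 0;
}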
/linux-master/include/uapi/linux/genwqe/
genwqe_card.h 53 #define IO_EXTENDED_DIAG_MAP(ring) (0x00000500 | ((ring) << 3))
55 #define GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace) (((ring) << 8) | (trace))
/linux-master/net/dccp/ccids/lib/
loss_interval.h 37 * @ring: Circular queue managed in LIFO manner
42 struct tfrc_loss_interval *ring[LIH_SIZE]; member in struct:tfrc_loss_hist
/linux-master/tools/testing/selftests/net/
psock_tpacket.c 66 struct ring { struct
71 void (*walk)(int sock, struct ring *ring);
220 static void walk_v1_v2_rx(int sock, struct ring *ring) argument
227 bug_on(ring->type != PACKET_RX_RING);
239 while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base,
240 ring->version)) {
241 ppd.raw = ring->rd[frame_num].iov_base;
243 switch (ring
354 get_next_frame(struct ring *ring, int n) argument
369 walk_tx(int sock, struct ring *ring) argument
500 walk_v1_v2(int sock, struct ring *ring) argument
581 walk_v3_rx(int sock, struct ring *ring) argument
622 walk_v3(int sock, struct ring *ring) argument
630 __v1_v2_fill(struct ring *ring, unsigned int blocks) argument
646 __v3_fill(struct ring *ring, unsigned int blocks, int type) argument
667 setup_ring(int sock, struct ring *ring, int version, int type) argument
708 mmap_ring(int sock, struct ring *ring) argument
726 bind_ring(int sock, struct ring *ring) argument
746 walk_ring(int sock, struct ring *ring) argument
751 unmap_ring(int sock, struct ring *ring) argument
805 struct ring ring; local
[all...]
txring_overwrite.c 86 static int setup_tx(char **ring) argument
112 error(1, errno, "setsockopt ring");
114 *ring = mmap(0, req.tp_block_size * req.tp_block_nr,
116 if (*ring == MAP_FAILED)
161 char *ring; local
165 fdt = setup_tx(&ring);
167 send_pkt(fdt, ring, payload_patterns[0]);
168 send_pkt(fdt, ring, payload_patterns[1]);
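setup_tx() above follows the usual packet-socket ring sequence: request the ring with setsockopt(), then mmap() the blocks and check for MAP_FAILED. A standalone sketch of that sequence for a TPACKET_V2 TX ring; the sizes are illustrative, it is not the selftest code, and running it needs CAP_NET_RAW:

#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <stdio.h>

int main(void)
{
        struct tpacket_req req = {
                .tp_block_size = 4096,          /* illustrative sizes          */
                .tp_block_nr   = 1,
                .tp_frame_size = 2048,
                .tp_frame_nr   = 2,             /* block bytes / frame bytes   */
        };
        int version = TPACKET_V2;
        char *ring;
        int fd;

        fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0) {
                perror("socket (requires CAP_NET_RAW)");
                return 1;
        }

        if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)) ||
            setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req))) {
                perror("setsockopt ring");
                return 1;
        }

        ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ring == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        printf("TX ring mapped at %p\n", (void *)ring);
        return 0;
}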
/linux-master/tools/virtio/ringtest/
Makefile 4 all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
11 ring.o: ring.c main.h
16 ring: ring.o main.o
24 -rm ring.o ring
ring.c 6 * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
56 struct desc *ring; variable in typeref:struct:desc
76 /* implemented by ring */
82 ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
84 perror("Unable to allocate ring buffer.\n");
101 ring[i] = desc;
125 ring[head].addr = (unsigned long)(void*)buf;
126 ring[head].len = len;
133 index = ring[hea
[all...]
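The ringtest allocation above uses posix_memalign() to place the descriptor array on a 0x1000-byte (page) boundary. A self-contained sketch of the same allocation; the struct desc layout here is illustrative, not the ringtest one:

#include <stdio.h>
#include <stdlib.h>

/* illustrative descriptor layout, not the ringtest struct desc */
struct desc {
        unsigned long addr;
        unsigned int len;
        unsigned short flags;
};

int main(void)
{
        struct desc *ring;
        size_t ring_size = 256;
        int ret;

        /* 0x1000-byte (page) alignment, as in the excerpt above */
        ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof(*ring));
        if (ret) {
                fprintf(stderr, "posix_memalign failed: %d\n", ret);
                return 1;
        }

        printf("ring at %p, %zu bytes\n", (void *)ring, ring_size * sizeof(*ring));
        free(ring);
        return 0;
}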
virtio_ring_0_9.c 22 struct vring ring; variable in typeref:struct:vring
24 /* enabling the below activates experimental ring polling code
26 * high bits of ring id ^ 0x8000).
30 * (which skips ring updates and reads and writes len in descriptor).
63 /* implemented by ring */
72 perror("Unable to allocate ring buffer.\n");
76 vring_init(&ring, ring_size, p, 0x1000);
86 ring.desc[i].next = i + 1;
117 desc = ring.desc;
136 ring
[all...]
/linux-master/arch/um/drivers/
vector_kern.c 1439 struct ethtool_ringparam *ring,
1445 ring->rx_max_pending = vp->rx_queue->max_depth;
1446 ring->tx_max_pending = vp->tx_queue->max_depth;
1447 ring->rx_pending = vp->rx_queue->max_depth;
1448 ring->tx_pending = vp->tx_queue->max_depth;
1438 vector_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) argument
/linux-master/drivers/ata/
libata-eh.c 382 ent = &ering->ring[ering->cursor];
390 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
406 ent = &ering->ring[idx];
1706 * This function examines error ring of @dev and determines
/linux-master/drivers/block/xen-blkback/
blkback.c 73 * to fill the ring, but since this might become too high, specially with
109 * Maximum order of pages to be used for the shared ring between front and
114 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
143 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
144 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
147 static void make_response(struct xen_blkif_ring *ring, u64 id,
168 static int add_persistent_gnt(struct xen_blkif_ring *ring, argument
173 struct xen_blkif *blkif = ring->blkif;
175 if (ring->persistent_gnt_c >= max_pgrants) {
181 new = &ring
205 get_persistent_gnt(struct xen_blkif_ring *ring, grant_ref_t gref) argument
232 put_persistent_gnt(struct xen_blkif_ring *ring, struct persistent_gnt *persistent_gnt) argument
242 free_persistent_gnts(struct xen_blkif_ring *ring) argument
296 struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work); local
332 purge_persistent_gnt(struct xen_blkif_ring *ring) argument
414 alloc_req(struct xen_blkif_ring *ring) argument
433 free_req(struct xen_blkif_ring *ring, struct pending_req *req) argument
523 blkif_notify_work(struct xen_blkif_ring *ring) argument
539 print_stats(struct xen_blkif_ring *ring) argument
556 struct xen_blkif_ring *ring = arg; local
636 xen_blkbk_free_caches(struct xen_blkif_ring *ring) argument
645 xen_blkbk_unmap_prepare( struct xen_blkif_ring *ring, struct grant_page **pages, unsigned int num, struct gnttab_unmap_grant_ref *unmap_ops, struct page **unmap_pages) argument
674 struct xen_blkif_ring *ring = pending_req->ring; local
706 struct xen_blkif_ring *ring = req->ring; local
731 xen_blkbk_unmap(struct xen_blkif_ring *ring, struct grant_page *pages[], int num) argument
756 xen_blkbk_map(struct xen_blkif_ring *ring, struct grant_page *pages[], int num, bool ro) argument
919 struct xen_blkif_ring *ring = pending_req->ring; local
966 dispatch_discard_io(struct xen_blkif_ring *ring, struct blkif_request *req) argument
1010 dispatch_other_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req) argument
1020 xen_blk_drain_io(struct xen_blkif_ring *ring) argument
1187 __do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) argument
1271 do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) argument
1290 dispatch_rw_block_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req) argument
1489 make_response(struct xen_blkif_ring *ring, u64 id, unsigned short op, int st) argument
[all...]
common.h 43 #include <xen/interface/io/ring.h>
70 /* Not a real protocol. Used to generate ring structs which contain
238 /* Number of requests that we can fit in a ring */
251 /* Per-ring information. */
263 /* One thread per blkif ring. */
345 struct xen_blkif_ring *ring; member in struct:pending_req
387 void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

Completed in 578 milliseconds
