Searched refs:ring (Results 276 - 300 of 806) sorted by last modified time


/linux-master/drivers/gpu/drm/qxl/
qxl_cmd.c
26 /* QXL cmd/ring handling */
37 struct ring { struct
43 struct ring *ring; member in struct:qxl_ring
51 void qxl_ring_free(struct qxl_ring *ring) argument
53 kfree(ring);
63 struct qxl_ring *ring; local
65 ring = kmalloc(sizeof(*ring), GFP_KERNEL);
66 if (!ring)
78 qxl_check_header(struct qxl_ring *ring) argument
92 qxl_check_idle(struct qxl_ring *ring) argument
104 qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible) argument
151 qxl_ring_pop(struct qxl_ring *ring, void *element) argument
[all...]
/linux-master/drivers/gpu/drm/i915/gt/
intel_engine_pm.c
83 CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
86 ce->ring->emit);
187 * that the ring is empty and we avoid any waits while constructing
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring.c
40 * Most engines on the GPU are fed via ring buffers. Ring
46 * pointers are equal, the ring is idle. When the host
47 * writes commands to the ring buffer, it increments the
55 * @type: ring type for which to return the limit.
73 * amdgpu_ring_alloc - allocate space on the ring buffer
75 * @ring: amdgpu_ring structure holding ring information
76 * @ndw: number of dwords to allocate in the ring buffer
78 * Allocate @ndw dwords in the ring buffer (all asics).
81 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned argument
109 amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) argument
125 amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) argument
140 amdgpu_ring_commit(struct amdgpu_ring *ring) argument
164 amdgpu_ring_undo(struct amdgpu_ring *ring) argument
196 amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int max_dw, struct amdgpu_irq_src *irq_src, unsigned int irq_type, unsigned int hw_prio, atomic_t *sched_score) argument
372 amdgpu_ring_fini(struct amdgpu_ring *ring) argument
416 amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) argument
433 amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, struct dma_fence *fence) argument
475 struct amdgpu_ring *ring = file_inode(f)->i_private; local
525 struct amdgpu_ring *ring = file_inode(f)->i_private; local
589 struct amdgpu_ring *ring = data; local
600 amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) argument
636 amdgpu_ring_test_helper(struct amdgpu_ring *ring) argument
654 amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, struct amdgpu_mqd_prop *prop) argument
686 amdgpu_ring_init_mqd(struct amdgpu_ring *ring) argument
704 amdgpu_ring_ib_begin(struct amdgpu_ring *ring) argument
710 amdgpu_ring_ib_end(struct amdgpu_ring *ring) argument
716 amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring) argument
722 amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring) argument
728 amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) argument
734 amdgpu_ring_sched_ready(struct amdgpu_ring *ring) argument
[all...]
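
The comment block at the top of amdgpu_ring.c (file lines 40-47 above) describes the convention most GPU rings follow: the ring is idle when the read and write pointers are equal, and the host submits work by writing commands and advancing the write pointer. Below is a minimal userspace sketch of that convention; toy_ring and its helpers are illustrative stand-ins, not the amdgpu API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 256u		/* power of two, as on real rings */

struct toy_ring {
	uint32_t buf[RING_DWORDS];
	uint32_t rptr;			/* consumer (GPU) read pointer */
	uint32_t wptr;			/* producer (host) write pointer */
};

static bool ring_idle(const struct toy_ring *r)
{
	return r->rptr == r->wptr;	/* pointers equal -> nothing pending */
}

static void ring_write(struct toy_ring *r, uint32_t dw)
{
	r->buf[r->wptr & (RING_DWORDS - 1)] = dw;
	r->wptr++;			/* commit by advancing the write pointer */
}

int main(void)
{
	struct toy_ring r = { 0 };

	printf("idle before write: %d\n", ring_idle(&r));	/* 1 */
	ring_write(&r, 0xdeadbeef);
	printf("idle after write:  %d\n", ring_idle(&r));	/* 0 */
	r.rptr = r.wptr;		/* consumer catches up */
	printf("idle after drain:  %d\n", ring_idle(&r));	/* 1 */
	return 0;
}
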
amdgpu_umsch_mm.h
126 struct amdgpu_ring ring; member in struct:amdgpu_umsch_mm
/linux-master/drivers/ata/
libata-eh.c
382 ent = &ering->ring[ering->cursor];
390 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
406 ent = &ering->ring[idx];
1706 * This function examines error ring of @dev and determines
/linux-master/drivers/net/wwan/t7xx/
t7xx_hif_cldma.c
370 struct cldma_ring *ring, enum dma_data_direction tx_rx)
374 list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
377 ring->pkt_size, tx_rx);
419 static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) argument
425 INIT_LIST_HEAD(&ring->gpd_ring);
426 ring->length = MAX_RX_BUDGET;
428 for (i = 0; i < ring->length; i++) {
429 req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
431 t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
437 gpd->rx_data_allow_len = cpu_to_le16(ring
369 t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring, enum dma_data_direction tx_rx) argument
469 t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) argument
[all...]
/linux-master/drivers/net/ethernet/hisilicon/hns3/
hns3_ethtool.c
184 static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, argument
187 struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
216 struct hns3_enet_ring *ring = &priv->ring[i]; local
220 rx_group = &ring->tqp_vector->rx_group;
224 hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
239 struct hns3_enet_ring *ring = &priv->ring[i]; local
241 hns3_clean_tx_ring(ring, 0);
589 struct hns3_enet_ring *ring; local
[all...]
/linux-master/kernel/bpf/
cpumap.c
62 /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
118 static void __cpu_map_ring_cleanup(struct ptr_ring *ring) argument
127 while ((ptr = ptr_ring_consume(ring))) {
480 * cpu_map_kthread_run() makes sure the pointer ring is empty
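The cleanup path referenced here (cpu_map_kthread_run() guarantees the pointer ring is empty before teardown, and the __cpu_map_ring_cleanup() snippet consumes any stragglers) boils down to: once no producer can touch the ring, drain until empty and free whatever is left. A small self-contained model of that drain-before-free rule, with a toy pring type standing in for the kernel's ptr_ring:

#include <stdio.h>
#include <stdlib.h>

#define PRING_SIZE 8

struct pring {
	void *slots[PRING_SIZE];
	int head, tail;
};

static int pring_produce(struct pring *r, void *p)
{
	if ((r->head + 1) % PRING_SIZE == r->tail)
		return -1;			/* full */
	r->slots[r->head] = p;
	r->head = (r->head + 1) % PRING_SIZE;
	return 0;
}

static void *pring_consume(struct pring *r)
{
	void *p;

	if (r->head == r->tail)
		return NULL;			/* empty */
	p = r->slots[r->tail];
	r->tail = (r->tail + 1) % PRING_SIZE;
	return p;
}

/* drain-and-free, mirroring the cleanup rule quoted above */
static void pring_cleanup(struct pring *r)
{
	void *p;

	while ((p = pring_consume(r))) {
		printf("freeing leftover entry %p\n", p);
		free(p);
	}
}

int main(void)
{
	struct pring r = { 0 };

	pring_produce(&r, malloc(16));
	pring_produce(&r, malloc(16));
	pring_cleanup(&r);			/* ring must be empty before free */
	return 0;
}
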
/linux-master/include/linux/
thunderbolt.h
475 * @lock: Must be held during ring creation/destruction. Is acquired by
486 * @interrupt_work: Work scheduled to handle ring interrupt when no
507 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
508 * @lock: Lock serializing actions to this ring. Must be acquired after
511 * @size: Size of the ring
512 * @hop: Hop (DMA channel) associated with this ring
513 * @head: Head of the ring (write next descriptor here)
514 * @tail: Tail of the ring (complete next descriptor here)
515 * @descriptors: Allocated descriptors for this ring
516 * @queue: Queue holding frames to be transferred over this ring
635 tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame) argument
656 tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame) argument
673 tb_ring_dma_device(struct tb_ring *ring) argument
[all...]
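
The struct tb_ring field docs above (file lines 513-514) spell out the head/tail discipline: new descriptors are written at @head and completed ones are reaped at @tail (the corresponding full/empty tests appear in nhi.c further down). Here is a compact userspace model of that discipline using the common one-slot-kept-free convention; toy_tb_ring and its helpers are illustrative, not the driver's types.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct toy_desc { int payload; };

struct toy_tb_ring {
	struct toy_desc descriptors[RING_SIZE];
	int head;	/* write next descriptor here */
	int tail;	/* complete next descriptor here */
	int size;
};

static bool toy_ring_full(const struct toy_tb_ring *r)
{
	return ((r->head + 1) % r->size) == r->tail;	/* one slot kept free */
}

static bool toy_ring_empty(const struct toy_tb_ring *r)
{
	return r->head == r->tail;
}

static bool toy_enqueue(struct toy_tb_ring *r, int payload)
{
	if (toy_ring_full(r))
		return false;
	r->descriptors[r->head].payload = payload;
	r->head = (r->head + 1) % r->size;
	return true;
}

static bool toy_complete(struct toy_tb_ring *r, int *payload)
{
	if (toy_ring_empty(r))
		return false;
	*payload = r->descriptors[r->tail].payload;
	r->tail = (r->tail + 1) % r->size;
	return true;
}

int main(void)
{
	struct toy_tb_ring r = { .size = RING_SIZE };
	int v;

	toy_enqueue(&r, 42);
	toy_enqueue(&r, 43);
	while (toy_complete(&r, &v))
		printf("completed descriptor with payload %d\n", v);
	return 0;
}
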
/linux-master/drivers/usb/host/
xhci.h
193 /* bit 0 is the command ring cycle state */
194 /* stop ring operation after completion of the currently executing command */
196 /* stop ring immediately - abort the currently executing command */
198 /* true: command ring is running */
221 * @erst_dequeue: Event ring dequeue pointer.
223 * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
224 * Ring Segment Table (ERST) associated with it. The event ring is comprised of
225 * multiple segments of the same size. The HC places events on the ring and
249 * between interrupts will be longer if there are no events on the event ring.
268 /* Event Handler Busy (EHB) - is the event ring schedule
651 struct xhci_ring *ring; member in struct:xhci_virt_ep
[all...]
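
The interrupter comment above (file lines 223-225) explains the layout this sketch models: each interrupter owns an event ring made of several same-sized segments, described by an Event Ring Segment Table (ERST), and the dequeue position walks one segment, then hops to the next table entry, wrapping at the end. The types below are toy stand-ins for illustration only, not the xhci driver's structures.

#include <stdio.h>

#define ERST_ENTRIES 3
#define SEG_TRBS     4			/* every segment is the same size */

struct toy_trb { int event; };

struct toy_erst_entry {
	struct toy_trb *seg_base;	/* segment base address */
	int seg_size;			/* TRBs per segment */
};

int main(void)
{
	struct toy_trb segs[ERST_ENTRIES][SEG_TRBS];
	struct toy_erst_entry erst[ERST_ENTRIES];
	int seg = 0, idx = 0, i;

	for (i = 0; i < ERST_ENTRIES; i++)
		erst[i] = (struct toy_erst_entry){ segs[i], SEG_TRBS };

	/* advance the dequeue position ten times across the segmented ring */
	for (i = 0; i < 10; i++) {
		struct toy_trb *deq = &erst[seg].seg_base[idx];

		printf("dequeue: segment %d, trb %d (%p)\n", seg, idx, (void *)deq);
		if (++idx == erst[seg].seg_size) {	/* segment exhausted: */
			idx = 0;			/* hop to the next ERST entry, */
			seg = (seg + 1) % ERST_ENTRIES;	/* wrapping at the table end */
		}
	}
	return 0;
}
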
xhci.c
41 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) argument
43 struct xhci_segment *seg = ring->first_seg;
51 } while (seg && seg != ring->first_seg);
466 * device contexts (?), set up a command ring segment (or two?), create event
467 * ring (one for now).
542 * set command ring pointer and event ring pointer.
645 "// Disabling event ring interrupts");
760 /* step 2: initialize command ring buffer */
768 "// Setting command ring addres
784 struct xhci_ring *ring; local
[all...]
xhci-mem.c
23 * Allocates a generic ring segment from the ring pool, sets the dma address,
126 * Link the ring to the new segments.
127 * Set Toggle Cycle for the new ring if needed.
129 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, argument
136 if (!ring || !first || !last)
141 (ring->type == TYPE_ISOC &&
144 next = ring->enq_seg->next;
145 xhci_link_segments(ring->enq_seg, first, ring
194 xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map, struct xhci_ring *ring, struct xhci_segment *seg, gfp_t mem_flags) argument
226 xhci_update_stream_segment_mapping( struct radix_tree_root *trb_address_map, struct xhci_ring *ring, struct xhci_segment *first_seg, struct xhci_segment *last_seg, gfp_t mem_flags) argument
266 xhci_remove_stream_mapping(struct xhci_ring *ring) argument
280 xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags) argument
287 xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) argument
303 xhci_initialize_ring_info(struct xhci_ring *ring, unsigned int cycle_state) argument
382 struct xhci_ring *ring; local
430 xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_new_segs, gfp_t flags) argument
[all...]
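
The xhci-mem.c comments above (file lines 126-127) describe ring expansion as splicing a pre-linked chain of new segments into the circular list of existing ones. Here is a toy model of just the splice step; toy_seg and splice_after are hypothetical names, and real xHCI rings also fix up link TRBs and the Toggle Cycle bit, which this sketch omits.

#include <stdio.h>
#include <stdlib.h>

struct toy_seg {
	int id;
	struct toy_seg *next;
};

static struct toy_seg *seg_new(int id)
{
	struct toy_seg *s = calloc(1, sizeof(*s));

	if (!s)
		exit(1);
	s->id = id;
	return s;
}

/* splice the chain first..last into the circle right after enq */
static void splice_after(struct toy_seg *enq, struct toy_seg *first,
			 struct toy_seg *last)
{
	last->next = enq->next;
	enq->next = first;
}

int main(void)
{
	/* two-segment circular ring: 0 -> 1 -> 0 */
	struct toy_seg *s0 = seg_new(0), *s1 = seg_new(1);
	struct toy_seg *n2 = seg_new(2), *n3 = seg_new(3);
	struct toy_seg *p;
	int i;

	s0->next = s1; s1->next = s0;
	n2->next = n3;			/* new pre-linked chain to insert */

	splice_after(s0, n2, n3);	/* ring becomes 0 -> 2 -> 3 -> 1 -> 0 */

	for (i = 0, p = s0; i < 5; i++, p = p->next)
		printf("seg %d\n", p->id);
	return 0;
}
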
xhci-hub.c
478 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
541 } else if (ep->ring && ep->ring->dequeue) {
1109 * and ring the endpoint doorbells.
xhci-dbgcap.c
41 static void dbc_ring_free(struct device *dev, struct xhci_ring *ring) argument
43 if (!ring)
46 if (ring->first_seg) {
48 ring->first_seg->trbs,
49 ring->first_seg->dma);
50 kfree(ring->first_seg);
52 kfree(ring);
238 xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1, argument
243 trb = ring->enqueue;
249 trace_xhci_dbc_gadget_ep_queue(ring,
267 struct xhci_ring *ring = dep->ring; local
439 struct xhci_ring *ring; local
697 struct xhci_ring *ring; local
751 inc_evt_deq(struct xhci_ring *ring) argument
[all...]
xhci-dbgcap.h
91 struct xhci_ring *ring; member in struct:dbc_ep
/linux-master/drivers/thunderbolt/
nhi.c
28 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
53 static int ring_interrupt_index(const struct tb_ring *ring) argument
55 int bit = ring->hop;
56 if (!ring->is_tx)
57 bit += ring->nhi->hop_count;
61 static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring) argument
66 val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
73 nhi_clear_interrupt(struct tb_nhi *nhi, int ring) argument
86 ring_interrupt_active(struct tb_ring *ring, bool active) argument
176 ring_desc_base(struct tb_ring *ring) argument
184 ring_options_base(struct tb_ring *ring) argument
192 ring_iowrite_cons(struct tb_ring *ring, u16 cons) argument
202 ring_iowrite_prod(struct tb_ring *ring, u16 prod) argument
208 ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) argument
213 ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset) argument
219 ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset) argument
224 ring_full(struct tb_ring *ring) argument
229 ring_empty(struct tb_ring *ring) argument
239 ring_write_descriptors(struct tb_ring *ring) argument
275 struct tb_ring *ring = container_of(work, typeof(*ring), work); local
323 __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) argument
349 tb_ring_poll(struct tb_ring *ring) argument
381 __ring_interrupt_mask(struct tb_ring *ring, bool mask) argument
397 __ring_interrupt(struct tb_ring *ring) argument
417 tb_ring_poll_complete(struct tb_ring *ring) argument
430 ring_clear_msix(const struct tb_ring *ring) argument
447 struct tb_ring *ring = data; local
459 ring_request_msix(struct tb_ring *ring, bool no_suspend) argument
493 ring_release_msix(struct tb_ring *ring) argument
504 nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) argument
582 struct tb_ring *ring = NULL; local
680 tb_ring_start(struct tb_ring *ring) argument
764 tb_ring_stop(struct tb_ring *ring) argument
809 tb_ring_free(struct tb_ring *ring) argument
912 struct tb_ring *ring; local
[all...]
ctl.c
27 * @tx: Transmit ring
28 * @rx: Receive ring
342 static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame, argument
384 if (res) /* ring is stopped */
435 static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, argument
444 * ring is stopped, packet is referenced from
/linux-master/drivers/staging/rtl8192e/rtl8192e/
rtl_core.c
214 struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; local
216 if (ring->entries - skb_queue_len(&ring->queue) >= 2)
834 struct rtl8192_tx_ring *ring = NULL; local
855 ring = &priv->tx_ring[QueueID];
857 if (skb_queue_len(&ring->queue) == 0) {
860 skb = __skb_peek(&ring->queue);
1113 struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; local
1115 while (skb_queue_len(&ring->queue)) {
1116 struct tx_desc *entry = &ring
1200 struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; local
1225 struct rtl8192_tx_ring *ring; local
1248 struct rtl8192_tx_ring *ring; local
1343 struct tx_desc *ring; local
1413 struct rtl8192_tx_ring *ring = &priv->tx_ring[i]; local
[all...]
r8192E_phy.c
985 struct rtl8192_tx_ring *ring = NULL; local
1035 ring = &priv->tx_ring[QueueID];
1036 if (skb_queue_len(&ring->queue) == 0) {
1050 ring = &priv->tx_ring[QueueID];
1051 if (skb_queue_len(&ring->queue) == 0) {
/linux-master/drivers/soc/fsl/qbman/
qman.c
127 /* Cache-enabled ring access */
198 struct qm_eqcr_entry *ring, *cursor; member in struct:qm_eqcr
207 const struct qm_dqrr_entry *ring, *cursor; member in struct:qm_dqrr
217 union qm_mr_entry *ring, *cursor; member in struct:qm_mr
394 /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
404 /* Bit-wise logic to convert a ring pointer to a ring index */
410 /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
430 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
434 eqcr->cursor = eqcr->ring
[all...]
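
The three qman.c comments quoted above (file lines 394-410) describe pointer arithmetic on a ring of cache-line-sized entries: wrapping clears a single "carry bit" just above the ring, the index is recovered by shifting out the entry size, and a valid bit ("vbit") flips on every wrap. A userspace sketch of that arithmetic under assumed constants (64-byte entries, 8 per ring); the names and sizes are illustrative, not the qbman driver's.

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRY_SHIFT 6				/* 64-byte ring entries */
#define RING_SIZE   8				/* entries per ring */
#define CARRY	    ((uintptr_t)RING_SIZE << ENTRY_SHIFT)	/* bit 9 */

struct entry { unsigned char pad[1 << ENTRY_SHIFT]; };

/* wrap a ring pointer by clearing the "carry bit" */
static struct entry *carryclear(struct entry *p)
{
	return (struct entry *)((uintptr_t)p & ~CARRY);
}

/* convert a ring pointer to a ring index */
static int ptr2idx(struct entry *p)
{
	return ((uintptr_t)p >> ENTRY_SHIFT) & (RING_SIZE - 1);
}

int main(void)
{
	struct entry *ring, *cursor;
	int vbit = 1, i;

	/*
	 * Align to 2 * CARRY so the base itself has the carry bit clear;
	 * only then does masking the bit off implement wrap-around.
	 */
	if (posix_memalign((void **)&ring, 2 * CARRY, CARRY))
		return 1;

	cursor = ring;
	for (i = 0; i < 10; i++) {
		/* increment the cursor, taking 'vbit' into account */
		struct entry *partial = cursor + 1;

		cursor = carryclear(partial);
		if (partial != cursor)		/* wrapped: flip the valid bit */
			vbit ^= 1;
		printf("idx=%d vbit=%d\n", ptr2idx(cursor), vbit);
	}
	free(ring);
	return 0;
}
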
/linux-master/drivers/net/wireguard/
receive.c
212 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
499 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
555 if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
556 ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
557 spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
560 ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
device.c
130 while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
/linux-master/drivers/net/ethernet/broadcom/
cnic_if.h
28 /* Use CPU native page size up to 16K for cnic ring sizes. */
162 struct drv_ctl_l2_ring ring; member in union:drv_ctl_info::__anon189
/linux-master/drivers/bus/mhi/host/
main.c
118 struct mhi_ring *ring = &mhi_event->ring; local
121 ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
127 struct mhi_ring *ring = &mhi_cmd->ring; local
129 db = ring->iommu_base + (ring->wp - ring->base);
130 *ring
137 struct mhi_ring *ring = &mhi_chan->tre_ring; local
230 get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) argument
246 mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) argument
251 mhi_add_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) argument
261 mhi_del_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) argument
271 is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) argument
547 mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) argument
1109 mhi_is_ring_full(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) argument
1288 struct mhi_ring *ring = &mhi_cmd->ring; local
[all...]
init.c
138 /* MHI protocol requires the transfer ring to be aligned with ring length */
140 struct mhi_ring *ring,
143 ring->alloc_size = len + (len - 1);
144 ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
145 &ring->dma_handle, GFP_KERNEL);
146 if (!ring->pre_aligned)
149 ring->iommu_base = (ring
139 mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring, u64 len) argument
239 struct mhi_ring *ring; local
342 struct mhi_ring *ring = &mhi_event->ring; local
388 struct mhi_ring *ring = &mhi_cmd->ring; local
410 struct mhi_ring *ring = &mhi_cmd->ring; local
423 struct mhi_ring *ring = &mhi_event->ring; local
[all...]
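
The mhi_alloc_aligned_ring() snippet above shows the standard over-allocate-and-round-up trick implied by the comment at file line 138: to align a ring to its own length (a power of two), allocate len + (len - 1) bytes and round the base up to the next multiple of len. A minimal userspace sketch of just that arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint64_t len = 4096;			/* ring length, power of two */
	size_t alloc_size = len + (len - 1);	/* worst-case alignment slack */
	void *pre_aligned = malloc(alloc_size);
	uintptr_t base;

	if (!pre_aligned)
		return 1;

	/* round the raw base up to the next len-aligned boundary */
	base = ((uintptr_t)pre_aligned + (len - 1)) & ~(uintptr_t)(len - 1);

	printf("raw base %p -> aligned base %#lx (mod len = %lu)\n",
	       pre_aligned, (unsigned long)base, (unsigned long)(base % len));
	free(pre_aligned);
	return 0;
}
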

Completed in 339 milliseconds
