Lines matching refs:ring: descriptor-ring helpers from the APM X-Gene Ethernet driver (xgene_enet). The hits are grouped by function below; lines the search did not match are elided as /* ... */.

/* Build the cached ring-state words from the ring's id, DMA address and size. */
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
		ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
		/* ... */
	}
	/* ... */
	ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
		/* ... */;
}

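SET_VAL() appears on nearly every state word above, but its definition was not matched by the search. In kernel drivers this kind of field macro is usually built from per-field POS/LEN constants and GENMASK(); a minimal sketch under that assumption (the X2_INTLINE_POS/_LEN values here are made up for illustration, not taken from the driver headers):

	#include <linux/bits.h>

	/* Hypothetical field description; the real positions live in the driver headers. */
	#define X2_INTLINE_POS	24
	#define X2_INTLINE_LEN	5

	/* Shift @val into the field, then mask it to the field's width. */
	#define SET_VAL(field, val) \
		(((u32)(val) << field##_POS) & \
		 GENMASK(field##_POS + field##_LEN - 1, field##_POS))
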
/* Mark the ring as a regular ring or a buffer pool, based on its id. */
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	/* ... */
}

/* Enable recombination buffering in the cached ring state. */
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	/* ... */
}

/* Write one ring CSR; only the pdata lookup was matched, see the sketch below. */
static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	/* ... */
}

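The body of the accessor is not among the matched lines. A minimal sketch of what it presumably does, assuming pdata exposes an ioremapped ring-CSR base (the field name ring_csr_addr is an assumption; iowrite32() itself is confirmed by the doorbell write further down):

	static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
					 u32 offset, u32 data)
	{
		struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

		/* pdata->ring_csr_addr (assumed name): ioremapped base of the ring CSRs */
		iowrite32(data, pdata->ring_csr_addr + offset);
	}
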
/* Flush the cached state words to the ring configuration CSRs. */
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	/* presumably selects which ring the write window targets */
	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {	/* loop bound assumed */
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

/* Zero the cached state and push the zeros to hardware. */
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

/* Compose the full ring state (type, recombination, base config) and flush it. */
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	enum xgene_ring_owner owner;

	xgene_enet_ring_set_type(ring);

	owner = xgene_enet_ring_owner(ring->id);
	if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)	/* condition assumed */
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

/* Publish the ring's id and buffer configuration to the global CSRs. */
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
		return;	/* early return assumed: CPU-owned rings skip this */

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	/* ... */

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	/* ... */

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

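The two CSR values pack several fields. A worked example of just the bit layout visible in the matched lines (the elided flag bits are ignored), as a standalone user-space check:

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

	int main(void)
	{
		uint32_t id = 0x208, num = 10;	/* example ring id and ring number */

		/* CSR_RING_ID: bits 9:0 carry the ring id */
		uint32_t ring_id_val = id & GENMASK(9, 0);
		/* CSR_RING_ID_BUF: bits 18:9 carry the ring number */
		uint32_t ring_id_buf = (num << 9) & GENMASK(18, 9);

		/* prints ring_id_val=0x208 ring_id_buf=0x1400 */
		printf("ring_id_val=0x%x ring_id_buf=0x%x\n", ring_id_val, ring_id_buf);
		return 0;
	}
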
/* Invalidate the ring id previously published to the global CSRs. */
static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

/* One-stop ring bring-up; returns the ring. The function name is assumed from
 * the paired xgene_enet_clear_ring() below; only the second signature line
 * was matched.
 */
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	bool is_bufpool;
	u32 addr, i;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, ring->size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	/* CPU-owned completion rings also get an interrupt mailbox and
	 * start with every descriptor slot marked empty.
	 */
	addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
	xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	return ring;
}

/* Teardown counterpart of the setup path above. */
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

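Taken together, the two entry points above give the ring's bring-up/teardown pattern. A hedged sketch of a caller; the wrapper function name is illustrative, not from the matched lines:

	/* Illustrative only: bring up a descriptor ring, use it, tear it down. */
	static void xgene_demo_ring_lifecycle(struct xgene_enet_desc_ring *ring)
	{
		/* clear stale state, write the new config, publish the ring id */
		ring = xgene_enet_setup_ring(ring);

		/* ... use the ring: post descriptors, ring the doorbell ... */

		/* unpublish the id and zero the hardware state */
		xgene_enet_clear_ring(ring);
	}
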
/* Ring the doorbell: advance the queue by @count descriptors. */
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	u32 data = 0;

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
		data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
		       /* ... */;
	}
	/* ... fold @count into the command word ... */

	iowrite32(data, ring->cmd);
}

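The signed count suggests the same doorbell both publishes new descriptors and retires processed ones. A hedged caller-side sketch (the surrounding TX/RX logic and the wrapper name are illustrative):

	/* Illustrative: the producer side publishes, the consumer side retires. */
	static void xgene_demo_doorbells(struct xgene_enet_desc_ring *tx_ring,
					 struct xgene_enet_desc_ring *rx_ring,
					 int queued, int processed)
	{
		/* TX: tell the hardware @queued new descriptors are ready */
		xgene_enet_wr_cmd(tx_ring, queued);

		/* RX/completion: return @processed slots to the hardware */
		xgene_enet_wr_cmd(rx_ring, -processed);
	}
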
/* Number of messages currently queued in the ring; see the sketch below. */
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;

	/* ... */
}

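Only the cmd_base lookup was matched. Reading the occupancy presumably means loading a ring-state word from the command area and extracting a "messages in queue" field; a sketch, assuming a GET_VAL() counterpart to SET_VAL() and the field name X2_NUMMSGSINQ (neither appears in the matched lines):

	static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
	{
		u32 __iomem *cmd_base = ring->cmd_base;
		u32 ring_state, num_msgs;

		/* word 1 of the command area mirrors the ring state (assumed layout) */
		ring_state = ioread32(&cmd_base[1]);
		num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);

		return num_msgs;
	}
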
/* Program interrupt coalescing: PBM tick selects plus two service thresholds. */
static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
	u32 data = 0x77777777;	/* tick pattern; value assumed, not among the matched lines */

	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
}
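The raw magic numbers make the coalescing setup hard to audit. One hedged way to document them is with named constants; every meaning below is an assumption that the hardware manual, not the matched lines, would have to confirm:

	/* Hypothetical self-documenting names for the values written above. */
	#define XGENE_PBM_COAL_VAL	0x8e		/* PBM coalescing control (meaning assumed) */
	#define XGENE_PBM_CTICK_VAL	0x77777777	/* per-queue tick select pattern (assumed) */
	#define XGENE_THRESHOLD0_VAL	0x08		/* first interrupt threshold (assumed) */
	#define XGENE_THRESHOLD1_VAL	0x10		/* second interrupt threshold (assumed) */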