Lines matching defs:comm — every use of struct spl2sw_common *comm across the spl2sw driver's RX/TX NAPI poll handlers and its hard-IRQ handler.

19 struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
36 rx_pos = comm->rx_pos[queue];
37 rx_count = comm->rx_desc_num[queue];
40 sinfo = comm->rx_skb_info[queue] + rx_pos;
41 desc = comm->rx_desc[queue] + rx_pos;
48 if (port < MAX_NETDEV_NUM && comm->ndev[port])
49 stats = &comm->ndev[port]->stats;
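Lines 19-49 are the top of the RX NAPI poll handler: container_of() recovers the driver-private spl2sw_common from its embedded rx_napi, the per-queue cursor rx_pos indexes both the descriptor ring and the parallel rx_skb_info array, and the ingress port taken from the descriptor selects which net_device's stats to bill. A minimal sketch of that skeleton, assuming the comm fields shown in the listing plus illustrative names (RXD_OWN, RXD_PKT_SP, RX_QUEUE_NUM, spl2sw_mac_desc, spl2sw_skb_info) that the listing itself does not confirm:

#include <linux/netdevice.h>
#include <linux/bitfield.h>

static int spl2sw_rx_poll(struct napi_struct *napi, int budget)
{
        struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
        int queue;

        for (queue = 0; queue < RX_QUEUE_NUM; queue++) {
                u32 rx_pos = comm->rx_pos[queue];        /* resume where the last poll stopped */
                u32 rx_count = comm->rx_desc_num[queue]; /* ring size for this queue */
                u32 num;

                for (num = 0; num < rx_count && budget; num++) {
                        struct spl2sw_skb_info *sinfo = comm->rx_skb_info[queue] + rx_pos;
                        struct spl2sw_mac_desc *desc = comm->rx_desc[queue] + rx_pos;
                        struct net_device_stats *stats = NULL;
                        u32 cmd = desc->cmd1;
                        u32 port;

                        if (cmd & RXD_OWN)      /* hardware still owns this slot: stop */
                                break;

                        /* Bill the packet to the net_device of its ingress port. */
                        port = FIELD_GET(RXD_PKT_SP, cmd);
                        if (port < MAX_NETDEV_NUM && comm->ndev[port])
                                stats = &comm->ndev[port]->stats;

                        /* ... unmap, hand the skb up, refill the slot (sketched below) ... */

                        rx_pos = (rx_pos + 1 == rx_count) ? 0 : rx_pos + 1;
                        budget--;
                }
                comm->rx_pos[queue] = rx_pos;            /* remember for the next poll */
        }
        return 0;
}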
60 dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
61 comm->rx_desc_buff_size, DMA_FROM_DEVICE);
66 skb->protocol = eth_type_trans(skb, comm->ndev[port]);
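Lines 60-66 detach the filled buffer from the device and push it into the stack: the DMA unmap makes the data visible to the CPU, and eth_type_trans() fills in skb->dev and skb->protocol from the Ethernet header. A hedged sketch of that hand-off, with the payload length (pkt_len) assumed to come from the descriptor rather than shown in the listing:

/* Inside the poll loop of the sketch above. */
dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
                 comm->rx_desc_buff_size, DMA_FROM_DEVICE);

skb = sinfo->skb;
skb_put(skb, pkt_len);                  /* length as reported by the descriptor */
skb->ip_summed = CHECKSUM_NONE;         /* no RX checksum offload assumed */
skb->protocol = eth_type_trans(skb, comm->ndev[port]);

len = skb->len;                         /* record before the stack consumes the skb */
netif_receive_skb(skb);                 /* hand the frame to the stack */

stats->rx_packets++;
stats->rx_bytes += len;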
74 new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
76 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
84 sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
85 comm->rx_desc_buff_size,
87 if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
89 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
101 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
102 RXD_EOR | comm->rx_desc_buff_size :
103 comm->rx_desc_buff_size;
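Lines 74-103 refill the slot just consumed so the ring never runs dry: allocate a replacement skb, DMA-map it, and rewrite cmd2, where the ternary ORs RXD_EOR into the last descriptor so the hardware wraps back to the start of the ring. On allocation or mapping failure the slot is parked empty rather than re-armed. A sketch under those assumptions; the next_pos label, the addr1 field, and the RXD_OWN hand-back are inferred, not shown in the listing:

/* Refill the slot so the ring never runs dry; keep cmd2's
 * end-of-ring marker correct even when the slot stays empty.
 */
new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
if (unlikely(!new_skb)) {
        desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? RXD_EOR : 0;
        sinfo->skb = NULL;
        sinfo->mapping = 0;
        desc->addr1 = 0;
        goto next_pos;                  /* skip re-arming this descriptor */
}

sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
                                comm->rx_desc_buff_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
        dev_kfree_skb_irq(new_skb);     /* give the buffer back */
        desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? RXD_EOR : 0;
        sinfo->skb = NULL;
        sinfo->mapping = 0;
        desc->addr1 = 0;
        goto next_pos;
}

sinfo->skb = new_skb;
desc->addr1 = sinfo->mapping;

/* Re-arm: buffer size in cmd2, plus end-of-ring on the last slot. */
desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
             RXD_EOR | comm->rx_desc_buff_size :
             comm->rx_desc_buff_size;
wmb();                          /* publish cmd2/addr1 before the ownership flip */
desc->cmd1 = RXD_OWN;           /* hand the slot back to hardware (ownership bit only, for brevity) */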
110 rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1;
121 comm->rx_pos[queue] = rx_pos;
125 h_desc = comm->rx_desc[queue] + rx_pos;
128 spin_lock_irqsave(&comm->int_mask_lock, flags);
129 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
131 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
132 spin_unlock_irqrestore(&comm->int_mask_lock, flags);
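Lines 128-132 close the poll: the RX interrupt source is unmasked again with a read-modify-write of L2SW_SW_INT_MASK_0. The _irqsave lock matters because the hard-IRQ handler below performs the same read-modify-write (lines 227-231, 247-251, 261-265); without int_mask_lock the two sequences could interleave and lose a mask bit. A sketch of the usual NAPI completion idiom around it, with MAC_INT_RX as an assumed bit name and work_done as assumed bookkeeping; whether the driver gates the unmask on the budget is not visible in the listing:

/* End of the RX poll: report work done, then unmask RX. */
if (budget) {                           /* ring drained before the budget ran out */
        napi_complete_done(napi, work_done);

        spin_lock_irqsave(&comm->int_mask_lock, flags);
        mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        mask &= ~MAC_INT_RX;            /* clear the bit to re-enable the source */
        writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        spin_unlock_irqrestore(&comm->int_mask_lock, flags);
}
return work_done;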
140 struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi);
150 spin_lock(&comm->tx_lock);
152 tx_done_pos = comm->tx_done_pos;
153 while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) {
154 cmd = comm->tx_desc[tx_done_pos].cmd1;
158 skbinfo = &comm->tx_temp_skb_info[tx_done_pos];
163 if (i < MAX_NETDEV_NUM && comm->ndev[i])
164 stats = &comm->ndev[i]->stats;
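Lines 140-164 are the TX-completion side: under tx_lock the handler walks from the consumer cursor tx_done_pos toward the producer cursor tx_pos (or all the way around, when tx_desc_full marks a completely full ring), stops at the first descriptor the hardware still owns, and bills the result to the owning port. A sketch with assumed TXD_OWN/TXD_ERR_CODE bits and an assumed TXD_VLAN egress-port field:

spin_lock(&comm->tx_lock);

tx_done_pos = comm->tx_done_pos;
while ((tx_done_pos != comm->tx_pos || comm->tx_desc_full == 1) && budget_left) {
        struct net_device_stats *stats = NULL;
        u32 cmd = comm->tx_desc[tx_done_pos].cmd1;
        int i;

        if (cmd & TXD_OWN)              /* hardware has not finished this one */
                break;

        skbinfo = &comm->tx_temp_skb_info[tx_done_pos];

        i = FIELD_GET(TXD_VLAN, cmd);   /* which port the frame left through */
        if (i < MAX_NETDEV_NUM && comm->ndev[i])
                stats = &comm->ndev[i]->stats;

        if (stats) {
                if (unlikely(cmd & TXD_ERR_CODE)) {
                        stats->tx_errors++;
                } else {
                        stats->tx_packets++;
                        stats->tx_bytes += skbinfo->len;
                }
        }

        /* ... unmap and free, advance tx_done_pos (sketched below) ... */
        budget_left--;
}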
176 dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
186 if (comm->tx_desc_full == 1)
187 comm->tx_desc_full = 0;
192 comm->tx_done_pos = tx_done_pos;
193 if (!comm->tx_desc_full)
195 if (comm->ndev[i])
196 if (netif_queue_stopped(comm->ndev[i]))
197 netif_wake_queue(comm->ndev[i]);
199 spin_unlock(&comm->tx_lock);
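Lines 176-199 finish the reclaim: unmap and free the transmitted buffer, clear tx_desc_full as soon as one slot opens up, and once the walk is over wake any queue the xmit path had stopped on a full ring. A sketch of that tail, with TX_DESC_NUM as an assumed ring-size constant:

/* Per-descriptor cleanup inside the reclaim loop. */
dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
                 DMA_TO_DEVICE);
dev_kfree_skb_irq(skbinfo->skb);        /* softirq-safe free */
skbinfo->skb = NULL;
skbinfo->mapping = 0;

tx_done_pos = (tx_done_pos + 1 == TX_DESC_NUM) ? 0 : tx_done_pos + 1;
if (comm->tx_desc_full == 1)            /* at least one slot is free again */
        comm->tx_desc_full = 0;

/* After the loop: publish the cursor and restart stopped queues. */
comm->tx_done_pos = tx_done_pos;
if (!comm->tx_desc_full)
        for (i = 0; i < MAX_NETDEV_NUM; i++)
                if (comm->ndev[i] && netif_queue_stopped(comm->ndev[i]))
                        netif_wake_queue(comm->ndev[i]);

spin_unlock(&comm->tx_lock);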
201 spin_lock_irqsave(&comm->int_mask_lock, flags);
202 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
204 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
205 spin_unlock_irqrestore(&comm->int_mask_lock, flags);
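Lines 201-205 mirror the RX side's exit sequence: the same int_mask_lock-guarded read-modify-write of L2SW_SW_INT_MASK_0, presumably clearing the TX source this time, so completion interrupts fire again once the poll has caught up. Routing every touch of the mask register through the one lock is what keeps the pattern safe against the hard-IRQ handler below.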
213 struct spl2sw_common *comm = (struct spl2sw_common *)dev_id;
218 status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
220 dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n");
223 writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
227 spin_lock(&comm->int_mask_lock);
228 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
230 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
231 spin_unlock(&comm->int_mask_lock);
235 if (comm->ndev[i]) {
236 comm->ndev[i]->stats.rx_fifo_errors++;
239 dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
242 napi_schedule(&comm->rx_napi);
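Lines 213-242 are the hard-IRQ handler: dev_id carries the spl2sw_common registered with the interrupt, the status register is read and immediately written back to acknowledge the pending sources, a spurious (all-zero) status is only logged, and an RX event is handled by masking the source and deferring the real work to rx_napi; a descriptor error is additionally counted against the first registered net_device. A sketch with MAC_INT_RX/MAC_INT_RX_DES_ERR as assumed bit names:

status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
if (unlikely(!status)) {
        dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n");
        return IRQ_HANDLED;
}
writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);     /* ack all sources */

if (status & MAC_INT_RX) {
        /* Mask RX until the NAPI poll has drained the ring. */
        spin_lock(&comm->int_mask_lock);        /* hard-IRQ context: no irqsave needed */
        mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        mask |= MAC_INT_RX;
        writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        spin_unlock(&comm->int_mask_lock);

        if (unlikely(status & MAC_INT_RX_DES_ERR)) {
                for (i = 0; i < MAX_NETDEV_NUM; i++)
                        if (comm->ndev[i]) {
                                comm->ndev[i]->stats.rx_fifo_errors++;
                                break;          /* count the error once */
                        }
                dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
        }

        napi_schedule(&comm->rx_napi);
}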
247 spin_lock(&comm->int_mask_lock);
248 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
250 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
251 spin_unlock(&comm->int_mask_lock);
255 if (comm->ndev[i]) {
256 comm->ndev[i]->stats.tx_fifo_errors++;
259 dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n");
261 spin_lock(&comm->int_mask_lock);
262 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
264 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
265 spin_unlock(&comm->int_mask_lock);
267 napi_schedule(&comm->tx_napi);
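Lines 247-267 handle the TX event the same way, with one wrinkle visible at lines 261-265: after a TX descriptor error the mask register is touched a second time inside the error branch, and only the non-error path schedules tx_napi. A sketch of that branch under the same assumed bit names; the direction of the second mask update is inferred, not shown:

if (status & MAC_INT_TX) {
        /* Mask TX completions until the poll handler runs. */
        spin_lock(&comm->int_mask_lock);
        mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        mask |= MAC_INT_TX;
        writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
        spin_unlock(&comm->int_mask_lock);

        if (unlikely(status & MAC_INT_TX_DES_ERR)) {
                for (i = 0; i < MAX_NETDEV_NUM; i++)
                        if (comm->ndev[i]) {
                                comm->ndev[i]->stats.tx_fifo_errors++;
                                break;          /* count the error once */
                        }
                dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n");

                /* Error path: re-enable TX directly instead of polling. */
                spin_lock(&comm->int_mask_lock);
                mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                mask &= ~MAC_INT_TX;
                writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
                spin_unlock(&comm->int_mask_lock);
        } else {
                napi_schedule(&comm->tx_napi);
        }
}

return IRQ_HANDLED;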