Lines matching references to tf (struct tbnet_frame) in the Thunderbolt networking driver (tbnet); the leading number on each line is the line number in the driver source.

329 static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
331 return tf->frame.size ? : TBNET_FRAME_SIZE;
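The first two hits are the frame-size helper. Below is a minimal sketch of that helper together with a struct tbnet_frame layout inferred from the fields the other hits touch (tf->dev, tf->page, tf->frame); the field set and the includes are assumptions based on this listing, and TBNET_FRAME_SIZE is taken to be the driver's full-frame size constant.

#include <linux/netdevice.h>
#include <linux/thunderbolt.h>		/* struct ring_frame */

struct tbnet_frame {
	struct net_device *dev;		/* backing netdev, tf->dev in the hits */
	struct page *page;		/* page that holds the frame buffer */
	struct ring_frame frame;	/* descriptor handed to the Thunderbolt ring */
};

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	/* GNU "x ? : y" (elvis) operator: x when non-zero, otherwise y */
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}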
340 struct tbnet_frame *tf = &ring->frames[i];
345 if (!tf->page)
358 trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
360 if (tf->frame.buffer_phy)
361 dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
364 __free_pages(tf->page, order);
365 tf->page = NULL;
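The hits from the free path show the teardown order for one ring: skip slots that never got a page, unmap the DMA address only if one was recorded, then release the page and clear the pointer. A sketch of that loop follows; the wrapper name, the struct tbnet_ring type (a frames[] array plus the ring pointer), the TBNET_RING_SIZE bound and the size/order/dir parameters are illustrative assumptions, since in the driver they depend on whether the ring is a TX or an RX ring.

/* Hypothetical wrapper; mirrors the free pattern in the hits above. */
static void tbnet_free_ring_buffers(struct tbnet_ring *ring,
				    struct device *dma_dev, size_t size,
				    unsigned int order,
				    enum dma_data_direction dir)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];

		if (!tf->page)
			continue;

		/* Only unmap when dma_map_page() actually succeeded earlier */
		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size, dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}
}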
507 struct tbnet_frame *tf = &ring->frames[index];
510 if (tf->page)
517 tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
518 if (!tf->page) {
523 dma_addr = dma_map_page(dma_dev, tf->page, 0,
530 tf->frame.buffer_phy = dma_addr;
531 tf->dev = net->dev;
533 trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
536 tb_ring_rx(ring->ring, &tf->frame);
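The RX allocation hits show the refill pattern: a slot that still owns a page is left alone, otherwise a higher-order page is allocated, mapped for device writes, recorded in the embedded ring_frame and queued on the receive ring. A hedged sketch of one refill step; the function name, the error handling and the DMA_FROM_DEVICE direction (cut off in the hits) are assumptions.

/* Sketch of one RX refill step; tbnet_refill_rx_slot() is an illustrative name. */
static int tbnet_refill_rx_slot(struct tbnet *net, struct tbnet_ring *ring,
				unsigned int index, struct device *dma_dev)
{
	struct tbnet_frame *tf = &ring->frames[index];
	dma_addr_t dma_addr;

	/* Slot still owns a page from a previous cycle: nothing to do */
	if (tf->page)
		return 0;

	/* RX buffers are higher-order pages so a full frame fits contiguously */
	tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
	if (!tf->page)
		return -ENOMEM;

	/* The device writes into the buffer, so map it DMA_FROM_DEVICE */
	dma_addr = dma_map_page(dma_dev, tf->page, 0,
				PAGE_SIZE << TBNET_RX_PAGE_ORDER,
				DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		__free_pages(tf->page, TBNET_RX_PAGE_ORDER);
		tf->page = NULL;
		return -ENOMEM;
	}

	tf->frame.buffer_phy = dma_addr;
	tf->dev = net->dev;

	/* Queue the descriptor on the Thunderbolt RX ring */
	tb_ring_rx(ring->ring, &tf->frame);
	return 0;
}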
552 struct tbnet_frame *tf;
560 tf = &ring->frames[index];
561 tf->frame.size = 0;
563 dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
564 tbnet_frame_size(tf), DMA_TO_DEVICE);
566 return tf;
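The TX-side hits show how a pre-mapped transmit slot is claimed before it is filled: the stale size is cleared so tbnet_frame_size() reports the full buffer, and the still-mapped buffer is synced back to the CPU with the same DMA_TO_DEVICE direction it was mapped with. A short sketch; the function name and the way the slot index is chosen are assumptions.

/* Sketch of claiming the next pre-allocated TX frame; names are illustrative. */
static struct tbnet_frame *tbnet_claim_tx_frame(struct tbnet_ring *ring,
						struct device *dma_dev,
						unsigned int index)
{
	struct tbnet_frame *tf = &ring->frames[index];

	/* Reset the size so tbnet_frame_size() reports the full buffer */
	tf->frame.size = 0;

	/* Hand the still-mapped TX buffer to the CPU before writing into it */
	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}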
572 struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
573 struct tbnet *net = netdev_priv(tf->dev);
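The callback hits show the container_of() pattern used to get from the generic struct ring_frame handed back by the ring layer to the driver's wrapper, and from there to the netdev private data. A minimal sketch; the three-argument signature is the Thunderbolt ring_cb type, and the real completion work is reduced to a comment.

static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	/* The ring layer only knows struct ring_frame; recover the wrapper */
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	/* The driver returns the slot to the TX ring and may wake the
	 * transmit queue here; sketched as a no-op.
	 */
	(void)net;
}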
589 struct tbnet_frame *tf = &ring->frames[i];
592 tf->page = alloc_page(GFP_KERNEL);
593 if (!tf->page) {
598 dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
601 __free_page(tf->page);
602 tf->page = NULL;
607 tf->dev = net->dev;
608 tf->frame.buffer_phy = dma_addr;
609 tf->frame.callback = tbnet_tx_callback;
610 tf->frame.sof = TBIP_PDF_FRAME_START;
611 tf->frame.eof = TBIP_PDF_FRAME_END;
613 trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
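The TX allocation hits show that transmit buffers are set up once, at ring creation: one page per slot, mapped DMA_TO_DEVICE, with the completion callback and the ThunderboltIP start/end protocol descriptor flags filled in up front. A sketch of one slot's setup; the wrapper name and the unwinding of previously initialised slots on failure are assumptions.

/* Sketch of initialising one TX slot; tbnet_setup_tx_slot() is illustrative. */
static int tbnet_setup_tx_slot(struct tbnet *net, struct tbnet_ring *ring,
			       unsigned int i, struct device *dma_dev)
{
	struct tbnet_frame *tf = &ring->frames[i];
	dma_addr_t dma_addr;

	tf->page = alloc_page(GFP_KERNEL);
	if (!tf->page)
		return -ENOMEM;

	/* The CPU fills the buffer and the device reads it: map DMA_TO_DEVICE */
	dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
				DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		__free_page(tf->page);
		tf->page = NULL;
		return -ENOMEM;
	}

	tf->dev = net->dev;
	tf->frame.buffer_phy = dma_addr;
	tf->frame.callback = tbnet_tx_callback;
	/* Every TX frame carries a complete ThunderboltIP frame */
	tf->frame.sof = TBIP_PDF_FRAME_START;
	tf->frame.eof = TBIP_PDF_FRAME_END;

	return 0;
}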
731 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
737 if (tf->frame.flags & RING_DESC_CRC_ERROR) {
740 } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
746 size = tbnet_frame_size(tf);
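The validation hits check the status flags the hardware writes back into the completed descriptor before the received length is trusted. A sketch of that check; the min_size parameter stands in for the driver's comparison against its ThunderboltIP frame header, and the per-error statistics updates are omitted.

/* Sketch of the descriptor-flag checks; header validation is simplified. */
static bool tbnet_rx_frame_ok(const struct tbnet_frame *tf, size_t min_size)
{
	/* CRC and buffer-overrun problems are reported via the descriptor flags */
	if (tf->frame.flags & RING_DESC_CRC_ERROR)
		return false;
	if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
		return false;

	/* The driver compares the size against its frame header here */
	return tbnet_frame_size(tf) > min_size;
}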
814 struct tbnet_frame *tf;
835 tf = container_of(frame, typeof(*tf), frame);
837 page = tf->page;
838 tf->page = NULL;
843 if (!tbnet_check_frame(net, tf, hdr)) {
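The poll-path hits show the ownership handover for a completed RX frame: the ring_frame is mapped back to its tbnet_frame, the page is taken out of the slot so the refill path allocates a fresh one, and the frame is validated before the page is turned into an skb. A sketch of that step; the DMA unmap before the CPU touches the page and the page_address() header pointer are assumptions, and the skb construction is left out.

/* Sketch of claiming the page of one completed RX frame; names are illustrative. */
static struct page *tbnet_take_rx_page(struct tbnet *net, struct device *dma_dev,
				       struct ring_frame *frame)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct page *page;

	/* Give the buffer back to the CPU before reading the header */
	dma_unmap_page(dma_dev, frame->buffer_phy,
		       PAGE_SIZE << TBNET_RX_PAGE_ORDER, DMA_FROM_DEVICE);

	/* Take the page; the refill path sees tf->page == NULL and replaces it */
	page = tf->page;
	tf->page = NULL;

	/* Drop bad frames; page_address(page) supplies the header argument */
	if (!tbnet_check_frame(net, tf, page_address(page))) {
		__free_pages(page, TBNET_RX_PAGE_ORDER);
		return NULL;
	}

	return page;
}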