// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"
#include <net/gso.h>

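/* Number of pages of copy buffers needed to back the whole ring, with
 * (PAGE_SIZE >> EFX_TX_CB_ORDER) copy buffers fitting in each page.
 */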
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

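/* Allocate the software state and hardware ring for a TX queue.  The ring
 * size is rounded up to a power of two so that ring indices can be wrapped
 * with a simple mask.
 */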
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring, determine TXQ type */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

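/* Reset the software state of the queue and (re)initialise the hardware
 * descriptor ring.
 */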
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_pending = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
	tx_queue->tso_version = 0;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

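/* Shut down a TX queue, releasing any buffers still outstanding between the
 * read and write pointers.
 */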
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		unsigned int efv_pkts_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
				   &efv_pkts_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_pending = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

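/* Free the hardware ring, the copy buffer pages and the software descriptor
 * array allocated by efx_probe_tx_queue().
 */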
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

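/* Release a single TX buffer: unmap its DMA region, complete any attached
 * skb (reporting a hardware timestamp if one is pending) or XDP frame, and
 * update the completion counters supplied by the caller.
 */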
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl,
			unsigned int *efv_pkts_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) {
			EFX_WARN_ON_PARANOID(!efv_pkts_compl);
			(*efv_pkts_compl)++;
		} else {
			EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
			(*pkts_compl)++;
			(*bytes_compl) += skb->len;
		}

		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl,
				unsigned int *efv_pkts_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
				   efv_pkts_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

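/* If the queue has been completely drained, record the current read pointer
 * in empty_read_count, marked valid with EFX_EMPTY_COUNT_VALID.
 */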
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

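/* Process TX completions up to and including the given descriptor index and
 * wake the core netdev queue if it was stopped and the fill level has
 * dropped to or below the wake threshold.  Returns the number of packets
 * completed.
 */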
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	unsigned int efv_pkts_compl = 0;
	struct efx_nic *efx = tx_queue->efx;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
			    &efv_pkts_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl + efv_pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);

	return pkts_compl + efv_pkts_compl;
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	unsigned int efv_pkts_compl = 0;
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
				   &efv_pkts_compl);
	}
}

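/* Add descriptors for a single DMA-mapped region, splitting it as required
 * by any NIC-specific per-descriptor length limit.  Returns the last buffer
 * used so that the caller can fix up its flags and unmap information.
 */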
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

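/* Total header length of a TSO skb, from the start of the frame to the end
 * of the (inner) TCP header.
 */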
int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_offset(skb) +
				(inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

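/* Worst-case number of descriptors that a single TSO skb may require on
 * this NIC.
 */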
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
						EFX_PAGE_SIZE));

	return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}