// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef100_rx.h"
#include "rx_common.h"
#include "efx.h"
#include "nic_common.h"
#include "mcdi_functions.h"
#include "ef100_regs.h"
#include "ef100_nic.h"
#include "io.h"

/* Get the value of a field in the RX prefix */
#define PREFIX_OFFSET_W(_f)	(ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
#define PREFIX_OFFSET_B(_f)	(ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
#define PREFIX_WIDTH_MASK(_f)	((1ULL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
#define PREFIX_WORD(_p, _f)	le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
#define PREFIX_FIELD(_p, _f)	((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
				 PREFIX_WIDTH_MASK(_f))

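/* NT_OR_INNER_L3_CLASS is a subfield of the CLASS field in the RX prefix;
 * synthesise LBN/WIDTH definitions for its absolute position so that
 * PREFIX_FIELD() can extract it directly from the prefix.
 */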
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN	\
		(ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN)
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH	\
		ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH

bool ef100_rx_buf_hash_valid(const u8 *prefix)
{
	return PREFIX_FIELD(prefix, RSS_HASH_VALID);
}

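/* Check the L2 status reported in the RX prefix.  Returns true if the frame
 * carried an L2 error; FCS (CRC) errors are also counted on the channel.
 */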
static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
{
	u16 rxclass;
	u8 l2status;

	rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS));
	l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS);

	if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
		/* Everything is ok */
		return false;

	if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
		channel->n_rx_eth_crc_err++;
	return true;
}

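/* Deliver the packet currently held on the channel: parse the RX prefix,
 * drop frames with L2 errors (unless RXALL is set), divert representor
 * traffic, extract the checksum and hand the buffer to GRO.
 */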
void __ef100_rx_packet(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
						     channel->rx_pkt_index);
	struct efx_nic *efx = channel->efx;
	struct ef100_nic_data *nic_data;
	u8 *eh = efx_rx_buf_va(rx_buf);
	__wsum csum = 0;
	u16 ing_port;
	u32 *prefix;

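	/* The hardware writes the RX prefix immediately before the packet data */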
	prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);

	if (channel->type->receive_raw) {
		u32 mark = PREFIX_FIELD(prefix, USER_MARK);

		if (channel->type->receive_raw(rx_queue, mark))
			return; /* packet was consumed */
	}

	if (ef100_has_fcs_error(channel, prefix) &&
	    unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
		goto out;

	rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH));
	if (rx_buf->len <= sizeof(struct ethhdr)) {
		if (net_ratelimit())
			netif_err(channel->efx, rx_err, channel->efx->net_dev,
				  "RX packet too small (%d)\n", rx_buf->len);
		++channel->n_rx_frm_trunc;
		goto out;
	}

	ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));

	nic_data = efx->nic_data;

	if (nic_data->have_mport && ing_port != nic_data->base_mport) {
#ifdef CONFIG_SFC_SRIOV
		struct efx_rep *efv;

		rcu_read_lock();
		efv = efx_ef100_find_rep_by_mport(efx, ing_port);
		if (efv) {
			if (efv->net_dev->flags & IFF_UP)
				efx_ef100_rep_rx_packet(efv, rx_buf);
			rcu_read_unlock();
			/* Representor Rx doesn't care about PF Rx buffer
			 * ownership, it just makes a copy. So, we are done
			 * with the Rx buffer from PF point of view and should
			 * free it.
			 */
			goto free_rx_buffer;
		}
		rcu_read_unlock();
#endif
		if (net_ratelimit())
			netif_warn(efx, drv, efx->net_dev,
				   "Unrecognised ing_port %04x (base %04x), dropping\n",
				   ing_port, nic_data->base_mport);
		channel->n_rx_mport_bad++;
		goto free_rx_buffer;
	}

	if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
		if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
			++channel->n_rx_ip_hdr_chksum_err;
		} else {
			u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME));

			csum = (__force __wsum) sum;
		}
	}

	if (channel->type->receive_skb) {
		/* no support for special channels yet, so just discard */
		WARN_ON_ONCE(1);
		goto free_rx_buffer;
	}

	efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
	goto out;

free_rx_buffer:
	efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
	channel->rx_pkt_n_frags = 0;
}

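/* Accept one completed RX buffer: sync it for the CPU, skip past the hardware
 * prefix and record it on the channel for __ef100_rx_packet() to deliver.
 */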
static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;

	++rx_queue->rx_packets;

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x\n",
		   efx_rx_queue_index(rx_queue), index);

	efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);

	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;

	efx_recycle_rx_pages(channel, rx_buf, 1);

	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = 1;
	channel->rx_pkt_index = index;
}

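/* Handle an RX completion event; a single event may report several merged
 * packet completions.
 */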
void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	unsigned int n_packets =
		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT);
	int i;

	WARN_ON_ONCE(!n_packets);
	if (n_packets > 1)
		++channel->n_rx_merge_events;

	channel->irq_mod_score += 2 * n_packets;

	for (i = 0; i < n_packets; ++i) {
		ef100_rx_packet(rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask);
		++rx_queue->removed_count;
	}
}

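/* Push any newly added RX descriptors to the hardware and ring the queue's
 * doorbell with the updated producer index.
 */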
void ef100_rx_write(struct efx_rx_queue *rx_queue)
{
	unsigned int notified_count = rx_queue->notified_count;
	struct efx_rx_buffer *rx_buf;
	unsigned int idx;
	efx_qword_t *rxd;
	efx_dword_t rxdb;

	while (notified_count != rx_queue->added_count) {
		idx = notified_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, idx);
		rxd = efx_rx_desc(rx_queue, idx);

		EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);

		++notified_count;
	}
	if (notified_count == rx_queue->notified_count)
		return;

	wmb();
	EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
			     rx_queue->added_count & rx_queue->ptr_mask);
	efx_writed_page(rx_queue->efx, &rxdb,
			ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
	if (rx_queue->grant_credits)
		wmb();
	rx_queue->notified_count = notified_count;
	if (rx_queue->grant_credits)
		schedule_work(&rx_queue->grant_work);
}