// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"

/* check that the QP matches the packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	unsigned int pkt_type;
	unsigned long flags;

	if (unlikely(!qp->valid))
		return -EINVAL;

	/* opcode[7:5] selects the transport class (RC/UC/RD/UD) */
	pkt_type = pkt->opcode & 0xe0;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely(pkt_type != IB_OPCODE_RC))
			return -EINVAL;
		break;
	case IB_QPT_UC:
		if (unlikely(pkt_type != IB_OPCODE_UC))
			return -EINVAL;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (unlikely(pkt_type != IB_OPCODE_UD))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* a QP may receive requests once it reaches RTR, but may not
	 * receive responses until it reaches RTS
	 */
	spin_lock_irqsave(&qp->state_lock, flags);
	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
			spin_unlock_irqrestore(&qp->state_lock, flags);
			return -EINVAL;
		}
	} else {
		if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
			spin_unlock_irqrestore(&qp->state_lock, flags);
			return -EINVAL;
		}
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	return 0;
}
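
/*
 * A worked example of the mask above (a sketch, not compiled): the BTH
 * opcodes in include/rdma/ib_pack.h encode the transport class in bits
 * 7:5 and the operation in bits 4:0, so for a UD send:
 *
 *	opcode   = IB_OPCODE_UD_SEND_ONLY;	// 0x64
 *	pkt_type = opcode & 0xe0;		// 0x60 == IB_OPCODE_UD
 *
 * which is why comparing pkt_type against IB_OPCODE_RC/UC/UD is enough
 * to match the packet against the QP type.
 */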

static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	/* the port counters are 16 bits wide; saturate instead of wrapping */
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
					port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

	/* rxe only supports one partition: the default full-membership pkey */
	pkt->pkey_index = 0;

	if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
		set_bad_pkey_cntr(port);
		return -EINVAL;
	}

	if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			set_qkey_viol_cntr(port);
			return -EINVAL;
		}
	}

	return 0;
}
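
/*
 * Sketch of the Q_Key rule enforced above: datagrams addressed to QP1
 * (the GSI) must carry the IBA well-known Q_Key (0x80010000, which is
 * what GSI_QKEY holds), while datagrams to any other UD QP must carry
 * the Q_Key the QP was configured with via modify_qp:
 *
 *	expected = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
 *	accept iff deth_qkey(pkt) == expected
 */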

static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		return 0;

	if (unlikely(pkt->port_num != qp->attr.port_num))
		return -EINVAL;

	/* an inbound packet travels in the reverse direction from the
	 * QP's address vector: its destination address must match our
	 * SGID and its source address must match our DGID
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
		    (ip_hdr(skb)->saddr != daddr->s_addr))
			return -EINVAL;

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
		    memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
			return -EINVAL;
	}

	return 0;
}
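
/*
 * Concrete example of the direction of the check above (addresses are
 * hypothetical): if the local QP was connected with SGID 10.0.0.1 and
 * DGID 10.0.0.2, a valid inbound packet looks like
 *
 *	ip_hdr(skb)->saddr == 10.0.0.2	(the peer, i.e. our DGID)
 *	ip_hdr(skb)->daddr == 10.0.0.1	(us, i.e. our SGID)
 *
 * so the packet fields pair with the QP's address vector crosswise,
 * which is why daddr is compared against sgid_addr and saddr against
 * dgid_addr.
 */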

static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER))
		goto err1;

	if (unlikely(qpn == 0))
		goto err1;

	if (qpn != IB_MULTICAST_QPN) {
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp))
			goto err1;

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		/* multicast packets must carry a GRH */
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
			goto err1;
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_put(qp);
err1:
	return -EINVAL;
}

/* hand the packet to the responder (requests) or the completer
 * (responses and acknowledgements)
 */
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}

static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mcg *mcg;
	struct rxe_mca *mca;
	struct rxe_qp *qp;
	union ib_gid dgid;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* look up the mcast group corresponding to the mgid; takes a ref */
	mcg = rxe_lookup_mcg(rxe, &dgid);
	if (!mcg)
		goto drop;	/* mcast group not registered */

	spin_lock_bh(&rxe->mcg_lock);

	/* this is an unreliable datagram service, so if delivery of the
	 * multicast packet to a single QP fails we simply move on and
	 * try the rest of the QPs on the list
	 */
	list_for_each_entry(mca, &mcg->qp_list, qp_list) {
		qp = mca->qp;

		/* validate the QP for the incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* for all but the last QP, create a clone of the skb and
		 * pass that to the QP; pass the original skb to the last
		 * QP on the list
		 */
		if (mca->qp_list.next != &mcg->qp_list) {
			struct sk_buff *cskb;
			struct rxe_pkt_info *cpkt;

			cskb = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!cskb))
				continue;

			/* each clone consumes its own ib_dev reference */
			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
				kfree_skb(cskb);
				break;
			}

			cpkt = SKB_TO_PKT(cskb);
			cpkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(cpkt, cskb);
		} else {
			pkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(pkt, skb);
			skb = NULL;	/* mark consumed */
		}
	}

	spin_unlock_bh(&rxe->mcg_lock);

	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

	if (likely(!skb))
		return;

	/* we only get here if the original skb was never handed to a QP:
	 * the group was empty, a check failed on the last QP on the
	 * list, or we broke out of the loop early
	 */
drop:
	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}
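
/*
 * The loop above uses the usual "clone all but the last consumer" idiom
 * to fan one skb out to N receivers without an extra copy. A minimal
 * standalone sketch of the same idea, with a hypothetical consumer list
 * and deliver() callback:
 *
 *	struct consumer { struct list_head list; };
 *	struct consumer *c;
 *
 *	list_for_each_entry(c, &consumers, list) {
 *		if (!list_is_last(&c->list, &consumers)) {
 *			struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
 *
 *			if (cskb)
 *				deliver(c, cskb);
 *		} else {
 *			deliver(c, skb);	// hands off the original
 *			skb = NULL;
 *		}
 *	}
 */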

/**
 * rxe_chk_dgid - validate destination IP address
 * @rxe: rxe device that received the packet
 * @skb: the received packet buffer
 *
 * Accept any loopback packet. Otherwise extract the destination IP
 * address from the packet and accept it if it is a multicast address
 * or if it matches an entry in the SGID table.
 *
 * Returns: 0 on success, a negative errno otherwise
 */
static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (pkt->mask & RXE_LOOPBACK_MASK)
		return 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
		return 0;

	/* takes a reference on the matching gid_attr; drop it right away
	 * since we only care whether the entry exists
	 */
	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}

/* rxe_rcv() is called from the network stack; it consumes the skb on
 * both the success and the drop paths
 */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;

	if (unlikely(skb->len < RXE_BTH_BYTES))
		goto drop;

	if (rxe_chk_dgid(rxe, skb) < 0)
		goto drop;

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	err = rxe_icrc_check(skb, pkt);
	if (unlikely(err))
		goto drop;

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_put(pkt->qp);

	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}
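
/*
 * Caller-side sketch (a simplified view of rxe_udp_encap_recv() in
 * rxe_net.c): the UDP tunnel receive handler looks up the rxe device,
 * which takes the ib_dev reference that the receive path releases once
 * the packet has been consumed or dropped:
 *
 *	rxe = rxe_get_dev_from_net(ndev);	// takes ib_dev reference
 *	if (!rxe)
 *		goto drop;
 *	pkt = SKB_TO_PKT(skb);
 *	pkt->rxe = rxe;
 *	...
 *	rxe_rcv(skb);				// consumes skb + reference
 */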