/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <net/neighbour.h>
#include <net/netevent.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"

#define OCRDMA_VID_PCP_SHIFT	0xD
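
/* Map the address handle's L3 header type to the Ethernet protocol number
 * used on the wire: RoCE v1 frames carry a raw GRH (ETH_P_IBOE), while
 * RoCE v2 frames carry an IPv4 or IPv6 header.
 */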
static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
{
	switch (hdr_type) {
	case OCRDMA_L3_TYPE_IB_GRH:
		return (u16)ETH_P_IBOE;
	case OCRDMA_L3_TYPE_IPV4:
		return (u16)ETH_P_IP;
	case OCRDMA_L3_TYPE_IPV6:
		return (u16)ETH_P_IPV6;
	default:
		pr_err("ocrdma%d: Invalid network header\n", devid);
		return 0;
	}
}
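
/* Build the on-the-wire headers for an address vector: an Ethernet header
 * (802.1Q tagged when a VLAN or PFC is in use) followed by either an IPv4
 * header (RoCE v2 over IPv4) or a GRH, written into the AV (ah->av).
 */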
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
			struct rdma_ah_attr *attr, const union ib_gid *sgid,
			int pdid, bool *isvlan, u16 vlan_tag)
{
	int status;
	struct ocrdma_eth_vlan eth;
	struct ocrdma_grh grh;
	int eth_sz;
	u16 proto_num = 0;
	u8 nxthdr = 0x11;
	struct iphdr ipv4;
	const struct ib_global_route *ib_grh;
	union {
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;

	memset(&eth, 0, sizeof(eth));
	memset(&grh, 0, sizeof(grh));

	/* Protocol Number */
	proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
	if (!proto_num)
		return -EINVAL;
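	/* RoCE v1 carries the IB BTH directly after the GRH (next header
	 * 0x1b); RoCE v2 encapsulates it in UDP (IP protocol 0x11).
	 */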
	nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;
	/* VLAN */
	if (!vlan_tag || (vlan_tag > 0xFFF))
		vlan_tag = dev->pvid;
	if (vlan_tag || dev->pfc_state) {
		if (!vlan_tag) {
			pr_err("ocrdma%d: Using a VLAN with PFC is recommended\n",
				dev->id);
			pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
				dev->id);
		}
		eth.eth_type = cpu_to_be16(0x8100);
		eth.roce_eth_type = cpu_to_be16(proto_num);
		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
		eth.vlan_tag = cpu_to_be16(vlan_tag);
		eth_sz = sizeof(struct ocrdma_eth_vlan);
		*isvlan = true;
	} else {
		eth.eth_type = cpu_to_be16(proto_num);
		eth_sz = sizeof(struct ocrdma_eth_basic);
	}
	/* MAC */
	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
	if (status)
		return status;
	ib_grh = rdma_ah_read_grh(attr);
	ah->sgid_index = ib_grh->sgid_index;
	/* Eth HDR */
	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
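	/* L3 header: RoCE v2 over IPv4 gets an IPv4 header; RoCE v1 and
	 * RoCE v2 over IPv6 get a GRH.
	 */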
	if (ah->hdr_type == RDMA_NETWORK_IPV4) {
		*((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
					   ib_grh->traffic_class);
		ipv4.id = cpu_to_be16(pdid);
		ipv4.frag_off = htons(IP_DF);
		ipv4.tot_len = htons(0);
		ipv4.ttl = ib_grh->hop_limit;
		ipv4.protocol = nxthdr;
		rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
		ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
		rdma_gid2ip((struct sockaddr *)&dgid_addr, &ib_grh->dgid);
		ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
		memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
	} else {
		memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
		grh.tclass_flow = cpu_to_be32((6 << 28) |
					      (ib_grh->traffic_class << 24) |
					      ib_grh->flow_label);
		memcpy(&grh.dgid[0], ib_grh->dgid.raw,
		       sizeof(ib_grh->dgid.raw));
		grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
						(nxthdr << 8) |
						ib_grh->hop_limit);
		memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
	}
	if (*isvlan)
		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
	ah->av->valid = cpu_to_le32(ah->av->valid);
	return status;
}
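
/* Create an address handle: allocate a hardware AV, fill in its on-the-wire
 * headers, and, for user-space PDs, publish the AH id (plus L3 type and VLAN
 * flag) through the context's AH table.
 */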
int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
		     struct ib_udata *udata)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	bool isvlan = false;
	u16 vlan_tag = 0xffff;
	const struct ib_gid_attr *sgid_attr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
	struct rdma_ah_attr *attr = init_attr->ah_attr;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);

	if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
	    !(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
		return -EINVAL;
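
	/* Re-initialize the service level if it has been flagged for update */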
	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);

	sgid_attr = attr->grh.sgid_attr;
	status = rdma_read_gid_l2_fields(sgid_attr, &vlan_tag, NULL);
	if (status)
		return status;

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

	/* Get network header type for this GID */
	ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);

	status = set_av_attr(dev, ah, attr, &sgid_attr->gid, pd->id,
			     &isvlan, vlan_tag);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
		*ahid_addr = 0;
		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
		if (ocrdma_is_udp_encap_supported(dev)) {
			*ahid_addr |= ((u32)ah->hdr_type &
				       OCRDMA_AH_L3_TYPE_MASK) <<
				       OCRDMA_AH_L3_TYPE_SHIFT;
		}
		if (isvlan)
			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
				       OCRDMA_AH_VLAN_VALID_SHIFT);
	}

	return 0;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	return status;
}
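
/* Destroy an address handle by releasing its hardware AV. */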
int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);

	ocrdma_free_av(dev, ah);
	return 0;
}
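
/* Reconstruct the AH attributes (SL, GRH fields) from the headers that were
 * written into the AV when the AH was created.
 */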
int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_av *av = ah->av;
	struct ocrdma_grh *grh;

	attr->type = ibah->type;
	if (ah->av->valid & OCRDMA_AV_VALID) {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
				sizeof(struct ocrdma_eth_vlan));
		rdma_ah_set_sl(attr, be16_to_cpu(av->eth_hdr.vlan_tag) >>
				     OCRDMA_VID_PCP_SHIFT);
	} else {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					sizeof(struct ocrdma_eth_basic));
		rdma_ah_set_sl(attr, 0);
	}
	rdma_ah_set_grh(attr, NULL,
			be32_to_cpu(grh->tclass_flow) & 0xffffffff,
			ah->sgid_index,
			be32_to_cpu(grh->pdid_hoplimit) & 0xff,
			be32_to_cpu(grh->tclass_flow) >> 24);
	rdma_ah_set_dgid_raw(attr, &grh->dgid[0]);
	return 0;
}
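
/* MAD handler: only performance-management (PMA) queries are handled, by
 * filling the reply from the driver's counters; other classes get no reply.
 */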
int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
		       u32 port_num, const struct ib_wc *in_wc,
		       const struct ib_grh *in_grh, const struct ib_mad *in,
		       struct ib_mad *out, size_t *out_mad_size,
		       u16 *out_mad_pkey_index)
{
	int status = IB_MAD_RESULT_SUCCESS;
	struct ocrdma_dev *dev;

	if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		dev = get_ocrdma_dev(ibdev);
		ocrdma_pma_counters(dev, out);
		status |= IB_MAD_RESULT_REPLY;
	}

	return status;
}