1/*
2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#if HAVE_CONFIG_H
35#  include <config.h>
36#endif /* HAVE_CONFIG_H */
37
38#include <stdio.h>
39#include <netinet/in.h>
40#include <unistd.h>
41#include <stdlib.h>
42#include <errno.h>
43#include <string.h>
44
45#include "ibverbs.h"
46
47int ibv_rate_to_mult(enum ibv_rate rate)
48{
49	switch (rate) {
50	case IBV_RATE_2_5_GBPS: return  1;
51	case IBV_RATE_5_GBPS:   return  2;
52	case IBV_RATE_10_GBPS:  return  4;
53	case IBV_RATE_20_GBPS:  return  8;
54	case IBV_RATE_30_GBPS:  return 12;
55	case IBV_RATE_40_GBPS:  return 16;
56	case IBV_RATE_60_GBPS:  return 24;
57	case IBV_RATE_80_GBPS:  return 32;
58	case IBV_RATE_120_GBPS: return 48;
59	default:           return -1;
60	}
61}
62
63enum ibv_rate mult_to_ibv_rate(int mult)
64{
65	switch (mult) {
66	case 1:  return IBV_RATE_2_5_GBPS;
67	case 2:  return IBV_RATE_5_GBPS;
68	case 4:  return IBV_RATE_10_GBPS;
69	case 8:  return IBV_RATE_20_GBPS;
70	case 12: return IBV_RATE_30_GBPS;
71	case 16: return IBV_RATE_40_GBPS;
72	case 24: return IBV_RATE_60_GBPS;
73	case 32: return IBV_RATE_80_GBPS;
74	case 48: return IBV_RATE_120_GBPS;
75	default: return IBV_RATE_MAX;
76	}
77}
78
79int __ibv_query_device(struct ibv_context *context,
80		       struct ibv_device_attr *device_attr)
81{
82	return context->ops.query_device(context, device_attr);
83}
84default_symver(__ibv_query_device, ibv_query_device);
85
86int __ibv_query_port(struct ibv_context *context, uint8_t port_num,
87		     struct ibv_port_attr *port_attr)
88{
89	return context->ops.query_port(context, port_num, port_attr);
90}
91default_symver(__ibv_query_port, ibv_query_port);
92
93int __ibv_query_gid(struct ibv_context *context, uint8_t port_num,
94		    int index, union ibv_gid *gid)
95{
96	char name[24];
97	char attr[41];
98	uint16_t val;
99	int i;
100
101	snprintf(name, sizeof name, "ports/%d/gids/%d", port_num, index);
102
103	if (ibv_read_sysfs_file(context->device->ibdev_path, name,
104				attr, sizeof attr) < 0)
105		return -1;
106
107	for (i = 0; i < 8; ++i) {
108		if (sscanf(attr + i * 5, "%hx", &val) != 1)
109			return -1;
110		gid->raw[i * 2    ] = val >> 8;
111		gid->raw[i * 2 + 1] = val & 0xff;
112	}
113
114	return 0;
115}
116default_symver(__ibv_query_gid, ibv_query_gid);
117
118int __ibv_query_pkey(struct ibv_context *context, uint8_t port_num,
119		     int index, uint16_t *pkey)
120{
121	char name[24];
122	char attr[8];
123	uint16_t val;
124
125	snprintf(name, sizeof name, "ports/%d/pkeys/%d", port_num, index);
126
127	if (ibv_read_sysfs_file(context->device->ibdev_path, name,
128				attr, sizeof attr) < 0)
129		return -1;
130
131	if (sscanf(attr, "%hx", &val) != 1)
132		return -1;
133
134	*pkey = htons(val);
135	return 0;
136}
137default_symver(__ibv_query_pkey, ibv_query_pkey);
138
139struct ibv_pd *__ibv_alloc_pd(struct ibv_context *context)
140{
141	struct ibv_pd *pd;
142
143	pd = context->ops.alloc_pd(context);
144	if (pd)
145		pd->context = context;
146
147	return pd;
148}
149default_symver(__ibv_alloc_pd, ibv_alloc_pd);
150
151int __ibv_dealloc_pd(struct ibv_pd *pd)
152{
153	return pd->context->ops.dealloc_pd(pd);
154}
155default_symver(__ibv_dealloc_pd, ibv_dealloc_pd);
156
157struct ibv_mr *__ibv_reg_mr(struct ibv_pd *pd, void *addr,
158			    size_t length, int access)
159{
160	struct ibv_mr *mr;
161
162	if (ibv_dontfork_range(addr, length))
163		return NULL;
164
165	mr = pd->context->ops.reg_mr(pd, addr, length, access);
166	if (mr) {
167		mr->context = pd->context;
168		mr->pd      = pd;
169		mr->addr    = addr;
170		mr->length  = length;
171	} else
172		ibv_dofork_range(addr, length);
173
174	return mr;
175}
176default_symver(__ibv_reg_mr, ibv_reg_mr);
177
178int __ibv_dereg_mr(struct ibv_mr *mr)
179{
180	int ret;
181	void *addr	= mr->addr;
182	size_t length	= mr->length;
183
184	ret = mr->context->ops.dereg_mr(mr);
185	if (!ret)
186		ibv_dofork_range(addr, length);
187
188	return ret;
189}
190default_symver(__ibv_dereg_mr, ibv_dereg_mr);
191
/*
 * ABI v2 compatibility path: the old kernel ABI has no completion
 * channel object, so hand out the single channel embedded in the
 * context's abi_compat struct.  The in_use mutex marks it as taken;
 * a second call therefore fails, with a one-time warning.
 */
static struct ibv_comp_channel *ibv_create_comp_channel_v2(struct ibv_context *context)
{
	struct ibv_abi_compat_v2 *t = context->abi_compat;
	static int warned;

	/* trylock returns 0 on success, i.e. the channel was still free */
	if (!pthread_mutex_trylock(&t->in_use))
		return &t->channel;

	if (!warned) {
		fprintf(stderr, PFX "Warning: kernel's ABI version %d limits capacity.\n"
			"    Only one completion channel can be created per context.\n",
			abi_ver);
		++warned;
	}

	return NULL;
}
209
/*
 * Create a completion event channel: a file descriptor through which
 * the kernel delivers completion events for CQs bound to the channel.
 * Returns NULL on failure.
 */
struct ibv_comp_channel *ibv_create_comp_channel(struct ibv_context *context)
{
	struct ibv_comp_channel            *channel;
	struct ibv_create_comp_channel      cmd;
	struct ibv_create_comp_channel_resp resp;

	/* Kernel ABI <= 2 has no comp-channel command; emulate it. */
	if (abi_ver <= 2)
		return ibv_create_comp_channel_v2(context);

	channel = malloc(sizeof *channel);
	if (!channel)
		return NULL;

	/* Issue the uverbs command; a short write means it failed. */
	IBV_INIT_CMD_RESP(&cmd, sizeof cmd, CREATE_COMP_CHANNEL, &resp, sizeof resp);
	if (write(context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd) {
		free(channel);
		return NULL;
	}

	VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);

	channel->context = context;
	channel->fd      = resp.fd;	/* event fd allocated by the kernel */
	channel->refcnt  = 0;		/* number of CQs bound to this channel */

	return channel;
}
237
static int ibv_destroy_comp_channel_v2(struct ibv_comp_channel *channel)
{
	/* The v2 channel handed out by ibv_create_comp_channel_v2() is
	 * &t->channel; casting it back assumes `channel` is the first
	 * member of struct ibv_abi_compat_v2 (NOTE(review): confirm
	 * against that struct's layout).  Unlocking marks it free again. */
	struct ibv_abi_compat_v2 *t = (struct ibv_abi_compat_v2 *) channel;
	pthread_mutex_unlock(&t->in_use);
	return 0;
}
244
/*
 * Destroy a completion channel.  Fails with EBUSY while any CQ is
 * still bound to it (refcnt != 0).
 */
int ibv_destroy_comp_channel(struct ibv_comp_channel *channel)
{
	struct ibv_context *context;
	int ret;

	context = channel->context;
	/* Serialize against CQ create/destroy, which update channel->refcnt. */
	pthread_mutex_lock(&context->mutex);

	if (channel->refcnt) {
		ret = EBUSY;
		goto out;
	}

	/* v2-emulated channels are embedded in the context, not malloc'ed:
	 * no fd to close, nothing to free. */
	if (abi_ver <= 2) {
		ret = ibv_destroy_comp_channel_v2(channel);
		goto out;
	}

	close(channel->fd);
	free(channel);
	ret = 0;

out:
	pthread_mutex_unlock(&context->mutex);

	return ret;
}
272
273struct ibv_cq *__ibv_create_cq(struct ibv_context *context, int cqe, void *cq_context,
274			       struct ibv_comp_channel *channel, int comp_vector)
275{
276	struct ibv_cq *cq;
277
278	pthread_mutex_lock(&context->mutex);
279
280	cq = context->ops.create_cq(context, cqe, channel, comp_vector);
281
282	if (cq) {
283		cq->context    	     	   = context;
284		cq->channel		   = channel;
285		if (channel)
286			++channel->refcnt;
287		cq->cq_context 	     	   = cq_context;
288		cq->comp_events_completed  = 0;
289		cq->async_events_completed = 0;
290		pthread_mutex_init(&cq->mutex, NULL);
291		pthread_cond_init(&cq->cond, NULL);
292	}
293
294	pthread_mutex_unlock(&context->mutex);
295
296	return cq;
297}
298default_symver(__ibv_create_cq, ibv_create_cq);
299
300int __ibv_resize_cq(struct ibv_cq *cq, int cqe)
301{
302	if (!cq->context->ops.resize_cq)
303		return ENOSYS;
304
305	return cq->context->ops.resize_cq(cq, cqe);
306}
307default_symver(__ibv_resize_cq, ibv_resize_cq);
308
309int __ibv_destroy_cq(struct ibv_cq *cq)
310{
311	struct ibv_comp_channel *channel = cq->channel;
312	int ret;
313
314	if (channel)
315		pthread_mutex_lock(&channel->context->mutex);
316
317	ret = cq->context->ops.destroy_cq(cq);
318
319	if (channel) {
320		if (!ret)
321			--channel->refcnt;
322		pthread_mutex_unlock(&channel->context->mutex);
323	}
324
325	return ret;
326}
327default_symver(__ibv_destroy_cq, ibv_destroy_cq);
328
/*
 * Block until a completion event arrives on the channel and return the
 * CQ it refers to plus that CQ's user context.  The caller must later
 * acknowledge the event via ibv_ack_cq_events().  Returns -1 on a
 * short or failed read.
 */
int __ibv_get_cq_event(struct ibv_comp_channel *channel,
		       struct ibv_cq **cq, void **cq_context)
{
	struct ibv_comp_event ev;

	if (read(channel->fd, &ev, sizeof ev) != sizeof ev)
		return -1;

	/* The kernel echoes back the userspace CQ pointer as a handle. */
	*cq         = (struct ibv_cq *) (uintptr_t) ev.cq_handle;
	*cq_context = (*cq)->cq_context;

	/* Let the provider do per-CQ event bookkeeping, if it has a hook. */
	if ((*cq)->context->ops.cq_event)
		(*cq)->context->ops.cq_event(*cq);

	return 0;
}
default_symver(__ibv_get_cq_event, ibv_get_cq_event);
346
/*
 * Acknowledge `nevents` completion events previously returned by
 * ibv_get_cq_event().  Signals cq->cond so any thread waiting on the
 * event count can re-check it.
 */
void __ibv_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
{
	pthread_mutex_lock(&cq->mutex);
	cq->comp_events_completed += nevents;
	pthread_cond_signal(&cq->cond);
	pthread_mutex_unlock(&cq->mutex);
}
default_symver(__ibv_ack_cq_events, ibv_ack_cq_events);
355
356struct ibv_srq *__ibv_create_srq(struct ibv_pd *pd,
357				 struct ibv_srq_init_attr *srq_init_attr)
358{
359	struct ibv_srq *srq;
360
361	if (!pd->context->ops.create_srq)
362		return NULL;
363
364	srq = pd->context->ops.create_srq(pd, srq_init_attr);
365	if (srq) {
366		srq->context          = pd->context;
367		srq->srq_context      = srq_init_attr->srq_context;
368		srq->pd               = pd;
369		srq->xrc_domain       = NULL;
370		srq->xrc_cq           = NULL;
371		srq->xrc_srq_num      = 0;
372		srq->events_completed = 0;
373		pthread_mutex_init(&srq->mutex, NULL);
374		pthread_cond_init(&srq->cond, NULL);
375	}
376
377	return srq;
378}
379default_symver(__ibv_create_srq, ibv_create_srq);
380
381struct ibv_srq *ibv_create_xrc_srq(struct ibv_pd *pd,
382				   struct ibv_xrc_domain *xrc_domain,
383				   struct ibv_cq *xrc_cq,
384				   struct ibv_srq_init_attr *srq_init_attr)
385{
386	struct ibv_srq *srq;
387
388	if (!pd->context->more_ops)
389		return NULL;
390
391	srq = pd->context->more_ops->create_xrc_srq(pd, xrc_domain,
392						    xrc_cq, srq_init_attr);
393	if (srq) {
394		srq->context          = pd->context;
395		srq->srq_context      = srq_init_attr->srq_context;
396		srq->pd               = pd;
397		srq->xrc_domain       = xrc_domain;
398		srq->xrc_cq           = xrc_cq;
399		srq->events_completed = 0;
400		pthread_mutex_init(&srq->mutex, NULL);
401		pthread_cond_init(&srq->cond, NULL);
402	}
403
404	return srq;
405}
406
407int __ibv_modify_srq(struct ibv_srq *srq,
408		     struct ibv_srq_attr *srq_attr,
409		     int srq_attr_mask)
410{
411	return srq->context->ops.modify_srq(srq, srq_attr, srq_attr_mask);
412}
413default_symver(__ibv_modify_srq, ibv_modify_srq);
414
415int __ibv_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr)
416{
417	return srq->context->ops.query_srq(srq, srq_attr);
418}
419default_symver(__ibv_query_srq, ibv_query_srq);
420
421int __ibv_destroy_srq(struct ibv_srq *srq)
422{
423	return srq->context->ops.destroy_srq(srq);
424}
425default_symver(__ibv_destroy_srq, ibv_destroy_srq);
426
427struct ibv_qp *__ibv_create_qp(struct ibv_pd *pd,
428			       struct ibv_qp_init_attr *qp_init_attr)
429{
430	struct ibv_qp *qp = pd->context->ops.create_qp(pd, qp_init_attr);
431
432	if (qp) {
433		qp->context    	     = pd->context;
434		qp->qp_context 	     = qp_init_attr->qp_context;
435		qp->pd         	     = pd;
436		qp->send_cq    	     = qp_init_attr->send_cq;
437		qp->recv_cq    	     = qp_init_attr->recv_cq;
438		qp->srq        	     = qp_init_attr->srq;
439		qp->qp_type          = qp_init_attr->qp_type;
440		qp->state	     = IBV_QPS_RESET;
441		qp->events_completed = 0;
442		qp->xrc_domain       = qp_init_attr->qp_type == IBV_QPT_XRC ?
443			qp_init_attr->xrc_domain : NULL;
444		pthread_mutex_init(&qp->mutex, NULL);
445		pthread_cond_init(&qp->cond, NULL);
446	}
447
448	return qp;
449}
450default_symver(__ibv_create_qp, ibv_create_qp);
451
452int __ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
453		   int attr_mask,
454		   struct ibv_qp_init_attr *init_attr)
455{
456	int ret;
457
458	ret = qp->context->ops.query_qp(qp, attr, attr_mask, init_attr);
459	if (ret)
460		return ret;
461
462	if (attr_mask & IBV_QP_STATE)
463		qp->state = attr->qp_state;
464
465	return 0;
466}
467default_symver(__ibv_query_qp, ibv_query_qp);
468
469int __ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
470		    int attr_mask)
471{
472	int ret;
473
474	ret = qp->context->ops.modify_qp(qp, attr, attr_mask);
475	if (ret)
476		return ret;
477
478	if (attr_mask & IBV_QP_STATE)
479		qp->state = attr->qp_state;
480
481	return 0;
482}
483default_symver(__ibv_modify_qp, ibv_modify_qp);
484
485int __ibv_destroy_qp(struct ibv_qp *qp)
486{
487	return qp->context->ops.destroy_qp(qp);
488}
489default_symver(__ibv_destroy_qp, ibv_destroy_qp);
490
491struct ibv_ah *__ibv_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
492{
493	struct ibv_ah *ah = pd->context->ops.create_ah(pd, attr);
494
495	if (ah) {
496		ah->context = pd->context;
497		ah->pd      = pd;
498	}
499
500	return ah;
501}
502default_symver(__ibv_create_ah, ibv_create_ah);
503
504static int ibv_find_gid_index(struct ibv_context *context, uint8_t port_num,
505			      union ibv_gid *gid)
506{
507	union ibv_gid sgid;
508	int i = 0, ret;
509
510	do {
511		ret = ibv_query_gid(context, port_num, i++, &sgid);
512	} while (!ret && memcmp(&sgid, gid, sizeof *gid));
513
514	return ret ? ret : i - 1;
515}
516
/*
 * Fill in ah_attr so that an AH created from it reaches the sender of
 * the given work completion, i.e. reverse the received path in wc/grh.
 * Returns 0 on success or a negative value if no local GID matches the
 * one the sender addressed.
 */
int ibv_init_ah_from_wc(struct ibv_context *context, uint8_t port_num,
			struct ibv_wc *wc, struct ibv_grh *grh,
			struct ibv_ah_attr *ah_attr)
{
	uint32_t flow_class;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;	/* reply to the sender's LID */
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IBV_WC_GRH) {
		ah_attr->is_global = 1;
		/* the sender's source GID becomes our destination GID */
		ah_attr->grh.dgid = grh->sgid;

		/* find which local GID the sender addressed; use it as SGID */
		ret = ibv_find_gid_index(context, port_num, &grh->dgid);
		if (ret < 0)
			return ret;

		ah_attr->grh.sgid_index = (uint8_t) ret;
		/* version_tclass_flow: bits 0-19 flow label, 20-27 tclass */
		flow_class = ntohl(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = grh->hop_limit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
546
547struct ibv_ah *ibv_create_ah_from_wc(struct ibv_pd *pd, struct ibv_wc *wc,
548				     struct ibv_grh *grh, uint8_t port_num)
549{
550	struct ibv_ah_attr ah_attr;
551	int ret;
552
553	ret = ibv_init_ah_from_wc(pd->context, port_num, wc, grh, &ah_attr);
554	if (ret)
555		return NULL;
556
557	return ibv_create_ah(pd, &ah_attr);
558}
559
560int __ibv_destroy_ah(struct ibv_ah *ah)
561{
562	return ah->context->ops.destroy_ah(ah);
563}
564default_symver(__ibv_destroy_ah, ibv_destroy_ah);
565
566int __ibv_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
567{
568	return qp->context->ops.attach_mcast(qp, gid, lid);
569}
570default_symver(__ibv_attach_mcast, ibv_attach_mcast);
571
572int __ibv_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
573{
574	return qp->context->ops.detach_mcast(qp, gid, lid);
575}
576default_symver(__ibv_detach_mcast, ibv_detach_mcast);
577
578struct ibv_xrc_domain *ibv_open_xrc_domain(struct ibv_context *context,
579					   int fd, int oflag)
580{
581	struct ibv_xrc_domain *d;
582
583	if (!context->more_ops)
584		return NULL;
585
586	d = context->more_ops->open_xrc_domain(context, fd, oflag);
587	if (d)
588		d->context = context;
589
590	return d;
591}
592
593int ibv_close_xrc_domain(struct ibv_xrc_domain *d)
594{
595	if (!d->context->more_ops)
596		return 0;
597
598	return d->context->more_ops->close_xrc_domain(d);
599}
600
601int ibv_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
602			  uint32_t *xrc_rcv_qpn)
603{
604	struct ibv_context *c;
605	if (!init_attr || !(init_attr->xrc_domain))
606		return EINVAL;
607
608	c = init_attr->xrc_domain->context;
609	if (!c->more_ops)
610		return ENOSYS;
611
612	return c->more_ops->create_xrc_rcv_qp(init_attr,
613					      xrc_rcv_qpn);
614}
615
616int ibv_modify_xrc_rcv_qp(struct ibv_xrc_domain *d,
617			  uint32_t xrc_rcv_qpn,
618			  struct ibv_qp_attr *attr,
619			  int attr_mask)
620{
621	if (!d || !attr)
622		return EINVAL;
623
624	if (!d->context->more_ops)
625		return ENOSYS;
626
627	return d->context->more_ops->modify_xrc_rcv_qp(d, xrc_rcv_qpn, attr,
628						       attr_mask);
629}
630
631int ibv_query_xrc_rcv_qp(struct ibv_xrc_domain *d,
632			 uint32_t xrc_rcv_qpn,
633			 struct ibv_qp_attr *attr,
634			 int attr_mask,
635			 struct ibv_qp_init_attr *init_attr)
636{
637	if (!d)
638		return EINVAL;
639
640	if (!d->context->more_ops)
641		return ENOSYS;
642
643	return d->context->more_ops->query_xrc_rcv_qp(d, xrc_rcv_qpn, attr,
644						      attr_mask, init_attr);
645}
646
647int ibv_reg_xrc_rcv_qp(struct ibv_xrc_domain *d,
648		       uint32_t xrc_rcv_qpn)
649{
650	return d->context->more_ops->reg_xrc_rcv_qp(d, xrc_rcv_qpn);
651}
652
653int ibv_unreg_xrc_rcv_qp(struct ibv_xrc_domain *d,
654			 uint32_t xrc_rcv_qpn)
655{
656	return d->context->more_ops->unreg_xrc_rcv_qp(d, xrc_rcv_qpn);
657}
658
659
660static uint16_t get_vlan_id(const union ibv_gid *dgid)
661{
662	return dgid->raw[11] << 8 | dgid->raw[12];
663}
664
/*
 * Recover the 48-bit Ethernet MAC embedded in a link-local GID:
 * the MAC is split across raw[8..10] and raw[13..15] (the bytes in
 * between hold other data), and bit 1 of the first octet — the
 * EUI-64 universal/local bit — is flipped back.
 */
static void get_ll_mac(const union ibv_gid *gid, uint8_t *mac)
{
	memcpy(mac, &gid->raw[8], 3);
	memcpy(mac + 3, &gid->raw[13], 3);
	mac[0] ^= 2;	/* undo the EUI-64 U/L bit inversion */
}
671
672static int is_multicast_gid(const union ibv_gid *gid)
673{
674	return gid->raw[0] == 0xff;
675}
676
677static void get_mcast_mac(const union ibv_gid *gid, uint8_t *mac)
678{
679	int i;
680
681	mac[0] = 0x33;
682	mac[1] = 0x33;
683	for (i = 2; i < 6; ++i)
684		mac[i] = gid->raw[i + 10];
685}
686
687static int is_link_local_gid(const union ibv_gid *gid)
688{
689	uint32_t hi = *(uint32_t *)(gid->raw);
690	uint32_t lo = *(uint32_t *)(gid->raw + 4);
691	if (hi == htonl(0xfe800000) && lo == 0)
692		return 1;
693
694	return 0;
695}
696
/*
 * Derive the destination MAC from a RoCE GID.  Link-local GIDs embed
 * a unicast MAC; multicast GIDs map to a 33:33-prefixed MAC.  Any
 * other GID cannot be resolved locally: -EINVAL.
 */
static int resolve_gid(const union ibv_gid *dgid, uint8_t *mac, uint8_t *is_mcast)
{
	if (is_link_local_gid(dgid)) {
		*is_mcast = 0;
		get_ll_mac(dgid, mac);
		return 0;
	}

	if (is_multicast_gid(dgid)) {
		*is_mcast = 1;
		get_mcast_mac(dgid, mac);
		return 0;
	}

	return -EINVAL;
}
710
711static int is_tagged_vlan(const union ibv_gid *gid)
712{
713	uint16_t tag;
714
715	tag = gid->raw[11] << 8 |  gid->raw[12];
716
717	return tag < 0x1000;
718}
719
720int __ibv_resolve_eth_gid(const struct ibv_pd *pd, uint8_t port_num,
721			  union ibv_gid *dgid, uint8_t sgid_index,
722			  uint8_t mac[], uint16_t *vlan, uint8_t *tagged,
723			  uint8_t *is_mcast)
724{
725	int err;
726	union ibv_gid sgid;
727	int stagged, svlan;
728
729	err = resolve_gid(dgid, mac, is_mcast);
730	if (err)
731		return err;
732
733	err = ibv_query_gid(pd->context, port_num, sgid_index, &sgid);
734	if (err)
735		return err;
736
737	stagged = is_tagged_vlan(&sgid);
738	if (stagged) {
739		if (!is_tagged_vlan(dgid) && !is_mcast)
740			return -1;
741
742		svlan = get_vlan_id(&sgid);
743		if (svlan != get_vlan_id(dgid) && !is_mcast)
744			return -1;
745
746		*tagged = 1;
747		*vlan = svlan;
748	} else
749		*tagged = 0;
750
751	return 0;
752}
753default_symver(__ibv_resolve_eth_gid, ibv_resolve_eth_gid);
754
755