/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

#define MAX_VFS			80
#define MAX_PEND_REQS_PER_FUNC	4
#define MAD_TIMEOUT_MS		2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, group->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)

static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;

enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state state;
	uint8_t join_state;
	int num_pend_reqs;
	struct list_head pending;
};

struct ib_sa_mcmember_data {
	union ib_gid mgid;
	union ib_gid port_gid;
	__be32 qkey;
	__be16 mlid;
	u8 mtusel_mtu;
	u8 tclass;
	__be16 pkey;
	u8 ratesel_rate;
	u8 lifetmsel_lifetm;
	__be32 sl_flowlabel_hoplimit;
	u8 scope_join_state;
	u8 proxy_join;
	u8 reserved[2];
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node node;
	struct list_head mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member func[MAX_VFS];
	struct mutex lock;
	struct work_struct work;
	struct list_head pending_list;
	int members[3];
	enum mcast_group_state state;
	enum mcast_group_state prev_state;
	struct ib_sa_mad response_sa_mad;
	__be64 last_req_tid;

	char name[33]; /* MGID string */
	struct device_attribute dentry;

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work timeout_work;
	struct list_head cleanup_list;
};
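
/*
 * A single pending join/leave request from a VF (slave). Each request is
 * linked on two lists: the owning group's pending_list (processed in order
 * by the group work handler) and the per-function pending list (so all of a
 * VF's outstanding requests can be dropped when the VF is cleaned up).
 * 'clean' marks internally generated leave requests built during VF cleanup.
 */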
struct mcast_req {
	int func;
	struct ib_sa_mad sa_mad;
	struct list_head group_list;
	struct list_head func_list;
	struct mcast_group *group;
	int clean;
};

#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)

static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}

static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}

static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;

	spin_lock(&dev->sm_lock);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock(&dev->sm_lock);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock(&dev->sm_lock);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
				    IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
}

static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}
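
/*
 * Proxy a VF's join request to the SA on the wire. The MAD is taken as it
 * arrived from the VF, but the port GID is rewritten to the physical port's
 * GID (slave 0) and a fresh demux TID is assigned so the SA's response can
 * later be matched back to this group.
 */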
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on the MAD request as it arrived from the VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
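
/*
 * Build a GetResp for a VF from the group's current record. The VF's own
 * join_state bits and its original port GID are put back into the record so
 * that, from the VF's point of view, the reply looks like it came straight
 * from the SA.
 */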
static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}

static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;
	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID is always replaced with our own Port_GID, so it always matches */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
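
/*
 * Lock ordering note: whenever both locks are needed, ctx->mcg_table_lock is
 * taken before group->lock (see release_group(), search_relocate_mgid0_group()
 * and clean_vf_mcast()).
 */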
/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled synchronously */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non-empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	} else {
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
	}
	return 0;
}

static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
}

static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}

static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}
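
/*
 * Request processing helpers, called from the group work handler with
 * group->lock held. Both return the number of group references the caller
 * must drop for the requests they consumed.
 */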
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}

static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 7;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}
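
/*
 * Per-group worker. Runs once per scheduling of group->work: first consumes
 * a response from the SA if one is ready (MCAST_RESP_READY), then serializes
 * the pending VF requests while the group stays idle, and finally sends a
 * leave to the SA if no VF needs the join states the port still holds.
 * 'rc' counts how many group references must be dropped before returning.
 */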
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				(long long unsigned int)be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				(long long unsigned int)be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						       struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
						group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}
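
/*
 * Joins that arrive with a zero MGID ask the SA to allocate the MGID. Such
 * groups are parked on ctx->mcg_mgid0_list until the SA answers; this helper
 * matches the response by TID, fills in the allocated MGID and moves the
 * group into the per-port rb-tree, or destroys it on a bad or duplicate
 * response.
 */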
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group;
	struct mcast_req *req;
	struct list_head *pos;
	struct list_head *n;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
		group = list_entry(pos, struct mcast_group, mgid0_list);
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
					(long long unsigned int)be64_to_cpu(group->rec.mgid.global.subnet_prefix),
					(long long unsigned int)be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf);
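
/*
 * Look up a group by MGID, optionally creating it. On success the group is
 * returned with one reference held on behalf of the caller; drop it with
 * release_group().
 */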
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
		(long long unsigned int)be64_to_cpu(group->rec.mgid.global.subnet_prefix),
		(long long unsigned int)be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}

static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}

int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1;

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
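
/*
 * Multiplex path: MCMemberRecord Set/Delete requests coming from a VF are
 * queued on the relevant group and answered asynchronously by the group work
 * handler; all other methods are not consumed and continue on the normal
 * path to the wire.
 */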
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		/* fall through */
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
				       port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
			get_state_string(group->state),
			(long long unsigned int)be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
			(long long unsigned int)be64_to_cpu(req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
		       group->rec.scope_join_state & 0xf,
		       group->members[2], group->members[1], group->members[0],
		       atomic_read(&group->refcount),
		       pending_str,
		       state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
				       f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
		       "%4x %4x %2x %2x)\n",
		       be16_to_cpu(group->rec.pkey),
		       be32_to_cpu(group->rec.qkey),
		       (group->rec.mtusel_mtu & 0xc0) >> 6,
		       group->rec.mtusel_mtu & 0x3f,
		       group->rec.tclass,
		       (group->rec.ratesel_rate & 0xc0) >> 6,
		       group->rec.ratesel_rate & 0x3f,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
		       be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
		       group->rec.proxy_join);

	return len;
}
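
/*
 * Per-port init/cleanup. Each port gets a single-threaded workqueue, so all
 * group work items for that port are serialized with respect to each other.
 */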
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}

static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}

static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}

void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	if (ctx->flushing)
		return;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}

static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}
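
/*
 * VF cleanup path: drop the VF's queued requests and, if the VF still holds
 * any join state, queue an internally generated "clean" leave request so the
 * work handler can leave the group on the wire once nobody needs it anymore.
 */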
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}

static int push_deleting_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stale groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, func_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}

void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleting_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}