/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * CM MAD paravirtualization for SR-IOV: each slave's local CM
 * communication ID (sl_cm_id) is mapped to a paravirtual ID
 * (pv_cm_id) that is unique across all slaves on the wire.
 */

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)

struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};


static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
	msg->local_comm_id = cpu_to_be32(cm_id);
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->local_comm_id);
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
	msg->remote_comm_id = cpu_to_be32(cm_id);
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->remote_comm_id);
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

/* The caller must hold sriov->id_map_lock. */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		/* Entries are ordered by sl_cm_id first, slave_id second. */
		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}
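/*
 * Delayed-work handler: once the cleanup timeout expires, drop the
 * cached mapping from both the sl_id_map tree and the pv_id_table
 * idr, then free the entry.
 */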
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *db_ent, *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	int pv_id = (int) ent->pv_cm_id;

	spin_lock(&sriov->id_map_lock);
	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
	if (!db_ent)
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_id);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct id_map_entry *ent, *found_ent;

	spin_lock(&sriov->id_map_lock);
	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
	if (!ent)
		goto out;
	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
	spin_unlock(&sriov->id_map_lock);
}

static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id ||
		    (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}
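/*
 * Allocate a new mapping for a slave-local CM ID: reserve a unique
 * paravirtual CM ID in pv_id_table and index the entry in sl_id_map
 * by (slave_id, sl_cm_id).
 */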
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret, id;
	static int next_id;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(struct id_map_entry), GFP_KERNEL);
	if (!ent) {
		mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
		return ERR_PTR(-ENOMEM);
	}

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	do {
		spin_lock(&sriov->id_map_lock);
		ret = idr_get_new_above(&sriov->pv_id_table, ent,
					next_id, &id);
		if (!ret) {
			next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
			ent->pv_cm_id = (u32)id;
			sl_id_map_add(ibdev, ent);
		}

		spin_unlock(&sriov->id_map_lock);
	} while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
	/*
	 * idr_get_new_above() can also fail with -ENOSPC, so insert the
	 * entry into cm_list only on success.
	 */
	if (!ret) {
		spin_lock(&sriov->id_map_lock);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}
	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}

static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* Don't schedule new cleanup work once the device is going down. */
	if (!sriov->is_going_down) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

/*
 * Multiplex a CM MAD sent by a slave towards the wire: replace the
 * slave's local comm ID with a paravirtual comm ID that is unique
 * across all slaves.
 */
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	sl_cm_id = get_local_comm_id(mad);

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
		return 0;
	} else {
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
			 slave_id, sl_cm_id);
		return -EINVAL;
	}

	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
		id_map_find_del(ibdev, pv_cm_id);

	return 0;
}
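/*
 * Demultiplex a CM MAD arriving from the wire: recover the owning
 * slave (for a REQ, by matching the primary path SGID; otherwise via
 * the paravirtual comm ID) and restore the slave-local comm ID in
 * the MAD.
 */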
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad, int is_eth)
{
	u32 pv_cm_id;
	struct id_map_entry *id;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
		union ib_gid gid;

		if (is_eth)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     (long long)gid.global.interface_id);
			return -ENOENT;
		}
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
		return -ENOENT;
	}

	if (!is_eth)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		 mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
		id_map_find_del(ibdev, (int) pv_cm_id);

	return 0;
}

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	idr_init(&dev->sriov.pv_id_table);
	idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
}

/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}
}