1255932Salfred/* 2255932Salfred * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3272027Shselasky * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. 4255932Salfred * All rights reserved. 5255932Salfred * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 6255932Salfred * 7255932Salfred * This software is available to you under a choice of one of two 8255932Salfred * licenses. You may choose to be licensed under the terms of the GNU 9255932Salfred * General Public License (GPL) Version 2, available from the file 10255932Salfred * COPYING in the main directory of this source tree, or the 11255932Salfred * OpenIB.org BSD license below: 12255932Salfred * 13255932Salfred * Redistribution and use in source and binary forms, with or 14255932Salfred * without modification, are permitted provided that the following 15255932Salfred * conditions are met: 16255932Salfred * 17255932Salfred * - Redistributions of source code must retain the above 18255932Salfred * copyright notice, this list of conditions and the following 19255932Salfred * disclaimer. 20255932Salfred * 21255932Salfred * - Redistributions in binary form must reproduce the above 22255932Salfred * copyright notice, this list of conditions and the following 23255932Salfred * disclaimer in the documentation and/or other materials 24255932Salfred * provided with the distribution. 25255932Salfred * 26255932Salfred * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27255932Salfred * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28255932Salfred * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29255932Salfred * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <dev/mlx4/cmd.h>
#include <dev/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

/* Bit 63 of a 64-bit MAC table entry marks the entry as valid. */
#define MLX4_MAC_VALID (1ull << 63)
/* Flow counters guaranteed per port to the PF and to each eligible VF. */
#define MLX4_PF_COUNTERS_PER_PORT 2
#define MLX4_VF_COUNTERS_PER_PORT 1

/* One MAC address registered by a function (PF/VF) on a port. */
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;   /* number of outstanding registrations of this MAC */
	u8 smac_index;
	u8 port;
};

/* One VLAN registered by a function on a port. */
struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;   /* number of outstanding registrations of this VLAN */
	int vlan_index;
	u8 port;
};

/*
 * Header embedded at the start of every tracked resource.  Resources are
 * linked both on their owning slave's per-type list and in a per-type
 * red-black tree keyed by res_id (see res_tracker_lookup/insert below).
 */
struct res_common {
	struct list_head list;   /* linkage on the owning slave's list */
	struct rb_node node;     /* linkage in the per-type rb-tree */
	u64 res_id;
	int owner;               /* owning slave (function) id */
	int state;
	int from_state;          /* endpoints of an in-flight state change */
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

/* Multicast/steering attachment registered on behalf of a QP. */
struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

/* Tracked QP, with back-pointers to the resources its context references. */
struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;   /* res_gid attachments of this QP */
	spinlock_t mcg_spl;          /* presumably guards mcg_list — confirm at use sites */
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

/* Debug helper: printable name for an MTT tracker state. */
static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

/* Tracked flow-steering rule, plus state for HA-mode port mirroring. */
struct res_fs_rule {
	struct res_common com;
	int qpn;
	/* VF DMFS mbox with port flipped */
	void *mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32 mirr_mbox_size;
	struct list_head mirr_list;
	u64 mirr_rule_id;
};

/*
 * Look up a tracked resource by id in one of the per-type red-black trees.
 * Returns the embedded res_common, or NULL if res_id is not tracked.
 */
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

/*
 * Insert a resource into the red-black tree keyed by res_id.
 * Returns 0 on success, or -EEXIST if the id is already present.
 */
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id >
this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

/* QP verb transitions the tracker validates on behalf of slaves. */
enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

/*
 * Charge @count instances of @res_type to @slave's accounting, per-port when
 * @port > 0, otherwise against the global pools.  The request is satisfied
 * from the slave's guaranteed reservation first; any remainder must fit in
 * the shared free pool without eating into other slaves' guarantees.
 * Returns 0 on success, -EINVAL on a bad slave id, quota overrun, or an
 * exhausted free pool (the failure paths log via mlx4_warn).
 */
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	/* per-port counters are laid out as [port][slave] in one flat array */
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		/* fully covered by this slave's guaranteed reservation */
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
				(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

/*
 * Return @count instances of @res_type previously granted to @slave
 * (per-port when @port > 0).  The portion that brings the slave back
 * under its guarantee is credited to the reserved pool; the rest goes
 * back to the shared free pool.
 */
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}

/*
 * Set the quota and guarantee for one function (@vf) and resource type:
 *   guarantee = num_instances / (2 * (num_vfs + 1))
 *   quota     = num_instances / 2 + guarantee
 * The PF (master function) also seeds the global free pool and, for MTTs,
 * folds the device's reserved MTTs into its own free/guarantee/quota.
 */
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free +=
dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

/*
 * Publish this function's quotas in dev->quotas.  VF quotas come from
 * mlx4_slave_cap (so slaves return immediately); a non-multifunction
 * device simply exposes capability minus reserved; a multifunction PF
 * copies its own entries out of the resource tracker tables.
 */
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

/*
 * Highest VF index that can still get a guaranteed counter per port,
 * after subtracting the sink counter and the PF's per-port counters
 * from the device's counter capability.
 */
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}

/*
 * Allocate and initialize the master's resource tracker: per-slave resource
 * lists and mutexes, per-type red-black trees, and per-type quota/guarantee/
 * allocated tables for every function (PF + VFs).  Returns 0 or -ENOMEM.
 */
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		/* MAC/VLAN allocations are tracked per port AND per function */
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* both ports. */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						/* NOTE(review): weight is taken over
						 * num_ports bits; confirm the intended
						 * length is not num_vfs + 1.
						 */
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				res_alloc->res_free -= res_alloc->guaranteed[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				/* reserve the guarantee on each active port */
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	/* kfree(NULL) is a no-op, so partially-filled iterations are fine.
	 * NOTE(review): slave_list allocated above is not freed on this
	 * path — looks like a leak; confirm against the caller's cleanup.
	 */
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

/*
 * Tear down the resource tracker.  @type selects the scope: free the
 * slaves' resources, free the tracker's own structures, or both.
 */
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

/*
 * Rewrite the pkey index in a QP-context mailbox from the slave's virtual
 * index to the physical one.  The port is taken from the sched_queue byte
 * at mailbox offset 64; the pkey index itself lives at offset 35.
 */
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	/* bit 6 of sched_queue encodes the port (0 -> port 1, 1 -> port 2) */
	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

/*
 * Translate slave-relative GID indices in a QP-context mailbox to absolute
 * ones.  UD QPs on Ethernet ports get the slave's base GID index (with bit
 * 7 set); RC/UC/XRC QPs have their primary and/or alternate path indices
 * offset by the slave's base, but only for paths the optpar mask says are
 * being modified.
 */
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

/*
 * Apply per-VF vport policy (counter assignment and VST VLAN enforcement)
 * to a slave's QP context before it is handed to firmware.
 */
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox, 738272027Shselasky u8 slave, u32 qpn) 739255932Salfred{ 740255932Salfred struct mlx4_qp_context *qpc = inbox->buf + 8; 741255932Salfred struct mlx4_vport_oper_state *vp_oper; 742255932Salfred struct mlx4_priv *priv; 743255932Salfred u32 qp_type; 744329159Shselasky int port, err = 0; 745255932Salfred 746255932Salfred port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 747255932Salfred priv = mlx4_priv(dev); 748255932Salfred vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 749272027Shselasky qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 750255932Salfred 751329159Shselasky err = handle_counter(dev, qpc, slave, port); 752329159Shselasky if (err) 753329159Shselasky goto out; 754255932Salfred 755272027Shselasky if (MLX4_VGT != vp_oper->state.default_vlan) { 756272027Shselasky /* the reserved QPs (special, proxy, tunnel) 757272027Shselasky * do not operate over vlans 758272027Shselasky */ 759272027Shselasky if (mlx4_is_qp_reserved(dev, qpn)) 760272027Shselasky return 0; 761272027Shselasky 762329159Shselasky /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */ 763329159Shselasky if (qp_type == MLX4_QP_ST_UD || 764329159Shselasky (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { 765329159Shselasky if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { 766329159Shselasky *(__be32 *)inbox->buf = 767329159Shselasky cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | 768329159Shselasky MLX4_QP_OPTPAR_VLAN_STRIPPING); 769329159Shselasky qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); 770329159Shselasky } else { 771329159Shselasky struct mlx4_update_qp_params params = {.flags = 0}; 772329159Shselasky 773329159Shselasky err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); 774329159Shselasky if (err) 775329159Shselasky goto out; 776329159Shselasky } 777329159Shselasky } 778329159Shselasky 779272027Shselasky /* preserve IF_COUNTER flag */ 780272027Shselasky qpc->pri_path.vlan_control &= 781329159Shselasky 
MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER; 782329159Shselasky if (1 /*vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE*/ && 783329159Shselasky dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { 784329159Shselasky qpc->pri_path.vlan_control |= 785329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 786329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | 787329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | 788329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 789329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | 790329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; 791329159Shselasky } else if (0 != vp_oper->state.default_vlan) { 792329159Shselasky if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) { 793329159Shselasky /* vst QinQ should block untagged on TX, 794329159Shselasky * but cvlan is in payload and phv is set so 795329159Shselasky * hw see it as untagged. Block tagged instead. 796329159Shselasky */ 797272027Shselasky qpc->pri_path.vlan_control |= 798329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | 799272027Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 800272027Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 801272027Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; 802329159Shselasky } else { /* vst 802.1Q */ 803272027Shselasky qpc->pri_path.vlan_control |= 804272027Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 805329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 806329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; 807272027Shselasky } 808329159Shselasky } else { /* priority tagged */ 809329159Shselasky qpc->pri_path.vlan_control |= 810329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 811329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; 812272027Shselasky } 813329159Shselasky 814272027Shselasky qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; 815255932Salfred qpc->pri_path.vlan_index = vp_oper->vlan_idx; 816329159Shselasky qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN; 817329159Shselasky if 
		    (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;	/* QinQ: force S-vlan */
		else
			qpc->pri_path.fl |= MLX4_FL_CV;	/* 802.1Q: force C-vlan */
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		/* overwrite priority bits [5:3] of sched_queue with the VF QoS */
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		/* force the source MAC to the VF's registered MAC index */
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}

/* Mask for wrapping MPT indices into the device's MPT table range. */
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

/* Look up a tracked resource by id in the per-type red-black tree.
 * Caller must hold mlx4_tlock(dev).
 */
static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

/* Find resource res_id of @type, verify it is owned by @slave and not busy,
 * then mark it RES_ANY_BUSY (saving the previous state for put_res()).
 * On success *res (if non-NULL) receives the entry.
 */
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		/* NOTE(review): -ENONET (not -ENOENT) - appears deliberate so
		 * callers can distinguish this from other lookups; confirm
		 * before "fixing".
		 */
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

/* Report which slave owns resource res_id of @type.  QP ids are masked to
 * the 24-bit QPN space first.  Returns -ENOENT if untracked.
 */
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{

	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

/* Release a resource taken with get_res(): restore its pre-busy state. */
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

/* Forward declaration; defined with the other *_alloc_res wrappers. */
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

/* Validate that @counter_index belongs to @slave and, if the counter is not
 * yet bound to a port, bind it to @port.  The sink counter always passes.
 */
static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

/* The QP context names no valid counter: reuse a counter the slave already
 * owns on @port, or allocate a fresh one.  Allocation failure is tolerated
 * (the QP then keeps the sink counter index it came in with).
 */
static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	/* First try to reuse an already-owned counter on this port. */
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		/* counter allocated (or quota exhausted with -ENOSPC, which
		 * still leaves counter_idx at the sink index)
		 */
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

/* Dispatch: the QP context either already carries a real counter index
 * (validate it) or the sink index (find/allocate one for the slave).
 */
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}

/* Allocate a tracker entry for a QP in the RES_QP_RESERVED state. */
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

/* Allocate a tracker entry for an MTT range of the given order. */
static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret
= kzalloc(sizeof *ret, GFP_KERNEL); 1020255932Salfred if (!ret) 1021255932Salfred return NULL; 1022255932Salfred 1023255932Salfred ret->com.res_id = id; 1024255932Salfred ret->order = order; 1025255932Salfred ret->com.state = RES_MTT_ALLOCATED; 1026255932Salfred atomic_set(&ret->ref_count, 0); 1027255932Salfred 1028255932Salfred return &ret->com; 1029255932Salfred} 1030255932Salfred 1031255932Salfredstatic struct res_common *alloc_mpt_tr(int id, int key) 1032255932Salfred{ 1033255932Salfred struct res_mpt *ret; 1034255932Salfred 1035255932Salfred ret = kzalloc(sizeof *ret, GFP_KERNEL); 1036255932Salfred if (!ret) 1037255932Salfred return NULL; 1038255932Salfred 1039255932Salfred ret->com.res_id = id; 1040255932Salfred ret->com.state = RES_MPT_RESERVED; 1041255932Salfred ret->key = key; 1042255932Salfred 1043255932Salfred return &ret->com; 1044255932Salfred} 1045255932Salfred 1046255932Salfredstatic struct res_common *alloc_eq_tr(int id) 1047255932Salfred{ 1048255932Salfred struct res_eq *ret; 1049255932Salfred 1050255932Salfred ret = kzalloc(sizeof *ret, GFP_KERNEL); 1051255932Salfred if (!ret) 1052255932Salfred return NULL; 1053255932Salfred 1054255932Salfred ret->com.res_id = id; 1055255932Salfred ret->com.state = RES_EQ_RESERVED; 1056255932Salfred 1057255932Salfred return &ret->com; 1058255932Salfred} 1059255932Salfred 1060255932Salfredstatic struct res_common *alloc_cq_tr(int id) 1061255932Salfred{ 1062255932Salfred struct res_cq *ret; 1063255932Salfred 1064255932Salfred ret = kzalloc(sizeof *ret, GFP_KERNEL); 1065255932Salfred if (!ret) 1066255932Salfred return NULL; 1067255932Salfred 1068255932Salfred ret->com.res_id = id; 1069255932Salfred ret->com.state = RES_CQ_ALLOCATED; 1070255932Salfred atomic_set(&ret->ref_count, 0); 1071255932Salfred 1072255932Salfred return &ret->com; 1073255932Salfred} 1074255932Salfred 1075255932Salfredstatic struct res_common *alloc_srq_tr(int id) 1076255932Salfred{ 1077255932Salfred struct res_srq *ret; 1078255932Salfred 
1079255932Salfred ret = kzalloc(sizeof *ret, GFP_KERNEL); 1080255932Salfred if (!ret) 1081255932Salfred return NULL; 1082255932Salfred 1083255932Salfred ret->com.res_id = id; 1084255932Salfred ret->com.state = RES_SRQ_ALLOCATED; 1085255932Salfred atomic_set(&ret->ref_count, 0); 1086255932Salfred 1087255932Salfred return &ret->com; 1088255932Salfred} 1089255932Salfred 1090329159Shselaskystatic struct res_common *alloc_counter_tr(int id, int port) 1091255932Salfred{ 1092255932Salfred struct res_counter *ret; 1093255932Salfred 1094255932Salfred ret = kzalloc(sizeof *ret, GFP_KERNEL); 1095255932Salfred if (!ret) 1096255932Salfred return NULL; 1097255932Salfred 1098255932Salfred ret->com.res_id = id; 1099255932Salfred ret->com.state = RES_COUNTER_ALLOCATED; 1100329159Shselasky ret->port = port; 1101255932Salfred 1102255932Salfred return &ret->com; 1103255932Salfred} 1104255932Salfred 1105255932Salfredstatic struct res_common *alloc_xrcdn_tr(int id) 1106255932Salfred{ 1107255932Salfred struct res_xrcdn *ret; 1108255932Salfred 1109255932Salfred ret = kzalloc(sizeof *ret, GFP_KERNEL); 1110255932Salfred if (!ret) 1111255932Salfred return NULL; 1112255932Salfred 1113255932Salfred ret->com.res_id = id; 1114255932Salfred ret->com.state = RES_XRCD_ALLOCATED; 1115255932Salfred 1116255932Salfred return &ret->com; 1117255932Salfred} 1118255932Salfred 1119272027Shselaskystatic struct res_common *alloc_fs_rule_tr(u64 id, int qpn) 1120255932Salfred{ 1121255932Salfred struct res_fs_rule *ret; 1122255932Salfred 1123255932Salfred ret = kzalloc(sizeof *ret, GFP_KERNEL); 1124255932Salfred if (!ret) 1125255932Salfred return NULL; 1126255932Salfred 1127255932Salfred ret->com.res_id = id; 1128255932Salfred ret->com.state = RES_FS_RULE_ALLOCATED; 1129272027Shselasky ret->qpn = qpn; 1130255932Salfred return &ret->com; 1131255932Salfred} 1132255932Salfred 1133255932Salfredstatic struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, 1134255932Salfred int extra) 
1135255932Salfred{ 1136255932Salfred struct res_common *ret; 1137255932Salfred 1138255932Salfred switch (type) { 1139255932Salfred case RES_QP: 1140255932Salfred ret = alloc_qp_tr(id); 1141255932Salfred break; 1142255932Salfred case RES_MPT: 1143255932Salfred ret = alloc_mpt_tr(id, extra); 1144255932Salfred break; 1145255932Salfred case RES_MTT: 1146255932Salfred ret = alloc_mtt_tr(id, extra); 1147255932Salfred break; 1148255932Salfred case RES_EQ: 1149255932Salfred ret = alloc_eq_tr(id); 1150255932Salfred break; 1151255932Salfred case RES_CQ: 1152255932Salfred ret = alloc_cq_tr(id); 1153255932Salfred break; 1154255932Salfred case RES_SRQ: 1155255932Salfred ret = alloc_srq_tr(id); 1156255932Salfred break; 1157255932Salfred case RES_MAC: 1158329159Shselasky pr_err("implementation missing\n"); 1159255932Salfred return NULL; 1160255932Salfred case RES_COUNTER: 1161329159Shselasky ret = alloc_counter_tr(id, extra); 1162255932Salfred break; 1163255932Salfred case RES_XRCD: 1164255932Salfred ret = alloc_xrcdn_tr(id); 1165255932Salfred break; 1166255932Salfred case RES_FS_RULE: 1167272027Shselasky ret = alloc_fs_rule_tr(id, extra); 1168255932Salfred break; 1169255932Salfred default: 1170255932Salfred return NULL; 1171255932Salfred } 1172255932Salfred if (ret) 1173255932Salfred ret->owner = slave; 1174255932Salfred 1175255932Salfred return ret; 1176255932Salfred} 1177255932Salfred 1178329159Shselaskyint mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port, 1179329159Shselasky struct mlx4_counter *data) 1180329159Shselasky{ 1181329159Shselasky struct mlx4_priv *priv = mlx4_priv(dev); 1182329159Shselasky struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1183329159Shselasky struct res_common *tmp; 1184329159Shselasky struct res_counter *counter; 1185329159Shselasky int *counters_arr; 1186329159Shselasky int i = 0, err = 0; 1187329159Shselasky 1188329159Shselasky memset(data, 0, sizeof(*data)); 1189329159Shselasky 1190329159Shselasky 
counters_arr = kmalloc_array(dev->caps.max_counters, 1191329159Shselasky sizeof(*counters_arr), GFP_KERNEL); 1192329159Shselasky if (!counters_arr) 1193329159Shselasky return -ENOMEM; 1194329159Shselasky 1195329159Shselasky spin_lock_irq(mlx4_tlock(dev)); 1196329159Shselasky list_for_each_entry(tmp, 1197329159Shselasky &tracker->slave_list[slave].res_list[RES_COUNTER], 1198329159Shselasky list) { 1199329159Shselasky counter = container_of(tmp, struct res_counter, com); 1200329159Shselasky if (counter->port == port) { 1201329159Shselasky counters_arr[i] = (int)tmp->res_id; 1202329159Shselasky i++; 1203329159Shselasky } 1204329159Shselasky } 1205329159Shselasky spin_unlock_irq(mlx4_tlock(dev)); 1206329159Shselasky counters_arr[i] = -1; 1207329159Shselasky 1208329159Shselasky i = 0; 1209329159Shselasky 1210329159Shselasky while (counters_arr[i] != -1) { 1211329159Shselasky err = mlx4_get_counter_stats(dev, counters_arr[i], data, 1212329159Shselasky 0); 1213329159Shselasky if (err) { 1214329159Shselasky memset(data, 0, sizeof(*data)); 1215329159Shselasky goto table_changed; 1216329159Shselasky } 1217329159Shselasky i++; 1218329159Shselasky } 1219329159Shselasky 1220329159Shselaskytable_changed: 1221329159Shselasky kfree(counters_arr); 1222329159Shselasky return 0; 1223329159Shselasky} 1224329159Shselasky 1225255932Salfredstatic int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, 1226255932Salfred enum mlx4_resource type, int extra) 1227255932Salfred{ 1228255932Salfred int i; 1229255932Salfred int err; 1230255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 1231255932Salfred struct res_common **res_arr; 1232255932Salfred struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1233255932Salfred struct rb_root *root = &tracker->res_tree[type]; 1234255932Salfred 1235255932Salfred res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); 1236255932Salfred if (!res_arr) 1237255932Salfred return -ENOMEM; 1238255932Salfred 
1239255932Salfred for (i = 0; i < count; ++i) { 1240255932Salfred res_arr[i] = alloc_tr(base + i, type, slave, extra); 1241255932Salfred if (!res_arr[i]) { 1242255932Salfred for (--i; i >= 0; --i) 1243255932Salfred kfree(res_arr[i]); 1244255932Salfred 1245255932Salfred kfree(res_arr); 1246255932Salfred return -ENOMEM; 1247255932Salfred } 1248255932Salfred } 1249255932Salfred 1250255932Salfred spin_lock_irq(mlx4_tlock(dev)); 1251255932Salfred for (i = 0; i < count; ++i) { 1252255932Salfred if (find_res(dev, base + i, type)) { 1253255932Salfred err = -EEXIST; 1254255932Salfred goto undo; 1255255932Salfred } 1256255932Salfred err = res_tracker_insert(root, res_arr[i]); 1257255932Salfred if (err) 1258255932Salfred goto undo; 1259255932Salfred list_add_tail(&res_arr[i]->list, 1260255932Salfred &tracker->slave_list[slave].res_list[type]); 1261255932Salfred } 1262255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 1263255932Salfred kfree(res_arr); 1264255932Salfred 1265255932Salfred return 0; 1266255932Salfred 1267255932Salfredundo: 1268272027Shselasky for (--i; i >= 0; --i) { 1269255932Salfred rb_erase(&res_arr[i]->node, root); 1270272027Shselasky list_del_init(&res_arr[i]->list); 1271272027Shselasky } 1272255932Salfred 1273255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 1274255932Salfred 1275255932Salfred for (i = 0; i < count; ++i) 1276255932Salfred kfree(res_arr[i]); 1277255932Salfred 1278255932Salfred kfree(res_arr); 1279255932Salfred 1280255932Salfred return err; 1281255932Salfred} 1282255932Salfred 1283255932Salfredstatic int remove_qp_ok(struct res_qp *res) 1284255932Salfred{ 1285272027Shselasky if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) || 1286272027Shselasky !list_empty(&res->mcg_list)) { 1287272027Shselasky pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", 1288272027Shselasky res->com.state, atomic_read(&res->ref_count)); 1289255932Salfred return -EBUSY; 1290272027Shselasky } else if (res->com.state != RES_QP_RESERVED) { 
1291255932Salfred return -EPERM; 1292272027Shselasky } 1293255932Salfred 1294255932Salfred return 0; 1295255932Salfred} 1296255932Salfred 1297255932Salfredstatic int remove_mtt_ok(struct res_mtt *res, int order) 1298255932Salfred{ 1299255932Salfred if (res->com.state == RES_MTT_BUSY || 1300255932Salfred atomic_read(&res->ref_count)) { 1301329159Shselasky pr_devel("%s-%d: state %s, ref_count %d\n", 1302329159Shselasky __func__, __LINE__, 1303329159Shselasky mtt_states_str(res->com.state), 1304329159Shselasky atomic_read(&res->ref_count)); 1305255932Salfred return -EBUSY; 1306255932Salfred } else if (res->com.state != RES_MTT_ALLOCATED) 1307255932Salfred return -EPERM; 1308255932Salfred else if (res->order != order) 1309255932Salfred return -EINVAL; 1310255932Salfred 1311255932Salfred return 0; 1312255932Salfred} 1313255932Salfred 1314255932Salfredstatic int remove_mpt_ok(struct res_mpt *res) 1315255932Salfred{ 1316255932Salfred if (res->com.state == RES_MPT_BUSY) 1317255932Salfred return -EBUSY; 1318255932Salfred else if (res->com.state != RES_MPT_RESERVED) 1319255932Salfred return -EPERM; 1320255932Salfred 1321255932Salfred return 0; 1322255932Salfred} 1323255932Salfred 1324255932Salfredstatic int remove_eq_ok(struct res_eq *res) 1325255932Salfred{ 1326255932Salfred if (res->com.state == RES_MPT_BUSY) 1327255932Salfred return -EBUSY; 1328255932Salfred else if (res->com.state != RES_MPT_RESERVED) 1329255932Salfred return -EPERM; 1330255932Salfred 1331255932Salfred return 0; 1332255932Salfred} 1333255932Salfred 1334255932Salfredstatic int remove_counter_ok(struct res_counter *res) 1335255932Salfred{ 1336255932Salfred if (res->com.state == RES_COUNTER_BUSY) 1337255932Salfred return -EBUSY; 1338255932Salfred else if (res->com.state != RES_COUNTER_ALLOCATED) 1339255932Salfred return -EPERM; 1340255932Salfred 1341255932Salfred return 0; 1342255932Salfred} 1343255932Salfred 1344255932Salfredstatic int remove_xrcdn_ok(struct res_xrcdn *res) 1345255932Salfred{ 
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

/* A flow-steering rule may be removed only from the ALLOCATED state. */
static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

/* A CQ may be removed only from the ALLOCATED state. */
static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

/* An SRQ may be removed only from the ALLOCATED state. */
static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

/* Per-type dispatch for "may this tracker entry be deleted?".
 * Returns 0 when removal is allowed, otherwise -EBUSY/-EPERM/-EINVAL.
 */
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		/* MAC removal is handled outside the generic tracker */
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

/* Delete tracker entries [base, base+count) of @type owned by @slave.
 * All entries are validated with remove_ok() before any is freed, so the
 * operation is all-or-nothing.
 */
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	/* pass 1: validate ownership and removability of every id */
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	/* pass 2: actually unlink and free */
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

/* Begin a QP state transition in the tracker: validate that moving from the
 * current state to @state is legal, then park the entry in RES_QP_BUSY with
 * from_state/to_state recorded (committed by res_end_move(), rolled back by
 * res_abort_move()).  @alloc distinguishes reserve/alloc from free paths.
 */
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, (unsigned long long)r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			/* only MAPPED -> RESERVED, and only when freeing */
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", (unsigned long long)r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			/* RESERVED -> MAPPED when allocating, HW -> MAPPED
			 * when tearing down
			 */
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 (unsigned long long)r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

/* Begin an MPT state transition; same busy/commit protocol as
 * qp_res_start_move_to() above.
 */
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

/* Begin an EQ state transition (RESERVED <-> HW only); entry is parked in
 * RES_EQ_BUSY until res_end_move()/res_abort_move().
 */
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	/* r was validated under the lock; it stays busy so publishing it
	 * after unlock is safe
	 */
	if (!err && eq)
		*eq = r;

	return err;
}

/* Begin a CQ state transition (ALLOCATED <-> HW); a CQ still referenced by
 * QPs/SRQs cannot leave HW.
 */
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

/* Begin an SRQ state transition (ALLOCATED <-> HW); mirrors the CQ logic. */
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct
mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1663255932Salfred struct res_srq *r; 1664255932Salfred int err = 0; 1665255932Salfred 1666255932Salfred spin_lock_irq(mlx4_tlock(dev)); 1667255932Salfred r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); 1668329159Shselasky if (!r) { 1669255932Salfred err = -ENOENT; 1670329159Shselasky } else if (r->com.owner != slave) { 1671255932Salfred err = -EPERM; 1672329159Shselasky } else if (state == RES_SRQ_ALLOCATED) { 1673329159Shselasky if (r->com.state != RES_SRQ_HW) 1674255932Salfred err = -EINVAL; 1675329159Shselasky else if (atomic_read(&r->ref_count)) 1676329159Shselasky err = -EBUSY; 1677329159Shselasky } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { 1678329159Shselasky err = -EINVAL; 1679329159Shselasky } 1680255932Salfred 1681329159Shselasky if (!err) { 1682329159Shselasky r->com.from_state = r->com.state; 1683329159Shselasky r->com.to_state = state; 1684329159Shselasky r->com.state = RES_SRQ_BUSY; 1685329159Shselasky if (srq) 1686329159Shselasky *srq = r; 1687255932Salfred } 1688255932Salfred 1689255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 1690255932Salfred 1691255932Salfred return err; 1692255932Salfred} 1693255932Salfred 1694255932Salfredstatic void res_abort_move(struct mlx4_dev *dev, int slave, 1695255932Salfred enum mlx4_resource type, int id) 1696255932Salfred{ 1697255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 1698255932Salfred struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1699255932Salfred struct res_common *r; 1700255932Salfred 1701255932Salfred spin_lock_irq(mlx4_tlock(dev)); 1702255932Salfred r = res_tracker_lookup(&tracker->res_tree[type], id); 1703255932Salfred if (r && (r->owner == slave)) 1704255932Salfred r->state = r->from_state; 1705255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 1706255932Salfred} 1707255932Salfred 1708255932Salfredstatic void res_end_move(struct mlx4_dev *dev, int slave, 1709255932Salfred 
enum mlx4_resource type, int id) 1710255932Salfred{ 1711255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 1712255932Salfred struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1713255932Salfred struct res_common *r; 1714255932Salfred 1715255932Salfred spin_lock_irq(mlx4_tlock(dev)); 1716255932Salfred r = res_tracker_lookup(&tracker->res_tree[type], id); 1717255932Salfred if (r && (r->owner == slave)) 1718255932Salfred r->state = r->to_state; 1719255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 1720255932Salfred} 1721255932Salfred 1722255932Salfredstatic int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) 1723255932Salfred{ 1724255932Salfred return mlx4_is_qp_reserved(dev, qpn) && 1725255932Salfred (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn)); 1726255932Salfred} 1727255932Salfred 1728255932Salfredstatic int fw_reserved(struct mlx4_dev *dev, int qpn) 1729255932Salfred{ 1730255932Salfred return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 1731255932Salfred} 1732255932Salfred 1733255932Salfredstatic int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1734255932Salfred u64 in_param, u64 *out_param) 1735255932Salfred{ 1736255932Salfred int err; 1737255932Salfred int count; 1738255932Salfred int align; 1739255932Salfred int base; 1740255932Salfred int qpn; 1741272027Shselasky u8 flags; 1742255932Salfred 1743255932Salfred switch (op) { 1744255932Salfred case RES_OP_RESERVE: 1745255932Salfred count = get_param_l(&in_param) & 0xffffff; 1746329159Shselasky /* Turn off all unsupported QP allocation flags that the 1747329159Shselasky * slave tries to set. 
1748329159Shselasky */ 1749329159Shselasky flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask; 1750255932Salfred align = get_param_h(&in_param); 1751255932Salfred err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1752255932Salfred if (err) 1753255932Salfred return err; 1754255932Salfred 1755272027Shselasky err = __mlx4_qp_reserve_range(dev, count, align, &base, flags); 1756255932Salfred if (err) { 1757255932Salfred mlx4_release_resource(dev, slave, RES_QP, count, 0); 1758255932Salfred return err; 1759255932Salfred } 1760255932Salfred 1761255932Salfred err = add_res_range(dev, slave, base, count, RES_QP, 0); 1762255932Salfred if (err) { 1763255932Salfred mlx4_release_resource(dev, slave, RES_QP, count, 0); 1764255932Salfred __mlx4_qp_release_range(dev, base, count); 1765255932Salfred return err; 1766255932Salfred } 1767255932Salfred set_param_l(out_param, base); 1768255932Salfred break; 1769255932Salfred case RES_OP_MAP_ICM: 1770255932Salfred qpn = get_param_l(&in_param) & 0x7fffff; 1771255932Salfred if (valid_reserved(dev, slave, qpn)) { 1772255932Salfred err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); 1773255932Salfred if (err) 1774255932Salfred return err; 1775255932Salfred } 1776255932Salfred 1777255932Salfred err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, 1778255932Salfred NULL, 1); 1779255932Salfred if (err) 1780255932Salfred return err; 1781255932Salfred 1782255932Salfred if (!fw_reserved(dev, qpn)) { 1783329159Shselasky err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); 1784255932Salfred if (err) { 1785255932Salfred res_abort_move(dev, slave, RES_QP, qpn); 1786255932Salfred return err; 1787255932Salfred } 1788255932Salfred } 1789255932Salfred 1790255932Salfred res_end_move(dev, slave, RES_QP, qpn); 1791255932Salfred break; 1792255932Salfred 1793255932Salfred default: 1794255932Salfred err = -EINVAL; 1795255932Salfred break; 1796255932Salfred } 1797255932Salfred return err; 1798255932Salfred} 1799255932Salfred 
/* ALLOC_RES handler for MTT ranges: grant quota for 2^order entries,
 * carve the range, and register it with the tracker.  Base returned in
 * *out_param.
 */
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

/* ALLOC_RES handler for MPTs (memory regions).
 * RES_OP_RESERVE: reserve an MPT index and track it (tracker key is the
 * masked id, the full index is kept as extra data).
 * RES_OP_MAP_ICM: move to MAPPED and back the entry with ICM.
 */
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			/* NOTE(review): err is still 0 here, so the caller
			 * sees success with *out_param never set — looks
			 * like this should be -ENOMEM; confirm against
			 * upstream before changing.
			 */
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

/* ALLOC_RES handler for CQs: grant quota, allocate ICM-backed CQN,
 * register with the tracker.  CQN returned in *out_param.
 */
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

/* ALLOC_RES handler for SRQs; mirrors cq_alloc_res(). */
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

/* Look up the MAC registered by @slave on @port at SMAC index
 * @smac_index.  Returns 0 and the MAC in *mac, or -ENOENT.
 */
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

/* Track a MAC registration for @slave on @port.  A repeated (mac, port)
 * pair only bumps the ref count; otherwise quota is granted and a new
 * entry added to the slave's MAC list.
 */
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

/* Drop one reference on (mac, port) for @slave; free the entry and
 * release quota when the count reaches zero.
 */
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

/* Slave teardown: unregister every MAC the slave held, once per
 * reference, and release the quota.
 */
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}

/* ALLOC_RES handler for MACs: register @in_param as a MAC on the
 * (slave-translated) port and track it.  The SMAC index is returned in
 * *out_param.
 */
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index = 0;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	/* Port may arrive either in the modifier (in_port) or in out_param. */
	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	/* __mlx4_register_mac() returns the SMAC index on success. */
	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

/* Track a VLAN registration for @slave on @port; ref-counted like
 * mac_add_to_slave().
 */
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}


/* Drop one reference on (vlan, port) for @slave; free and release quota
 * on the last reference.
 */
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

/* Slave teardown: unregister every VLAN the slave held, once per
 * reference, and release the quota.
 */
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

/* ALLOC_RES handler for VLANs.  Slaves using the legacy (port-less)
 * API get a recorded no-op for compatibility; otherwise the VLAN is
 * registered and tracked, with the vlan_index returned in *out_param.
 */
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}

/* ALLOC_RES handler for counters: grant quota, allocate a counter and
 * track it (the port is stored as the tracker's extra data).  Index
 * returned in *out_param.
 */
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

/* ALLOC_RES handler for XRC domains.  No quota accounting — XRCDs are
 * not quota-managed here, only tracked for cleanup.
 */
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

/* Command dispatcher for ALLOC_RES from a slave: the low byte of
 * in_modifier selects the resource type, op_modifier carries the
 * RES_OP_* sub-operation, and (for MAC/VLAN) the next byte of
 * in_modifier carries the port.
 */
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

/* FREE_RES handler for QPs — inverse of qp_alloc_res().
 * RES_OP_RESERVE: untrack and release a QPN range.
 * RES_OP_MAP_ICM: move back to RESERVED, free ICM (unless FW-reserved),
 * and untrack proxy/reserved QPNs that were added on first use.
 */
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

/* FREE_RES handler for MTT ranges — inverse of mtt_alloc_res(). */
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

/* FREE_RES handler for MPTs — inverse of mpt_alloc_res().
 * RES_OP_RESERVE: untrack and release the index (looked up via the
 * tracker since the stored key may differ from the masked id).
 * RES_OP_MAP_ICM: move back to RESERVED and free the ICM backing.
 */
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

/* FREE_RES handler for CQs — inverse of cq_alloc_res(). */
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
2433255932Salfredstatic int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2434255932Salfred u64 in_param, u64 *out_param) 2435255932Salfred{ 2436255932Salfred int srqn; 2437255932Salfred int err; 2438255932Salfred 2439255932Salfred switch (op) { 2440255932Salfred case RES_OP_RESERVE_AND_MAP: 2441255932Salfred srqn = get_param_l(&in_param); 2442255932Salfred err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); 2443255932Salfred if (err) 2444255932Salfred break; 2445255932Salfred 2446255932Salfred mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 2447255932Salfred __mlx4_srq_free_icm(dev, srqn); 2448255932Salfred break; 2449255932Salfred 2450255932Salfred default: 2451255932Salfred err = -EINVAL; 2452255932Salfred break; 2453255932Salfred } 2454255932Salfred 2455255932Salfred return err; 2456255932Salfred} 2457255932Salfred 2458255932Salfredstatic int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2459255932Salfred u64 in_param, u64 *out_param, int in_port) 2460255932Salfred{ 2461255932Salfred int port; 2462255932Salfred int err = 0; 2463255932Salfred 2464255932Salfred switch (op) { 2465255932Salfred case RES_OP_RESERVE_AND_MAP: 2466255932Salfred port = !in_port ? 
get_param_l(out_param) : in_port; 2467329159Shselasky port = mlx4_slave_convert_port( 2468329159Shselasky dev, slave, port); 2469329159Shselasky 2470329159Shselasky if (port < 0) 2471329159Shselasky return -EINVAL; 2472255932Salfred mac_del_from_slave(dev, slave, in_param, port); 2473255932Salfred __mlx4_unregister_mac(dev, port, in_param); 2474255932Salfred break; 2475255932Salfred default: 2476255932Salfred err = -EINVAL; 2477255932Salfred break; 2478255932Salfred } 2479255932Salfred 2480255932Salfred return err; 2481255932Salfred 2482255932Salfred} 2483255932Salfred 2484255932Salfredstatic int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2485255932Salfred u64 in_param, u64 *out_param, int port) 2486255932Salfred{ 2487272027Shselasky struct mlx4_priv *priv = mlx4_priv(dev); 2488272027Shselasky struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 2489255932Salfred int err = 0; 2490255932Salfred 2491329159Shselasky port = mlx4_slave_convert_port( 2492329159Shselasky dev, slave, port); 2493329159Shselasky 2494329159Shselasky if (port < 0) 2495329159Shselasky return -EINVAL; 2496255932Salfred switch (op) { 2497255932Salfred case RES_OP_RESERVE_AND_MAP: 2498329159Shselasky if (slave_state[slave].old_vlan_api) 2499272027Shselasky return 0; 2500255932Salfred if (!port) 2501255932Salfred return -EINVAL; 2502255932Salfred vlan_del_from_slave(dev, slave, in_param, port); 2503255932Salfred __mlx4_unregister_vlan(dev, port, in_param); 2504255932Salfred break; 2505255932Salfred default: 2506255932Salfred err = -EINVAL; 2507255932Salfred break; 2508255932Salfred } 2509255932Salfred 2510255932Salfred return err; 2511255932Salfred} 2512255932Salfred 2513255932Salfredstatic int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2514329159Shselasky u64 in_param, u64 *out_param) 2515255932Salfred{ 2516255932Salfred int index; 2517329159Shselasky int err; 2518255932Salfred 2519255932Salfred if (op != RES_OP_RESERVE) 
2520255932Salfred return -EINVAL; 2521255932Salfred 2522255932Salfred index = get_param_l(&in_param); 2523329159Shselasky if (index == MLX4_SINK_COUNTER_INDEX(dev)) 2524329159Shselasky return 0; 2525255932Salfred 2526329159Shselasky err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); 2527329159Shselasky if (err) 2528329159Shselasky return err; 2529255932Salfred 2530329159Shselasky __mlx4_counter_free(dev, index); 2531329159Shselasky mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 2532329159Shselasky 2533329159Shselasky return err; 2534255932Salfred} 2535255932Salfred 2536255932Salfredstatic int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2537255932Salfred u64 in_param, u64 *out_param) 2538255932Salfred{ 2539255932Salfred int xrcdn; 2540255932Salfred int err; 2541255932Salfred 2542255932Salfred if (op != RES_OP_RESERVE) 2543255932Salfred return -EINVAL; 2544255932Salfred 2545255932Salfred xrcdn = get_param_l(&in_param); 2546255932Salfred err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); 2547255932Salfred if (err) 2548255932Salfred return err; 2549255932Salfred 2550255932Salfred __mlx4_xrcd_free(dev, xrcdn); 2551255932Salfred 2552255932Salfred return err; 2553255932Salfred} 2554255932Salfred 2555255932Salfredint mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, 2556255932Salfred struct mlx4_vhcr *vhcr, 2557255932Salfred struct mlx4_cmd_mailbox *inbox, 2558255932Salfred struct mlx4_cmd_mailbox *outbox, 2559255932Salfred struct mlx4_cmd_info *cmd) 2560255932Salfred{ 2561255932Salfred int err = -EINVAL; 2562255932Salfred int alop = vhcr->op_modifier; 2563255932Salfred 2564255932Salfred switch (vhcr->in_modifier & 0xFF) { 2565255932Salfred case RES_QP: 2566255932Salfred err = qp_free_res(dev, slave, vhcr->op_modifier, alop, 2567255932Salfred vhcr->in_param); 2568255932Salfred break; 2569255932Salfred 2570255932Salfred case RES_MTT: 2571255932Salfred err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, 2572255932Salfred 
vhcr->in_param, &vhcr->out_param); 2573255932Salfred break; 2574255932Salfred 2575255932Salfred case RES_MPT: 2576255932Salfred err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, 2577255932Salfred vhcr->in_param); 2578255932Salfred break; 2579255932Salfred 2580255932Salfred case RES_CQ: 2581255932Salfred err = cq_free_res(dev, slave, vhcr->op_modifier, alop, 2582255932Salfred vhcr->in_param, &vhcr->out_param); 2583255932Salfred break; 2584255932Salfred 2585255932Salfred case RES_SRQ: 2586255932Salfred err = srq_free_res(dev, slave, vhcr->op_modifier, alop, 2587255932Salfred vhcr->in_param, &vhcr->out_param); 2588255932Salfred break; 2589255932Salfred 2590255932Salfred case RES_MAC: 2591255932Salfred err = mac_free_res(dev, slave, vhcr->op_modifier, alop, 2592255932Salfred vhcr->in_param, &vhcr->out_param, 2593255932Salfred (vhcr->in_modifier >> 8) & 0xFF); 2594255932Salfred break; 2595255932Salfred 2596255932Salfred case RES_VLAN: 2597255932Salfred err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, 2598255932Salfred vhcr->in_param, &vhcr->out_param, 2599255932Salfred (vhcr->in_modifier >> 8) & 0xFF); 2600255932Salfred break; 2601255932Salfred 2602255932Salfred case RES_COUNTER: 2603255932Salfred err = counter_free_res(dev, slave, vhcr->op_modifier, alop, 2604329159Shselasky vhcr->in_param, &vhcr->out_param); 2605255932Salfred break; 2606255932Salfred 2607255932Salfred case RES_XRCD: 2608255932Salfred err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, 2609255932Salfred vhcr->in_param, &vhcr->out_param); 2610255932Salfred 2611255932Salfred default: 2612255932Salfred break; 2613255932Salfred } 2614255932Salfred return err; 2615255932Salfred} 2616255932Salfred 2617255932Salfred/* ugly but other choices are uglier */ 2618255932Salfredstatic int mr_phys_mpt(struct mlx4_mpt_entry *mpt) 2619255932Salfred{ 2620255932Salfred return (be32_to_cpu(mpt->flags) >> 9) & 1; 2621255932Salfred} 2622255932Salfred 2623255932Salfredstatic int mr_get_mtt_addr(struct 
mlx4_mpt_entry *mpt) 2624255932Salfred{ 2625255932Salfred return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; 2626255932Salfred} 2627255932Salfred 2628255932Salfredstatic int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) 2629255932Salfred{ 2630255932Salfred return be32_to_cpu(mpt->mtt_sz); 2631255932Salfred} 2632255932Salfred 2633272027Shselaskystatic u32 mr_get_pd(struct mlx4_mpt_entry *mpt) 2634272027Shselasky{ 2635272027Shselasky return be32_to_cpu(mpt->pd_flags) & 0x00ffffff; 2636272027Shselasky} 2637272027Shselasky 2638272027Shselaskystatic int mr_is_fmr(struct mlx4_mpt_entry *mpt) 2639272027Shselasky{ 2640272027Shselasky return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG; 2641272027Shselasky} 2642272027Shselasky 2643272027Shselaskystatic int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt) 2644272027Shselasky{ 2645272027Shselasky return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE; 2646272027Shselasky} 2647272027Shselasky 2648272027Shselaskystatic int mr_is_region(struct mlx4_mpt_entry *mpt) 2649272027Shselasky{ 2650272027Shselasky return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION; 2651272027Shselasky} 2652272027Shselasky 2653255932Salfredstatic int qp_get_mtt_addr(struct mlx4_qp_context *qpc) 2654255932Salfred{ 2655255932Salfred return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; 2656255932Salfred} 2657255932Salfred 2658255932Salfredstatic int srq_get_mtt_addr(struct mlx4_srq_context *srqc) 2659255932Salfred{ 2660255932Salfred return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; 2661255932Salfred} 2662255932Salfred 2663255932Salfredstatic int qp_get_mtt_size(struct mlx4_qp_context *qpc) 2664255932Salfred{ 2665255932Salfred int page_shift = (qpc->log_page_size & 0x3f) + 12; 2666255932Salfred int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; 2667255932Salfred int log_sq_sride = qpc->sq_size_stride & 7; 2668255932Salfred int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf; 2669255932Salfred int log_rq_stride = qpc->rq_size_stride & 7; 
2670255932Salfred int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; 2671255932Salfred int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; 2672272027Shselasky u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 2673272027Shselasky int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0; 2674255932Salfred int sq_size; 2675255932Salfred int rq_size; 2676255932Salfred int total_pages; 2677255932Salfred int total_mem; 2678255932Salfred int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; 2679255932Salfred 2680255932Salfred sq_size = 1 << (log_sq_size + log_sq_sride + 4); 2681255932Salfred rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); 2682255932Salfred total_mem = sq_size + rq_size; 2683255932Salfred total_pages = 2684255932Salfred roundup_pow_of_two((total_mem + (page_offset << 6)) >> 2685255932Salfred page_shift); 2686255932Salfred 2687255932Salfred return total_pages; 2688255932Salfred} 2689255932Salfred 2690255932Salfredstatic int check_mtt_range(struct mlx4_dev *dev, int slave, int start, 2691255932Salfred int size, struct res_mtt *mtt) 2692255932Salfred{ 2693255932Salfred int res_start = mtt->com.res_id; 2694255932Salfred int res_size = (1 << mtt->order); 2695255932Salfred 2696255932Salfred if (start < res_start || start + size > res_start + res_size) 2697255932Salfred return -EPERM; 2698255932Salfred return 0; 2699255932Salfred} 2700255932Salfred 2701255932Salfredint mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, 2702255932Salfred struct mlx4_vhcr *vhcr, 2703255932Salfred struct mlx4_cmd_mailbox *inbox, 2704255932Salfred struct mlx4_cmd_mailbox *outbox, 2705255932Salfred struct mlx4_cmd_info *cmd) 2706255932Salfred{ 2707255932Salfred int err; 2708255932Salfred int index = vhcr->in_modifier; 2709255932Salfred struct res_mtt *mtt; 2710255932Salfred struct res_mpt *mpt; 2711255932Salfred int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; 2712255932Salfred int phys; 2713255932Salfred int id; 2714272027Shselasky u32 pd; 
2715272027Shselasky int pd_slave; 2716255932Salfred 2717255932Salfred id = index & mpt_mask(dev); 2718255932Salfred err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); 2719255932Salfred if (err) 2720255932Salfred return err; 2721255932Salfred 2722329159Shselasky /* Disable memory windows for VFs. */ 2723272027Shselasky if (!mr_is_region(inbox->buf)) { 2724329159Shselasky err = -EPERM; 2725272027Shselasky goto ex_abort; 2726272027Shselasky } 2727272027Shselasky 2728272027Shselasky /* Make sure that the PD bits related to the slave id are zeros. */ 2729272027Shselasky pd = mr_get_pd(inbox->buf); 2730272027Shselasky pd_slave = (pd >> 17) & 0x7f; 2731329159Shselasky if (pd_slave != 0 && --pd_slave != slave) { 2732272027Shselasky err = -EPERM; 2733272027Shselasky goto ex_abort; 2734272027Shselasky } 2735272027Shselasky 2736272027Shselasky if (mr_is_fmr(inbox->buf)) { 2737272027Shselasky /* FMR and Bind Enable are forbidden in slave devices. */ 2738272027Shselasky if (mr_is_bind_enabled(inbox->buf)) { 2739272027Shselasky err = -EPERM; 2740272027Shselasky goto ex_abort; 2741272027Shselasky } 2742272027Shselasky /* FMR and Memory Windows are also forbidden. 
*/ 2743272027Shselasky if (!mr_is_region(inbox->buf)) { 2744272027Shselasky err = -EPERM; 2745272027Shselasky goto ex_abort; 2746272027Shselasky } 2747272027Shselasky } 2748272027Shselasky 2749255932Salfred phys = mr_phys_mpt(inbox->buf); 2750255932Salfred if (!phys) { 2751255932Salfred err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2752255932Salfred if (err) 2753255932Salfred goto ex_abort; 2754255932Salfred 2755255932Salfred err = check_mtt_range(dev, slave, mtt_base, 2756255932Salfred mr_get_mtt_size(inbox->buf), mtt); 2757255932Salfred if (err) 2758255932Salfred goto ex_put; 2759255932Salfred 2760255932Salfred mpt->mtt = mtt; 2761255932Salfred } 2762255932Salfred 2763255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2764255932Salfred if (err) 2765255932Salfred goto ex_put; 2766255932Salfred 2767255932Salfred if (!phys) { 2768255932Salfred atomic_inc(&mtt->ref_count); 2769255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 2770255932Salfred } 2771255932Salfred 2772255932Salfred res_end_move(dev, slave, RES_MPT, id); 2773255932Salfred return 0; 2774255932Salfred 2775255932Salfredex_put: 2776255932Salfred if (!phys) 2777255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 2778255932Salfredex_abort: 2779255932Salfred res_abort_move(dev, slave, RES_MPT, id); 2780255932Salfred 2781255932Salfred return err; 2782255932Salfred} 2783255932Salfred 2784255932Salfredint mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, 2785255932Salfred struct mlx4_vhcr *vhcr, 2786255932Salfred struct mlx4_cmd_mailbox *inbox, 2787255932Salfred struct mlx4_cmd_mailbox *outbox, 2788255932Salfred struct mlx4_cmd_info *cmd) 2789255932Salfred{ 2790255932Salfred int err; 2791255932Salfred int index = vhcr->in_modifier; 2792255932Salfred struct res_mpt *mpt; 2793255932Salfred int id; 2794255932Salfred 2795255932Salfred id = index & mpt_mask(dev); 2796255932Salfred err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); 
2797255932Salfred if (err) 2798255932Salfred return err; 2799255932Salfred 2800255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2801255932Salfred if (err) 2802255932Salfred goto ex_abort; 2803255932Salfred 2804255932Salfred if (mpt->mtt) 2805255932Salfred atomic_dec(&mpt->mtt->ref_count); 2806255932Salfred 2807255932Salfred res_end_move(dev, slave, RES_MPT, id); 2808255932Salfred return 0; 2809255932Salfred 2810255932Salfredex_abort: 2811255932Salfred res_abort_move(dev, slave, RES_MPT, id); 2812255932Salfred 2813255932Salfred return err; 2814255932Salfred} 2815255932Salfred 2816255932Salfredint mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, 2817255932Salfred struct mlx4_vhcr *vhcr, 2818255932Salfred struct mlx4_cmd_mailbox *inbox, 2819255932Salfred struct mlx4_cmd_mailbox *outbox, 2820255932Salfred struct mlx4_cmd_info *cmd) 2821255932Salfred{ 2822255932Salfred int err; 2823255932Salfred int index = vhcr->in_modifier; 2824255932Salfred struct res_mpt *mpt; 2825255932Salfred int id; 2826255932Salfred 2827255932Salfred id = index & mpt_mask(dev); 2828255932Salfred err = get_res(dev, slave, id, RES_MPT, &mpt); 2829255932Salfred if (err) 2830255932Salfred return err; 2831255932Salfred 2832329159Shselasky if (mpt->com.from_state == RES_MPT_MAPPED) { 2833329159Shselasky /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do 2834329159Shselasky * that, the VF must read the MPT. But since the MPT entry memory is not 2835329159Shselasky * in the VF's virtual memory space, it must use QUERY_MPT to obtain the 2836329159Shselasky * entry contents. To guarantee that the MPT cannot be changed, the driver 2837329159Shselasky * must perform HW2SW_MPT before this query and return the MPT entry to HW 2838329159Shselasky * ownership fofollowing the change. The change here allows the VF to 2839329159Shselasky * perform QUERY_MPT also when the entry is in SW ownership. 
2840329159Shselasky */ 2841329159Shselasky struct mlx4_mpt_entry *mpt_entry = mlx4_table_find( 2842329159Shselasky &mlx4_priv(dev)->mr_table.dmpt_table, 2843329159Shselasky mpt->key, NULL); 2844329159Shselasky 2845329159Shselasky if (NULL == mpt_entry || NULL == outbox->buf) { 2846329159Shselasky err = -EINVAL; 2847329159Shselasky goto out; 2848329159Shselasky } 2849329159Shselasky 2850329159Shselasky memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); 2851329159Shselasky 2852329159Shselasky err = 0; 2853329159Shselasky } else if (mpt->com.from_state == RES_MPT_HW) { 2854329159Shselasky err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2855329159Shselasky } else { 2856255932Salfred err = -EBUSY; 2857255932Salfred goto out; 2858255932Salfred } 2859255932Salfred 2860255932Salfred 2861255932Salfredout: 2862255932Salfred put_res(dev, slave, id, RES_MPT); 2863255932Salfred return err; 2864255932Salfred} 2865255932Salfred 2866255932Salfredstatic int qp_get_rcqn(struct mlx4_qp_context *qpc) 2867255932Salfred{ 2868255932Salfred return be32_to_cpu(qpc->cqn_recv) & 0xffffff; 2869255932Salfred} 2870255932Salfred 2871255932Salfredstatic int qp_get_scqn(struct mlx4_qp_context *qpc) 2872255932Salfred{ 2873255932Salfred return be32_to_cpu(qpc->cqn_send) & 0xffffff; 2874255932Salfred} 2875255932Salfred 2876255932Salfredstatic u32 qp_get_srqn(struct mlx4_qp_context *qpc) 2877255932Salfred{ 2878255932Salfred return be32_to_cpu(qpc->srqn) & 0x1ffffff; 2879255932Salfred} 2880255932Salfred 2881255932Salfredstatic void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, 2882255932Salfred struct mlx4_qp_context *context) 2883255932Salfred{ 2884255932Salfred u32 qpn = vhcr->in_modifier & 0xffffff; 2885255932Salfred u32 qkey = 0; 2886255932Salfred 2887255932Salfred if (mlx4_get_parav_qkey(dev, qpn, &qkey)) 2888255932Salfred return; 2889255932Salfred 2890255932Salfred /* adjust qkey in qp context */ 2891255932Salfred context->qkey = cpu_to_be32(qkey); 
2892255932Salfred} 2893255932Salfred 2894329159Shselaskystatic int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, 2895329159Shselasky struct mlx4_qp_context *qpc, 2896329159Shselasky struct mlx4_cmd_mailbox *inbox); 2897329159Shselasky 2898255932Salfredint mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, 2899255932Salfred struct mlx4_vhcr *vhcr, 2900255932Salfred struct mlx4_cmd_mailbox *inbox, 2901255932Salfred struct mlx4_cmd_mailbox *outbox, 2902255932Salfred struct mlx4_cmd_info *cmd) 2903255932Salfred{ 2904255932Salfred int err; 2905255932Salfred int qpn = vhcr->in_modifier & 0x7fffff; 2906255932Salfred struct res_mtt *mtt; 2907255932Salfred struct res_qp *qp; 2908255932Salfred struct mlx4_qp_context *qpc = inbox->buf + 8; 2909255932Salfred int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; 2910255932Salfred int mtt_size = qp_get_mtt_size(qpc); 2911255932Salfred struct res_cq *rcq; 2912255932Salfred struct res_cq *scq; 2913255932Salfred int rcqn = qp_get_rcqn(qpc); 2914255932Salfred int scqn = qp_get_scqn(qpc); 2915255932Salfred u32 srqn = qp_get_srqn(qpc) & 0xffffff; 2916255932Salfred int use_srq = (qp_get_srqn(qpc) >> 24) & 1; 2917255932Salfred struct res_srq *srq; 2918255932Salfred int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; 2919255932Salfred 2920329159Shselasky err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 2921329159Shselasky if (err) 2922329159Shselasky return err; 2923329159Shselasky 2924255932Salfred err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); 2925255932Salfred if (err) 2926255932Salfred return err; 2927255932Salfred qp->local_qpn = local_qpn; 2928272027Shselasky qp->sched_queue = 0; 2929272027Shselasky qp->param3 = 0; 2930272027Shselasky qp->vlan_control = 0; 2931272027Shselasky qp->fvl_rx = 0; 2932272027Shselasky qp->pri_path_fl = 0; 2933272027Shselasky qp->vlan_index = 0; 2934272027Shselasky qp->feup = 0; 2935272027Shselasky qp->qpc_flags = be32_to_cpu(qpc->flags); 2936255932Salfred 
2937255932Salfred err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 2938255932Salfred if (err) 2939255932Salfred goto ex_abort; 2940255932Salfred 2941255932Salfred err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); 2942255932Salfred if (err) 2943255932Salfred goto ex_put_mtt; 2944255932Salfred 2945255932Salfred err = get_res(dev, slave, rcqn, RES_CQ, &rcq); 2946255932Salfred if (err) 2947255932Salfred goto ex_put_mtt; 2948255932Salfred 2949255932Salfred if (scqn != rcqn) { 2950255932Salfred err = get_res(dev, slave, scqn, RES_CQ, &scq); 2951255932Salfred if (err) 2952255932Salfred goto ex_put_rcq; 2953255932Salfred } else 2954255932Salfred scq = rcq; 2955255932Salfred 2956255932Salfred if (use_srq) { 2957255932Salfred err = get_res(dev, slave, srqn, RES_SRQ, &srq); 2958255932Salfred if (err) 2959255932Salfred goto ex_put_scq; 2960255932Salfred } 2961255932Salfred 2962255932Salfred adjust_proxy_tun_qkey(dev, vhcr, qpc); 2963255932Salfred update_pkey_index(dev, slave, inbox); 2964255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2965255932Salfred if (err) 2966255932Salfred goto ex_put_srq; 2967255932Salfred atomic_inc(&mtt->ref_count); 2968255932Salfred qp->mtt = mtt; 2969255932Salfred atomic_inc(&rcq->ref_count); 2970255932Salfred qp->rcq = rcq; 2971255932Salfred atomic_inc(&scq->ref_count); 2972255932Salfred qp->scq = scq; 2973255932Salfred 2974255932Salfred if (scqn != rcqn) 2975255932Salfred put_res(dev, slave, scqn, RES_CQ); 2976255932Salfred 2977255932Salfred if (use_srq) { 2978255932Salfred atomic_inc(&srq->ref_count); 2979255932Salfred put_res(dev, slave, srqn, RES_SRQ); 2980255932Salfred qp->srq = srq; 2981255932Salfred } 2982255932Salfred put_res(dev, slave, rcqn, RES_CQ); 2983255932Salfred put_res(dev, slave, mtt_base, RES_MTT); 2984255932Salfred res_end_move(dev, slave, RES_QP, qpn); 2985255932Salfred 2986255932Salfred return 0; 2987255932Salfred 2988255932Salfredex_put_srq: 2989255932Salfred if (use_srq) 
2990255932Salfred put_res(dev, slave, srqn, RES_SRQ); 2991255932Salfredex_put_scq: 2992255932Salfred if (scqn != rcqn) 2993255932Salfred put_res(dev, slave, scqn, RES_CQ); 2994255932Salfredex_put_rcq: 2995255932Salfred put_res(dev, slave, rcqn, RES_CQ); 2996255932Salfredex_put_mtt: 2997255932Salfred put_res(dev, slave, mtt_base, RES_MTT); 2998255932Salfredex_abort: 2999255932Salfred res_abort_move(dev, slave, RES_QP, qpn); 3000255932Salfred 3001255932Salfred return err; 3002255932Salfred} 3003255932Salfred 3004255932Salfredstatic int eq_get_mtt_addr(struct mlx4_eq_context *eqc) 3005255932Salfred{ 3006255932Salfred return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; 3007255932Salfred} 3008255932Salfred 3009255932Salfredstatic int eq_get_mtt_size(struct mlx4_eq_context *eqc) 3010255932Salfred{ 3011255932Salfred int log_eq_size = eqc->log_eq_size & 0x1f; 3012255932Salfred int page_shift = (eqc->log_page_size & 0x3f) + 12; 3013255932Salfred 3014255932Salfred if (log_eq_size + 5 < page_shift) 3015255932Salfred return 1; 3016255932Salfred 3017255932Salfred return 1 << (log_eq_size + 5 - page_shift); 3018255932Salfred} 3019255932Salfred 3020255932Salfredstatic int cq_get_mtt_addr(struct mlx4_cq_context *cqc) 3021255932Salfred{ 3022255932Salfred return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; 3023255932Salfred} 3024255932Salfred 3025255932Salfredstatic int cq_get_mtt_size(struct mlx4_cq_context *cqc) 3026255932Salfred{ 3027255932Salfred int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; 3028255932Salfred int page_shift = (cqc->log_page_size & 0x3f) + 12; 3029255932Salfred 3030255932Salfred if (log_cq_size + 5 < page_shift) 3031255932Salfred return 1; 3032255932Salfred 3033255932Salfred return 1 << (log_cq_size + 5 - page_shift); 3034255932Salfred} 3035255932Salfred 3036255932Salfredint mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, 3037255932Salfred struct mlx4_vhcr *vhcr, 3038255932Salfred struct mlx4_cmd_mailbox *inbox, 
3039255932Salfred struct mlx4_cmd_mailbox *outbox, 3040255932Salfred struct mlx4_cmd_info *cmd) 3041255932Salfred{ 3042255932Salfred int err; 3043255932Salfred int eqn = vhcr->in_modifier; 3044329159Shselasky int res_id = (slave << 10) | eqn; 3045255932Salfred struct mlx4_eq_context *eqc = inbox->buf; 3046255932Salfred int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; 3047255932Salfred int mtt_size = eq_get_mtt_size(eqc); 3048255932Salfred struct res_eq *eq; 3049255932Salfred struct res_mtt *mtt; 3050255932Salfred 3051255932Salfred err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); 3052255932Salfred if (err) 3053255932Salfred return err; 3054255932Salfred err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); 3055255932Salfred if (err) 3056255932Salfred goto out_add; 3057255932Salfred 3058255932Salfred err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3059255932Salfred if (err) 3060255932Salfred goto out_move; 3061255932Salfred 3062255932Salfred err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); 3063255932Salfred if (err) 3064255932Salfred goto out_put; 3065255932Salfred 3066255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3067255932Salfred if (err) 3068255932Salfred goto out_put; 3069255932Salfred 3070255932Salfred atomic_inc(&mtt->ref_count); 3071255932Salfred eq->mtt = mtt; 3072255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3073255932Salfred res_end_move(dev, slave, RES_EQ, res_id); 3074255932Salfred return 0; 3075255932Salfred 3076255932Salfredout_put: 3077255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3078255932Salfredout_move: 3079255932Salfred res_abort_move(dev, slave, RES_EQ, res_id); 3080255932Salfredout_add: 3081255932Salfred rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); 3082255932Salfred return err; 3083255932Salfred} 3084255932Salfred 3085329159Shselaskyint mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, 3086329159Shselasky struct mlx4_vhcr *vhcr, 
3087329159Shselasky struct mlx4_cmd_mailbox *inbox, 3088329159Shselasky struct mlx4_cmd_mailbox *outbox, 3089329159Shselasky struct mlx4_cmd_info *cmd) 3090329159Shselasky{ 3091329159Shselasky int err; 3092329159Shselasky u8 get = vhcr->op_modifier; 3093329159Shselasky 3094329159Shselasky if (get != 1) 3095329159Shselasky return -EPERM; 3096329159Shselasky 3097329159Shselasky err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3098329159Shselasky 3099329159Shselasky return err; 3100329159Shselasky} 3101329159Shselasky 3102255932Salfredstatic int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, 3103255932Salfred int len, struct res_mtt **res) 3104255932Salfred{ 3105255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 3106255932Salfred struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 3107255932Salfred struct res_mtt *mtt; 3108255932Salfred int err = -EINVAL; 3109255932Salfred 3110255932Salfred spin_lock_irq(mlx4_tlock(dev)); 3111255932Salfred list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], 3112255932Salfred com.list) { 3113255932Salfred if (!check_mtt_range(dev, slave, start, len, mtt)) { 3114255932Salfred *res = mtt; 3115255932Salfred mtt->com.from_state = mtt->com.state; 3116255932Salfred mtt->com.state = RES_MTT_BUSY; 3117255932Salfred err = 0; 3118255932Salfred break; 3119255932Salfred } 3120255932Salfred } 3121255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 3122255932Salfred 3123255932Salfred return err; 3124255932Salfred} 3125255932Salfred 3126255932Salfredstatic int verify_qp_parameters(struct mlx4_dev *dev, 3127329159Shselasky struct mlx4_vhcr *vhcr, 3128255932Salfred struct mlx4_cmd_mailbox *inbox, 3129255932Salfred enum qp_transition transition, u8 slave) 3130255932Salfred{ 3131255932Salfred u32 qp_type; 3132329159Shselasky u32 qpn; 3133255932Salfred struct mlx4_qp_context *qp_ctx; 3134255932Salfred enum mlx4_qp_optpar optpar; 3135255932Salfred int port; 3136255932Salfred int 
num_gids; 3137255932Salfred 3138255932Salfred qp_ctx = inbox->buf + 8; 3139255932Salfred qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 3140255932Salfred optpar = be32_to_cpu(*(__be32 *) inbox->buf); 3141255932Salfred 3142329159Shselasky if (slave != mlx4_master_func_num(dev)) { 3143329159Shselasky qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; 3144329159Shselasky /* setting QP rate-limit is disallowed for VFs */ 3145329159Shselasky if (qp_ctx->rate_limit_params) 3146329159Shselasky return -EPERM; 3147329159Shselasky } 3148329159Shselasky 3149255932Salfred switch (qp_type) { 3150255932Salfred case MLX4_QP_ST_RC: 3151329159Shselasky case MLX4_QP_ST_XRC: 3152255932Salfred case MLX4_QP_ST_UC: 3153255932Salfred switch (transition) { 3154255932Salfred case QP_TRANS_INIT2RTR: 3155255932Salfred case QP_TRANS_RTR2RTS: 3156255932Salfred case QP_TRANS_RTS2RTS: 3157255932Salfred case QP_TRANS_SQD2SQD: 3158255932Salfred case QP_TRANS_SQD2RTS: 3159329159Shselasky if (slave != mlx4_master_func_num(dev)) { 3160255932Salfred if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { 3161255932Salfred port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 3162255932Salfred if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 3163329159Shselasky num_gids = mlx4_get_slave_num_gids(dev, slave, port); 3164255932Salfred else 3165255932Salfred num_gids = 1; 3166255932Salfred if (qp_ctx->pri_path.mgid_index >= num_gids) 3167255932Salfred return -EINVAL; 3168255932Salfred } 3169255932Salfred if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 3170255932Salfred port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; 3171255932Salfred if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) 3172329159Shselasky num_gids = mlx4_get_slave_num_gids(dev, slave, port); 3173255932Salfred else 3174255932Salfred num_gids = 1; 3175255932Salfred if (qp_ctx->alt_path.mgid_index >= num_gids) 3176255932Salfred return -EINVAL; 3177255932Salfred } 3178329159Shselasky } 3179255932Salfred break; 3180255932Salfred default: 3181255932Salfred 
break; 3182255932Salfred } 3183329159Shselasky break; 3184255932Salfred 3185329159Shselasky case MLX4_QP_ST_MLX: 3186329159Shselasky qpn = vhcr->in_modifier & 0x7fffff; 3187329159Shselasky port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; 3188329159Shselasky if (transition == QP_TRANS_INIT2RTR && 3189329159Shselasky slave != mlx4_master_func_num(dev) && 3190329159Shselasky mlx4_is_qp_reserved(dev, qpn) && 3191329159Shselasky !mlx4_vf_smi_enabled(dev, slave, port)) { 3192329159Shselasky /* only enabled VFs may create MLX proxy QPs */ 3193329159Shselasky mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n", 3194329159Shselasky __func__, slave, port); 3195329159Shselasky return -EPERM; 3196329159Shselasky } 3197255932Salfred break; 3198329159Shselasky 3199255932Salfred default: 3200255932Salfred break; 3201255932Salfred } 3202255932Salfred 3203255932Salfred return 0; 3204255932Salfred} 3205255932Salfred 3206255932Salfredint mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, 3207255932Salfred struct mlx4_vhcr *vhcr, 3208255932Salfred struct mlx4_cmd_mailbox *inbox, 3209255932Salfred struct mlx4_cmd_mailbox *outbox, 3210255932Salfred struct mlx4_cmd_info *cmd) 3211255932Salfred{ 3212255932Salfred struct mlx4_mtt mtt; 3213255932Salfred __be64 *page_list = inbox->buf; 3214255932Salfred u64 *pg_list = (u64 *)page_list; 3215255932Salfred int i; 3216255932Salfred struct res_mtt *rmtt = NULL; 3217255932Salfred int start = be64_to_cpu(page_list[0]); 3218255932Salfred int npages = vhcr->in_modifier; 3219255932Salfred int err; 3220255932Salfred 3221255932Salfred err = get_containing_mtt(dev, slave, start, npages, &rmtt); 3222255932Salfred if (err) 3223255932Salfred return err; 3224255932Salfred 3225255932Salfred /* Call the SW implementation of write_mtt: 3226255932Salfred * - Prepare a dummy mtt struct 3227299179Spfg * - Translate inbox contents to simple addresses in host endianness */ 3228255932Salfred mtt.offset = 0; /* 
TBD this is broken but I don't handle it since 3229255932Salfred we don't really use it */ 3230255932Salfred mtt.order = 0; 3231255932Salfred mtt.page_shift = 0; 3232255932Salfred for (i = 0; i < npages; ++i) 3233255932Salfred pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); 3234255932Salfred 3235255932Salfred err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, 3236255932Salfred ((u64 *)page_list + 2)); 3237255932Salfred 3238255932Salfred if (rmtt) 3239255932Salfred put_res(dev, slave, rmtt->com.res_id, RES_MTT); 3240255932Salfred 3241255932Salfred return err; 3242255932Salfred} 3243255932Salfred 3244255932Salfredint mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, 3245255932Salfred struct mlx4_vhcr *vhcr, 3246255932Salfred struct mlx4_cmd_mailbox *inbox, 3247255932Salfred struct mlx4_cmd_mailbox *outbox, 3248255932Salfred struct mlx4_cmd_info *cmd) 3249255932Salfred{ 3250255932Salfred int eqn = vhcr->in_modifier; 3251329159Shselasky int res_id = eqn | (slave << 10); 3252255932Salfred struct res_eq *eq; 3253255932Salfred int err; 3254255932Salfred 3255255932Salfred err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); 3256255932Salfred if (err) 3257255932Salfred return err; 3258255932Salfred 3259255932Salfred err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); 3260255932Salfred if (err) 3261255932Salfred goto ex_abort; 3262255932Salfred 3263255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3264255932Salfred if (err) 3265255932Salfred goto ex_put; 3266255932Salfred 3267255932Salfred atomic_dec(&eq->mtt->ref_count); 3268255932Salfred put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); 3269255932Salfred res_end_move(dev, slave, RES_EQ, res_id); 3270255932Salfred rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); 3271255932Salfred 3272255932Salfred return 0; 3273255932Salfred 3274255932Salfredex_put: 3275255932Salfred put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); 
3276255932Salfredex_abort: 3277255932Salfred res_abort_move(dev, slave, RES_EQ, res_id); 3278255932Salfred 3279255932Salfred return err; 3280255932Salfred} 3281255932Salfred 3282255932Salfredint mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) 3283255932Salfred{ 3284255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 3285255932Salfred struct mlx4_slave_event_eq_info *event_eq; 3286255932Salfred struct mlx4_cmd_mailbox *mailbox; 3287255932Salfred u32 in_modifier = 0; 3288255932Salfred int err; 3289255932Salfred int res_id; 3290255932Salfred struct res_eq *req; 3291255932Salfred 3292255932Salfred if (!priv->mfunc.master.slave_state) 3293255932Salfred return -EINVAL; 3294255932Salfred 3295272027Shselasky /* check for slave valid, slave not PF, and slave active */ 3296329159Shselasky if (slave < 0 || slave > dev->persist->num_vfs || 3297272027Shselasky slave == dev->caps.function || 3298272027Shselasky !priv->mfunc.master.slave_state[slave].active) 3299272027Shselasky return 0; 3300272027Shselasky 3301255932Salfred event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; 3302255932Salfred 3303255932Salfred /* Create the event only if the slave is registered */ 3304255932Salfred if (event_eq->eqn < 0) 3305255932Salfred return 0; 3306255932Salfred 3307255932Salfred mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3308329159Shselasky res_id = (slave << 10) | event_eq->eqn; 3309255932Salfred err = get_res(dev, slave, res_id, RES_EQ, &req); 3310255932Salfred if (err) 3311255932Salfred goto unlock; 3312255932Salfred 3313255932Salfred if (req->com.from_state != RES_EQ_HW) { 3314255932Salfred err = -EINVAL; 3315255932Salfred goto put; 3316255932Salfred } 3317255932Salfred 3318255932Salfred mailbox = mlx4_alloc_cmd_mailbox(dev); 3319255932Salfred if (IS_ERR(mailbox)) { 3320255932Salfred err = PTR_ERR(mailbox); 3321255932Salfred goto put; 3322255932Salfred } 3323255932Salfred 3324255932Salfred if (eqe->type == MLX4_EVENT_TYPE_CMD) { 
3325255932Salfred ++event_eq->token; 3326255932Salfred eqe->event.cmd.token = cpu_to_be16(event_eq->token); 3327255932Salfred } 3328255932Salfred 3329255932Salfred memcpy(mailbox->buf, (u8 *) eqe, 28); 3330255932Salfred 3331329159Shselasky in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); 3332255932Salfred 3333255932Salfred err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, 3334255932Salfred MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, 3335255932Salfred MLX4_CMD_NATIVE); 3336255932Salfred 3337255932Salfred put_res(dev, slave, res_id, RES_EQ); 3338255932Salfred mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3339255932Salfred mlx4_free_cmd_mailbox(dev, mailbox); 3340255932Salfred return err; 3341255932Salfred 3342255932Salfredput: 3343255932Salfred put_res(dev, slave, res_id, RES_EQ); 3344255932Salfred 3345255932Salfredunlock: 3346255932Salfred mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3347255932Salfred return err; 3348255932Salfred} 3349255932Salfred 3350255932Salfredint mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, 3351255932Salfred struct mlx4_vhcr *vhcr, 3352255932Salfred struct mlx4_cmd_mailbox *inbox, 3353255932Salfred struct mlx4_cmd_mailbox *outbox, 3354255932Salfred struct mlx4_cmd_info *cmd) 3355255932Salfred{ 3356255932Salfred int eqn = vhcr->in_modifier; 3357329159Shselasky int res_id = eqn | (slave << 10); 3358255932Salfred struct res_eq *eq; 3359255932Salfred int err; 3360255932Salfred 3361255932Salfred err = get_res(dev, slave, res_id, RES_EQ, &eq); 3362255932Salfred if (err) 3363255932Salfred return err; 3364255932Salfred 3365255932Salfred if (eq->com.from_state != RES_EQ_HW) { 3366255932Salfred err = -EINVAL; 3367255932Salfred goto ex_put; 3368255932Salfred } 3369255932Salfred 3370255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3371255932Salfred 3372255932Salfredex_put: 3373255932Salfred put_res(dev, slave, res_id, RES_EQ); 3374255932Salfred return err; 3375255932Salfred} 
3376255932Salfred 3377255932Salfredint mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, 3378255932Salfred struct mlx4_vhcr *vhcr, 3379255932Salfred struct mlx4_cmd_mailbox *inbox, 3380255932Salfred struct mlx4_cmd_mailbox *outbox, 3381255932Salfred struct mlx4_cmd_info *cmd) 3382255932Salfred{ 3383255932Salfred int err; 3384255932Salfred int cqn = vhcr->in_modifier; 3385255932Salfred struct mlx4_cq_context *cqc = inbox->buf; 3386255932Salfred int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3387329159Shselasky struct res_cq *cq = NULL; 3388255932Salfred struct res_mtt *mtt; 3389255932Salfred 3390255932Salfred err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); 3391255932Salfred if (err) 3392255932Salfred return err; 3393255932Salfred err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3394255932Salfred if (err) 3395255932Salfred goto out_move; 3396255932Salfred err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); 3397255932Salfred if (err) 3398255932Salfred goto out_put; 3399255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3400255932Salfred if (err) 3401255932Salfred goto out_put; 3402255932Salfred atomic_inc(&mtt->ref_count); 3403255932Salfred cq->mtt = mtt; 3404255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3405255932Salfred res_end_move(dev, slave, RES_CQ, cqn); 3406255932Salfred return 0; 3407255932Salfred 3408255932Salfredout_put: 3409255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3410255932Salfredout_move: 3411255932Salfred res_abort_move(dev, slave, RES_CQ, cqn); 3412255932Salfred return err; 3413255932Salfred} 3414255932Salfred 3415255932Salfredint mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, 3416255932Salfred struct mlx4_vhcr *vhcr, 3417255932Salfred struct mlx4_cmd_mailbox *inbox, 3418255932Salfred struct mlx4_cmd_mailbox *outbox, 3419255932Salfred struct mlx4_cmd_info *cmd) 3420255932Salfred{ 3421255932Salfred int err; 3422255932Salfred int cqn 
= vhcr->in_modifier; 3423329159Shselasky struct res_cq *cq = NULL; 3424255932Salfred 3425255932Salfred err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); 3426255932Salfred if (err) 3427255932Salfred return err; 3428255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3429255932Salfred if (err) 3430255932Salfred goto out_move; 3431255932Salfred atomic_dec(&cq->mtt->ref_count); 3432255932Salfred res_end_move(dev, slave, RES_CQ, cqn); 3433255932Salfred return 0; 3434255932Salfred 3435255932Salfredout_move: 3436255932Salfred res_abort_move(dev, slave, RES_CQ, cqn); 3437255932Salfred return err; 3438255932Salfred} 3439255932Salfred 3440255932Salfredint mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, 3441255932Salfred struct mlx4_vhcr *vhcr, 3442255932Salfred struct mlx4_cmd_mailbox *inbox, 3443255932Salfred struct mlx4_cmd_mailbox *outbox, 3444255932Salfred struct mlx4_cmd_info *cmd) 3445255932Salfred{ 3446255932Salfred int cqn = vhcr->in_modifier; 3447255932Salfred struct res_cq *cq; 3448255932Salfred int err; 3449255932Salfred 3450255932Salfred err = get_res(dev, slave, cqn, RES_CQ, &cq); 3451255932Salfred if (err) 3452255932Salfred return err; 3453255932Salfred 3454255932Salfred if (cq->com.from_state != RES_CQ_HW) 3455255932Salfred goto ex_put; 3456255932Salfred 3457255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3458255932Salfredex_put: 3459255932Salfred put_res(dev, slave, cqn, RES_CQ); 3460255932Salfred 3461255932Salfred return err; 3462255932Salfred} 3463255932Salfred 3464255932Salfredstatic int handle_resize(struct mlx4_dev *dev, int slave, 3465255932Salfred struct mlx4_vhcr *vhcr, 3466255932Salfred struct mlx4_cmd_mailbox *inbox, 3467255932Salfred struct mlx4_cmd_mailbox *outbox, 3468255932Salfred struct mlx4_cmd_info *cmd, 3469255932Salfred struct res_cq *cq) 3470255932Salfred{ 3471255932Salfred int err; 3472255932Salfred struct res_mtt *orig_mtt; 3473255932Salfred struct res_mtt *mtt; 
3474255932Salfred struct mlx4_cq_context *cqc = inbox->buf; 3475255932Salfred int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; 3476255932Salfred 3477255932Salfred err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); 3478255932Salfred if (err) 3479255932Salfred return err; 3480255932Salfred 3481255932Salfred if (orig_mtt != cq->mtt) { 3482255932Salfred err = -EINVAL; 3483255932Salfred goto ex_put; 3484255932Salfred } 3485255932Salfred 3486255932Salfred err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3487255932Salfred if (err) 3488255932Salfred goto ex_put; 3489255932Salfred 3490255932Salfred err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); 3491255932Salfred if (err) 3492255932Salfred goto ex_put1; 3493255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3494255932Salfred if (err) 3495255932Salfred goto ex_put1; 3496255932Salfred atomic_dec(&orig_mtt->ref_count); 3497255932Salfred put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); 3498255932Salfred atomic_inc(&mtt->ref_count); 3499255932Salfred cq->mtt = mtt; 3500255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3501255932Salfred return 0; 3502255932Salfred 3503255932Salfredex_put1: 3504255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3505255932Salfredex_put: 3506255932Salfred put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); 3507255932Salfred 3508255932Salfred return err; 3509255932Salfred 3510255932Salfred} 3511255932Salfred 3512255932Salfredint mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, 3513255932Salfred struct mlx4_vhcr *vhcr, 3514255932Salfred struct mlx4_cmd_mailbox *inbox, 3515255932Salfred struct mlx4_cmd_mailbox *outbox, 3516255932Salfred struct mlx4_cmd_info *cmd) 3517255932Salfred{ 3518255932Salfred int cqn = vhcr->in_modifier; 3519255932Salfred struct res_cq *cq; 3520255932Salfred int err; 3521255932Salfred 3522255932Salfred err = get_res(dev, slave, cqn, RES_CQ, &cq); 3523255932Salfred if 
(err) 3524255932Salfred return err; 3525255932Salfred 3526255932Salfred if (cq->com.from_state != RES_CQ_HW) 3527255932Salfred goto ex_put; 3528255932Salfred 3529255932Salfred if (vhcr->op_modifier == 0) { 3530255932Salfred err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); 3531255932Salfred goto ex_put; 3532255932Salfred } 3533255932Salfred 3534255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3535255932Salfredex_put: 3536255932Salfred put_res(dev, slave, cqn, RES_CQ); 3537255932Salfred 3538255932Salfred return err; 3539255932Salfred} 3540255932Salfred 3541255932Salfredstatic int srq_get_mtt_size(struct mlx4_srq_context *srqc) 3542255932Salfred{ 3543255932Salfred int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; 3544255932Salfred int log_rq_stride = srqc->logstride & 7; 3545255932Salfred int page_shift = (srqc->log_page_size & 0x3f) + 12; 3546255932Salfred 3547255932Salfred if (log_srq_size + log_rq_stride + 4 < page_shift) 3548255932Salfred return 1; 3549255932Salfred 3550255932Salfred return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); 3551255932Salfred} 3552255932Salfred 3553255932Salfredint mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3554255932Salfred struct mlx4_vhcr *vhcr, 3555255932Salfred struct mlx4_cmd_mailbox *inbox, 3556255932Salfred struct mlx4_cmd_mailbox *outbox, 3557255932Salfred struct mlx4_cmd_info *cmd) 3558255932Salfred{ 3559255932Salfred int err; 3560255932Salfred int srqn = vhcr->in_modifier; 3561255932Salfred struct res_mtt *mtt; 3562329159Shselasky struct res_srq *srq = NULL; 3563255932Salfred struct mlx4_srq_context *srqc = inbox->buf; 3564255932Salfred int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; 3565255932Salfred 3566255932Salfred if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) 3567255932Salfred return -EINVAL; 3568255932Salfred 3569255932Salfred err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); 
3570255932Salfred if (err) 3571255932Salfred return err; 3572255932Salfred err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); 3573255932Salfred if (err) 3574255932Salfred goto ex_abort; 3575255932Salfred err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), 3576255932Salfred mtt); 3577255932Salfred if (err) 3578255932Salfred goto ex_put_mtt; 3579255932Salfred 3580255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3581255932Salfred if (err) 3582255932Salfred goto ex_put_mtt; 3583255932Salfred 3584255932Salfred atomic_inc(&mtt->ref_count); 3585255932Salfred srq->mtt = mtt; 3586255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3587255932Salfred res_end_move(dev, slave, RES_SRQ, srqn); 3588255932Salfred return 0; 3589255932Salfred 3590255932Salfredex_put_mtt: 3591255932Salfred put_res(dev, slave, mtt->com.res_id, RES_MTT); 3592255932Salfredex_abort: 3593255932Salfred res_abort_move(dev, slave, RES_SRQ, srqn); 3594255932Salfred 3595255932Salfred return err; 3596255932Salfred} 3597255932Salfred 3598255932Salfredint mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3599255932Salfred struct mlx4_vhcr *vhcr, 3600255932Salfred struct mlx4_cmd_mailbox *inbox, 3601255932Salfred struct mlx4_cmd_mailbox *outbox, 3602255932Salfred struct mlx4_cmd_info *cmd) 3603255932Salfred{ 3604255932Salfred int err; 3605255932Salfred int srqn = vhcr->in_modifier; 3606329159Shselasky struct res_srq *srq = NULL; 3607255932Salfred 3608255932Salfred err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); 3609255932Salfred if (err) 3610255932Salfred return err; 3611255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3612255932Salfred if (err) 3613255932Salfred goto ex_abort; 3614255932Salfred atomic_dec(&srq->mtt->ref_count); 3615255932Salfred if (srq->cq) 3616255932Salfred atomic_dec(&srq->cq->ref_count); 3617255932Salfred res_end_move(dev, slave, RES_SRQ, srqn); 3618255932Salfred 3619255932Salfred 
return 0; 3620255932Salfred 3621255932Salfredex_abort: 3622255932Salfred res_abort_move(dev, slave, RES_SRQ, srqn); 3623255932Salfred 3624255932Salfred return err; 3625255932Salfred} 3626255932Salfred 3627255932Salfredint mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3628255932Salfred struct mlx4_vhcr *vhcr, 3629255932Salfred struct mlx4_cmd_mailbox *inbox, 3630255932Salfred struct mlx4_cmd_mailbox *outbox, 3631255932Salfred struct mlx4_cmd_info *cmd) 3632255932Salfred{ 3633255932Salfred int err; 3634255932Salfred int srqn = vhcr->in_modifier; 3635255932Salfred struct res_srq *srq; 3636255932Salfred 3637255932Salfred err = get_res(dev, slave, srqn, RES_SRQ, &srq); 3638255932Salfred if (err) 3639255932Salfred return err; 3640255932Salfred if (srq->com.from_state != RES_SRQ_HW) { 3641255932Salfred err = -EBUSY; 3642255932Salfred goto out; 3643255932Salfred } 3644255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3645255932Salfredout: 3646255932Salfred put_res(dev, slave, srqn, RES_SRQ); 3647255932Salfred return err; 3648255932Salfred} 3649255932Salfred 3650255932Salfredint mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, 3651255932Salfred struct mlx4_vhcr *vhcr, 3652255932Salfred struct mlx4_cmd_mailbox *inbox, 3653255932Salfred struct mlx4_cmd_mailbox *outbox, 3654255932Salfred struct mlx4_cmd_info *cmd) 3655255932Salfred{ 3656255932Salfred int err; 3657255932Salfred int srqn = vhcr->in_modifier; 3658255932Salfred struct res_srq *srq; 3659255932Salfred 3660255932Salfred err = get_res(dev, slave, srqn, RES_SRQ, &srq); 3661255932Salfred if (err) 3662255932Salfred return err; 3663255932Salfred 3664255932Salfred if (srq->com.from_state != RES_SRQ_HW) { 3665255932Salfred err = -EBUSY; 3666255932Salfred goto out; 3667255932Salfred } 3668255932Salfred 3669255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3670255932Salfredout: 3671255932Salfred put_res(dev, slave, srqn, RES_SRQ); 3672255932Salfred return err; 
3673255932Salfred} 3674255932Salfred 3675255932Salfredint mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, 3676255932Salfred struct mlx4_vhcr *vhcr, 3677255932Salfred struct mlx4_cmd_mailbox *inbox, 3678255932Salfred struct mlx4_cmd_mailbox *outbox, 3679255932Salfred struct mlx4_cmd_info *cmd) 3680255932Salfred{ 3681255932Salfred int err; 3682255932Salfred int qpn = vhcr->in_modifier & 0x7fffff; 3683255932Salfred struct res_qp *qp; 3684255932Salfred 3685255932Salfred err = get_res(dev, slave, qpn, RES_QP, &qp); 3686255932Salfred if (err) 3687255932Salfred return err; 3688255932Salfred if (qp->com.from_state != RES_QP_HW) { 3689255932Salfred err = -EBUSY; 3690255932Salfred goto out; 3691255932Salfred } 3692255932Salfred 3693255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3694255932Salfredout: 3695255932Salfred put_res(dev, slave, qpn, RES_QP); 3696255932Salfred return err; 3697255932Salfred} 3698255932Salfred 3699255932Salfredint mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, 3700255932Salfred struct mlx4_vhcr *vhcr, 3701255932Salfred struct mlx4_cmd_mailbox *inbox, 3702255932Salfred struct mlx4_cmd_mailbox *outbox, 3703255932Salfred struct mlx4_cmd_info *cmd) 3704255932Salfred{ 3705255932Salfred struct mlx4_qp_context *context = inbox->buf + 8; 3706255932Salfred adjust_proxy_tun_qkey(dev, vhcr, context); 3707255932Salfred update_pkey_index(dev, slave, inbox); 3708255932Salfred return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3709255932Salfred} 3710255932Salfred 3711329159Shselaskystatic int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, 3712329159Shselasky struct mlx4_qp_context *qpc, 3713329159Shselasky struct mlx4_cmd_mailbox *inbox) 3714329159Shselasky{ 3715329159Shselasky enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); 3716329159Shselasky u8 pri_sched_queue; 3717329159Shselasky int port = mlx4_slave_convert_port( 3718329159Shselasky dev, slave, (qpc->pri_path.sched_queue >> 6 
& 1) + 1) - 1; 3719329159Shselasky 3720329159Shselasky if (port < 0) 3721329159Shselasky return -EINVAL; 3722329159Shselasky 3723329159Shselasky pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | 3724329159Shselasky ((port & 1) << 6); 3725329159Shselasky 3726329159Shselasky if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) || 3727329159Shselasky qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) { 3728329159Shselasky qpc->pri_path.sched_queue = pri_sched_queue; 3729329159Shselasky } 3730329159Shselasky 3731329159Shselasky if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { 3732329159Shselasky port = mlx4_slave_convert_port( 3733329159Shselasky dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) 3734329159Shselasky + 1) - 1; 3735329159Shselasky if (port < 0) 3736329159Shselasky return -EINVAL; 3737329159Shselasky qpc->alt_path.sched_queue = 3738329159Shselasky (qpc->alt_path.sched_queue & ~(1 << 6)) | 3739329159Shselasky (port & 1) << 6; 3740329159Shselasky } 3741329159Shselasky return 0; 3742329159Shselasky} 3743329159Shselasky 3744255932Salfredstatic int roce_verify_mac(struct mlx4_dev *dev, int slave, 3745255932Salfred struct mlx4_qp_context *qpc, 3746255932Salfred struct mlx4_cmd_mailbox *inbox) 3747255932Salfred{ 3748255932Salfred u64 mac; 3749255932Salfred int port; 3750255932Salfred u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; 3751255932Salfred u8 sched = *(u8 *)(inbox->buf + 64); 3752255932Salfred u8 smac_ix; 3753255932Salfred 3754255932Salfred port = (sched >> 6 & 1) + 1; 3755255932Salfred if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { 3756255932Salfred smac_ix = qpc->pri_path.grh_mylmc & 0x7f; 3757255932Salfred if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) 3758255932Salfred return -ENOENT; 3759255932Salfred } 3760255932Salfred return 0; 3761255932Salfred} 3762255932Salfred 3763255932Salfredint mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 3764255932Salfred struct mlx4_vhcr *vhcr, 
3765255932Salfred struct mlx4_cmd_mailbox *inbox, 3766255932Salfred struct mlx4_cmd_mailbox *outbox, 3767255932Salfred struct mlx4_cmd_info *cmd) 3768255932Salfred{ 3769255932Salfred int err; 3770255932Salfred struct mlx4_qp_context *qpc = inbox->buf + 8; 3771272027Shselasky int qpn = vhcr->in_modifier & 0x7fffff; 3772272027Shselasky struct res_qp *qp; 3773272027Shselasky u8 orig_sched_queue; 3774272027Shselasky __be32 orig_param3 = qpc->param3; 3775272027Shselasky u8 orig_vlan_control = qpc->pri_path.vlan_control; 3776272027Shselasky u8 orig_fvl_rx = qpc->pri_path.fvl_rx; 3777272027Shselasky u8 orig_pri_path_fl = qpc->pri_path.fl; 3778272027Shselasky u8 orig_vlan_index = qpc->pri_path.vlan_index; 3779272027Shselasky u8 orig_feup = qpc->pri_path.feup; 3780255932Salfred 3781329159Shselasky err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 3782255932Salfred if (err) 3783255932Salfred return err; 3784329159Shselasky err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); 3785329159Shselasky if (err) 3786329159Shselasky return err; 3787255932Salfred 3788255932Salfred if (roce_verify_mac(dev, slave, qpc, inbox)) 3789255932Salfred return -EINVAL; 3790255932Salfred 3791255932Salfred update_pkey_index(dev, slave, inbox); 3792255932Salfred update_gid(dev, inbox, (u8)slave); 3793255932Salfred adjust_proxy_tun_qkey(dev, vhcr, qpc); 3794272027Shselasky orig_sched_queue = qpc->pri_path.sched_queue; 3795272027Shselasky 3796272027Shselasky err = get_res(dev, slave, qpn, RES_QP, &qp); 3797255932Salfred if (err) 3798255932Salfred return err; 3799272027Shselasky if (qp->com.from_state != RES_QP_HW) { 3800272027Shselasky err = -EBUSY; 3801272027Shselasky goto out; 3802272027Shselasky } 3803255932Salfred 3804329159Shselasky err = update_vport_qp_param(dev, inbox, slave, qpn); 3805329159Shselasky if (err) 3806329159Shselasky goto out; 3807272027Shselasky 3808272027Shselasky err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3809272027Shselaskyout: 
3810272027Shselasky /* if no error, save sched queue value passed in by VF. This is 3811272027Shselasky * essentially the QOS value provided by the VF. This will be useful 3812272027Shselasky * if we allow dynamic changes from VST back to VGT 3813272027Shselasky */ 3814272027Shselasky if (!err) { 3815272027Shselasky qp->sched_queue = orig_sched_queue; 3816272027Shselasky qp->param3 = orig_param3; 3817272027Shselasky qp->vlan_control = orig_vlan_control; 3818272027Shselasky qp->fvl_rx = orig_fvl_rx; 3819272027Shselasky qp->pri_path_fl = orig_pri_path_fl; 3820272027Shselasky qp->vlan_index = orig_vlan_index; 3821272027Shselasky qp->feup = orig_feup; 3822272027Shselasky } 3823272027Shselasky put_res(dev, slave, qpn, RES_QP); 3824272027Shselasky return err; 3825255932Salfred} 3826255932Salfred 3827255932Salfredint mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3828255932Salfred struct mlx4_vhcr *vhcr, 3829255932Salfred struct mlx4_cmd_mailbox *inbox, 3830255932Salfred struct mlx4_cmd_mailbox *outbox, 3831255932Salfred struct mlx4_cmd_info *cmd) 3832255932Salfred{ 3833255932Salfred int err; 3834255932Salfred struct mlx4_qp_context *context = inbox->buf + 8; 3835255932Salfred 3836329159Shselasky err = adjust_qp_sched_queue(dev, slave, context, inbox); 3837255932Salfred if (err) 3838255932Salfred return err; 3839329159Shselasky err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); 3840329159Shselasky if (err) 3841329159Shselasky return err; 3842255932Salfred 3843255932Salfred update_pkey_index(dev, slave, inbox); 3844255932Salfred update_gid(dev, inbox, (u8)slave); 3845255932Salfred adjust_proxy_tun_qkey(dev, vhcr, context); 3846255932Salfred return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3847255932Salfred} 3848255932Salfred 3849255932Salfredint mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3850255932Salfred struct mlx4_vhcr *vhcr, 3851255932Salfred struct mlx4_cmd_mailbox *inbox, 3852255932Salfred struct 
mlx4_cmd_mailbox *outbox, 3853255932Salfred struct mlx4_cmd_info *cmd) 3854255932Salfred{ 3855255932Salfred int err; 3856255932Salfred struct mlx4_qp_context *context = inbox->buf + 8; 3857255932Salfred 3858329159Shselasky err = adjust_qp_sched_queue(dev, slave, context, inbox); 3859255932Salfred if (err) 3860255932Salfred return err; 3861329159Shselasky err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); 3862329159Shselasky if (err) 3863329159Shselasky return err; 3864255932Salfred 3865255932Salfred update_pkey_index(dev, slave, inbox); 3866255932Salfred update_gid(dev, inbox, (u8)slave); 3867255932Salfred adjust_proxy_tun_qkey(dev, vhcr, context); 3868255932Salfred return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3869255932Salfred} 3870255932Salfred 3871255932Salfred 3872255932Salfredint mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3873255932Salfred struct mlx4_vhcr *vhcr, 3874255932Salfred struct mlx4_cmd_mailbox *inbox, 3875255932Salfred struct mlx4_cmd_mailbox *outbox, 3876255932Salfred struct mlx4_cmd_info *cmd) 3877255932Salfred{ 3878255932Salfred struct mlx4_qp_context *context = inbox->buf + 8; 3879329159Shselasky int err = adjust_qp_sched_queue(dev, slave, context, inbox); 3880329159Shselasky if (err) 3881329159Shselasky return err; 3882255932Salfred adjust_proxy_tun_qkey(dev, vhcr, context); 3883255932Salfred return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3884255932Salfred} 3885255932Salfred 3886255932Salfredint mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, 3887255932Salfred struct mlx4_vhcr *vhcr, 3888255932Salfred struct mlx4_cmd_mailbox *inbox, 3889255932Salfred struct mlx4_cmd_mailbox *outbox, 3890255932Salfred struct mlx4_cmd_info *cmd) 3891255932Salfred{ 3892255932Salfred int err; 3893255932Salfred struct mlx4_qp_context *context = inbox->buf + 8; 3894255932Salfred 3895329159Shselasky err = adjust_qp_sched_queue(dev, slave, context, inbox); 3896255932Salfred if (err) 
3897255932Salfred return err; 3898329159Shselasky err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); 3899329159Shselasky if (err) 3900329159Shselasky return err; 3901255932Salfred 3902255932Salfred adjust_proxy_tun_qkey(dev, vhcr, context); 3903255932Salfred update_gid(dev, inbox, (u8)slave); 3904255932Salfred update_pkey_index(dev, slave, inbox); 3905255932Salfred return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3906255932Salfred} 3907255932Salfred 3908255932Salfredint mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, 3909255932Salfred struct mlx4_vhcr *vhcr, 3910255932Salfred struct mlx4_cmd_mailbox *inbox, 3911255932Salfred struct mlx4_cmd_mailbox *outbox, 3912255932Salfred struct mlx4_cmd_info *cmd) 3913255932Salfred{ 3914255932Salfred int err; 3915255932Salfred struct mlx4_qp_context *context = inbox->buf + 8; 3916255932Salfred 3917329159Shselasky err = adjust_qp_sched_queue(dev, slave, context, inbox); 3918255932Salfred if (err) 3919255932Salfred return err; 3920329159Shselasky err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); 3921329159Shselasky if (err) 3922329159Shselasky return err; 3923255932Salfred 3924255932Salfred adjust_proxy_tun_qkey(dev, vhcr, context); 3925255932Salfred update_gid(dev, inbox, (u8)slave); 3926255932Salfred update_pkey_index(dev, slave, inbox); 3927255932Salfred return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3928255932Salfred} 3929255932Salfred 3930255932Salfredint mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, 3931255932Salfred struct mlx4_vhcr *vhcr, 3932255932Salfred struct mlx4_cmd_mailbox *inbox, 3933255932Salfred struct mlx4_cmd_mailbox *outbox, 3934255932Salfred struct mlx4_cmd_info *cmd) 3935255932Salfred{ 3936255932Salfred int err; 3937255932Salfred int qpn = vhcr->in_modifier & 0x7fffff; 3938255932Salfred struct res_qp *qp; 3939255932Salfred 3940255932Salfred err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); 
3941255932Salfred if (err) 3942255932Salfred return err; 3943255932Salfred err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3944255932Salfred if (err) 3945255932Salfred goto ex_abort; 3946255932Salfred 3947255932Salfred atomic_dec(&qp->mtt->ref_count); 3948255932Salfred atomic_dec(&qp->rcq->ref_count); 3949255932Salfred atomic_dec(&qp->scq->ref_count); 3950255932Salfred if (qp->srq) 3951255932Salfred atomic_dec(&qp->srq->ref_count); 3952255932Salfred res_end_move(dev, slave, RES_QP, qpn); 3953255932Salfred return 0; 3954255932Salfred 3955255932Salfredex_abort: 3956255932Salfred res_abort_move(dev, slave, RES_QP, qpn); 3957255932Salfred 3958255932Salfred return err; 3959255932Salfred} 3960255932Salfred 3961255932Salfredstatic struct res_gid *find_gid(struct mlx4_dev *dev, int slave, 3962255932Salfred struct res_qp *rqp, u8 *gid) 3963255932Salfred{ 3964255932Salfred struct res_gid *res; 3965255932Salfred 3966255932Salfred list_for_each_entry(res, &rqp->mcg_list, list) { 3967255932Salfred if (!memcmp(res->gid, gid, 16)) 3968255932Salfred return res; 3969255932Salfred } 3970255932Salfred return NULL; 3971255932Salfred} 3972255932Salfred 3973255932Salfredstatic int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 3974255932Salfred u8 *gid, enum mlx4_protocol prot, 3975272027Shselasky enum mlx4_steer_type steer, u64 reg_id) 3976255932Salfred{ 3977255932Salfred struct res_gid *res; 3978255932Salfred int err; 3979255932Salfred 3980255932Salfred res = kzalloc(sizeof *res, GFP_KERNEL); 3981255932Salfred if (!res) 3982255932Salfred return -ENOMEM; 3983255932Salfred 3984255932Salfred spin_lock_irq(&rqp->mcg_spl); 3985255932Salfred if (find_gid(dev, slave, rqp, gid)) { 3986255932Salfred kfree(res); 3987255932Salfred err = -EEXIST; 3988255932Salfred } else { 3989255932Salfred memcpy(res->gid, gid, 16); 3990255932Salfred res->prot = prot; 3991255932Salfred res->steer = steer; 3992272027Shselasky res->reg_id = reg_id; 3993255932Salfred 
list_add_tail(&res->list, &rqp->mcg_list); 3994255932Salfred err = 0; 3995255932Salfred } 3996255932Salfred spin_unlock_irq(&rqp->mcg_spl); 3997255932Salfred 3998255932Salfred return err; 3999255932Salfred} 4000255932Salfred 4001255932Salfredstatic int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 4002255932Salfred u8 *gid, enum mlx4_protocol prot, 4003272027Shselasky enum mlx4_steer_type steer, u64 *reg_id) 4004255932Salfred{ 4005255932Salfred struct res_gid *res; 4006255932Salfred int err; 4007255932Salfred 4008255932Salfred spin_lock_irq(&rqp->mcg_spl); 4009255932Salfred res = find_gid(dev, slave, rqp, gid); 4010255932Salfred if (!res || res->prot != prot || res->steer != steer) 4011255932Salfred err = -EINVAL; 4012255932Salfred else { 4013272027Shselasky *reg_id = res->reg_id; 4014255932Salfred list_del(&res->list); 4015255932Salfred kfree(res); 4016255932Salfred err = 0; 4017255932Salfred } 4018255932Salfred spin_unlock_irq(&rqp->mcg_spl); 4019255932Salfred 4020255932Salfred return err; 4021255932Salfred} 4022255932Salfred 4023329159Shselaskystatic int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, 4024329159Shselasky u8 gid[16], int block_loopback, enum mlx4_protocol prot, 4025272027Shselasky enum mlx4_steer_type type, u64 *reg_id) 4026272027Shselasky{ 4027272027Shselasky switch (dev->caps.steering_mode) { 4028329159Shselasky case MLX4_STEERING_MODE_DEVICE_MANAGED: { 4029329159Shselasky int port = mlx4_slave_convert_port(dev, slave, gid[5]); 4030329159Shselasky if (port < 0) 4031329159Shselasky return port; 4032329159Shselasky return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 4033272027Shselasky block_loopback, prot, 4034272027Shselasky reg_id); 4035329159Shselasky } 4036272027Shselasky case MLX4_STEERING_MODE_B0: 4037329159Shselasky if (prot == MLX4_PROT_ETH) { 4038329159Shselasky int port = mlx4_slave_convert_port(dev, slave, gid[5]); 4039329159Shselasky if (port < 0) 4040329159Shselasky return port; 4041329159Shselasky 
gid[5] = port; 4042329159Shselasky } 4043272027Shselasky return mlx4_qp_attach_common(dev, qp, gid, 4044272027Shselasky block_loopback, prot, type); 4045272027Shselasky default: 4046272027Shselasky return -EINVAL; 4047272027Shselasky } 4048272027Shselasky} 4049272027Shselasky 4050329159Shselaskystatic int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 4051329159Shselasky u8 gid[16], enum mlx4_protocol prot, 4052329159Shselasky enum mlx4_steer_type type, u64 reg_id) 4053272027Shselasky{ 4054272027Shselasky switch (dev->caps.steering_mode) { 4055272027Shselasky case MLX4_STEERING_MODE_DEVICE_MANAGED: 4056272027Shselasky return mlx4_flow_detach(dev, reg_id); 4057272027Shselasky case MLX4_STEERING_MODE_B0: 4058272027Shselasky return mlx4_qp_detach_common(dev, qp, gid, prot, type); 4059272027Shselasky default: 4060272027Shselasky return -EINVAL; 4061272027Shselasky } 4062272027Shselasky} 4063272027Shselasky 4064329159Shselaskystatic int mlx4_adjust_port(struct mlx4_dev *dev, int slave, 4065329159Shselasky u8 *gid, enum mlx4_protocol prot) 4066329159Shselasky{ 4067329159Shselasky int real_port; 4068329159Shselasky 4069329159Shselasky if (prot != MLX4_PROT_ETH) 4070329159Shselasky return 0; 4071329159Shselasky 4072329159Shselasky if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || 4073329159Shselasky dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 4074329159Shselasky real_port = mlx4_slave_convert_port(dev, slave, gid[5]); 4075329159Shselasky if (real_port < 0) 4076329159Shselasky return -EINVAL; 4077329159Shselasky gid[5] = real_port; 4078329159Shselasky } 4079329159Shselasky 4080329159Shselasky return 0; 4081329159Shselasky} 4082329159Shselasky 4083255932Salfredint mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 4084255932Salfred struct mlx4_vhcr *vhcr, 4085255932Salfred struct mlx4_cmd_mailbox *inbox, 4086255932Salfred struct mlx4_cmd_mailbox *outbox, 4087255932Salfred struct mlx4_cmd_info *cmd) 4088255932Salfred{ 4089255932Salfred 
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	/* protocol, loopback-block flag and QPN are packed in in_modifier */
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	/* steering type is encoded in bit 1 of gid[7] */
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	/* take the QP busy in the tracker while we modify its mcg list */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		/* record the attachment so it can be undone on slave reset */
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = mlx4_adjust_port(dev, slave, gid, prot);
		if (err)
			goto ex_put;

		/* drop the tracked entry first; it yields the reg_id
		 * needed for the hardware detach below */
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, (unsigned long long)reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	/* tracking failed after a successful attach: roll back the HW rule */
	qp_detach(dev, &qp, gid, prot, type,
		  reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a mac address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast mac*/
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		/* accept only if the dst MAC matches one of the MACs the
		 * tracker has assigned to this slave */
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

/* If the rule consists of a lone Ethernet header matching a multicast or
 * broadcast MAC, lower its priority to the NIC domain so that
 * device-managed rules take precedence.
 */
static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
					 struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		/* an all-zero following header means this eth header is the
		 * only specification in the rule */
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
				 next_rule->rsvd == 0;

		if (last_rule)
			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}

/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		/* shift the existing IPv4 (+L4) headers up to make room */
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	/* pick the first MAC the tracker assigned to this slave on the
	 * rule's port */
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	/* build the synthetic eth header: exact match on dst MAC */
	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}

#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
	1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
	1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
/* Command wrapper for UPDATE_QP issued by a slave.  Only the primary-path
 * fields in MLX4_UPD_QP_PATH_MASK_SUPPORTED may be touched; the smac index,
 * if changed, must resolve to a MAC registered to the slave.
 */
int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd_info)
{
	int err;
	u32 qpn = vhcr->in_modifier & 0xffffff;
	struct res_qp *rqp;
	u64 mac;
	unsigned port;
	u64 pri_addr_path_mask;
	struct mlx4_update_qp_context *cmd;
	int smac_index;

	cmd = (struct mlx4_update_qp_context *)inbox->buf;

	/* reject any update outside the whitelisted primary-path fields */
	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;

	if ((pri_addr_path_mask &
	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
	    !(dev->caps.flags2 &
	      MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
		mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
			  slave);
		return -ENOTSUPP;
	}

	/* Just change the smac for the QP */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
		return err;
	}

	/* port is encoded in bit 6 of the saved sched_queue (0/1 -> 1/2) */
	port = (rqp->sched_queue >> 6 & 1) + 1;

	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
		smac_index = cmd->qp_context.pri_path.grh_mylmc;
		/* verify the requested smac index maps to a MAC owned by
		 * this slave on this port */
		err = mac_find_smac_ix_in_slave(dev, slave, port,
						smac_index, &mac);

		if (err) {
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
				 qpn, smac_index);
			goto err_mac;
		}
	}

	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err) {
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
		goto err_mac;
	}

err_mac:
	/* common exit: release the QP's busy state (err is 0 on success) */
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

/* Compute the total size in bytes of a flow-steering attach mailbox:
 * the control segment plus every rule header until a zero-sized header
 * terminates the list.  Header sizes are in 32-bit units.
 */
static u32 qp_attach_mbox_size(void *mbox)
{
	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	struct _rule_hw *rule_header;

	rule_header = (struct _rule_hw *)(mbox + size);

	while (rule_header->size) {
		size += rule_header->size * sizeof(u32);
		rule_header += 1;
	}
	return size;
}

static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);

/* Command wrapper: attach a device-managed flow-steering rule on behalf of
 * a slave, validating/fixing the rule headers, registering the result with
 * the resource tracker, and preparing a mirror copy for bonded ports.
 */
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;
	struct res_fs_rule *rrule;
	u32 mbox_size;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	/* translate the slave-relative port to the real port in place */
	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
	if (err <= 0)
		return -EINVAL;
	ctrl->port = err;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	/* hold the target QP busy for the duration of the attach */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
		handle_eth_header_mcast_prio(ctrl, rule_header);

	/* the PF's own rules are trusted; skip VF validation */
	if (slave == dev->caps.function)
		goto execute;

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put_qp;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put_qp;
		}
		/* in_modifier is the mailbox size in dwords; account for the
		 * eth header just prepended */
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put_qp;
	}

execute:
	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put_qp;


	/* out_param now carries the rule's registration id; track it */
	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources\n");
		goto err_detach;
	}

	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
	if (err)
		goto err_detach;

	/* keep a copy of the mailbox so the rule can be mirrored on the
	 * other port when the device is bonded */
	mbox_size = qp_attach_mbox_size(inbox->buf);
	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
	if (!rrule->mirr_mbox) {
		err = -ENOMEM;
		goto err_put_rule;
	}
	rrule->mirr_mbox_size = mbox_size;
	rrule->mirr_rule_id = 0;
	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);

	/* set different port */
	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
	if (ctrl->port == 1)
		ctrl->port = 2;
	else
4429329159Shselasky ctrl->port = 1; 4430329159Shselasky 4431329159Shselasky if (mlx4_is_bonded(dev)) 4432329159Shselasky mlx4_do_mirror_rule(dev, rrule); 4433329159Shselasky 4434329159Shselasky atomic_inc(&rqp->ref_count); 4435329159Shselasky 4436329159Shselaskyerr_put_rule: 4437329159Shselasky put_res(dev, slave, vhcr->out_param, RES_FS_RULE); 4438329159Shselaskyerr_detach: 4439329159Shselasky /* detach rule on error */ 4440329159Shselasky if (err) 4441255932Salfred mlx4_cmd(dev, vhcr->out_param, 0, 0, 4442272027Shselasky MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 4443255932Salfred MLX4_CMD_NATIVE); 4444329159Shselaskyerr_put_qp: 4445272027Shselasky put_res(dev, slave, qpn, RES_QP); 4446255932Salfred return err; 4447255932Salfred} 4448255932Salfred 4449329159Shselaskystatic int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule) 4450329159Shselasky{ 4451329159Shselasky int err; 4452329159Shselasky 4453329159Shselasky err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0); 4454329159Shselasky if (err) { 4455329159Shselasky mlx4_err(dev, "Fail to remove flow steering resources\n"); 4456329159Shselasky return err; 4457329159Shselasky } 4458329159Shselasky 4459329159Shselasky mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, 4460329159Shselasky MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 4461329159Shselasky return 0; 4462329159Shselasky} 4463329159Shselasky 4464255932Salfredint mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, 4465255932Salfred struct mlx4_vhcr *vhcr, 4466255932Salfred struct mlx4_cmd_mailbox *inbox, 4467255932Salfred struct mlx4_cmd_mailbox *outbox, 4468255932Salfred struct mlx4_cmd_info *cmd) 4469255932Salfred{ 4470255932Salfred int err; 4471272027Shselasky struct res_qp *rqp; 4472272027Shselasky struct res_fs_rule *rrule; 4473329159Shselasky u64 mirr_reg_id; 4474255932Salfred 4475255932Salfred if (dev->caps.steering_mode != 4476255932Salfred 
MLX4_STEERING_MODE_DEVICE_MANAGED) 4477255932Salfred return -EOPNOTSUPP; 4478255932Salfred 4479272027Shselasky err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); 4480272027Shselasky if (err) 4481255932Salfred return err; 4482329159Shselasky 4483329159Shselasky if (!rrule->mirr_mbox) { 4484329159Shselasky mlx4_err(dev, "Mirror rules cannot be removed explicitly\n"); 4485329159Shselasky put_res(dev, slave, vhcr->in_param, RES_FS_RULE); 4486329159Shselasky return -EINVAL; 4487329159Shselasky } 4488329159Shselasky mirr_reg_id = rrule->mirr_rule_id; 4489329159Shselasky kfree(rrule->mirr_mbox); 4490329159Shselasky 4491272027Shselasky /* Release the rule form busy state before removal */ 4492272027Shselasky put_res(dev, slave, vhcr->in_param, RES_FS_RULE); 4493272027Shselasky err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); 4494272027Shselasky if (err) 4495272027Shselasky return err; 4496255932Salfred 4497329159Shselasky if (mirr_reg_id && mlx4_is_bonded(dev)) { 4498329159Shselasky err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule); 4499329159Shselasky if (err) { 4500329159Shselasky mlx4_err(dev, "Fail to get resource of mirror rule\n"); 4501329159Shselasky } else { 4502329159Shselasky put_res(dev, slave, mirr_reg_id, RES_FS_RULE); 4503329159Shselasky mlx4_undo_mirror_rule(dev, rrule); 4504329159Shselasky } 4505329159Shselasky } 4506329159Shselasky err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); 4507329159Shselasky if (err) { 4508329159Shselasky mlx4_err(dev, "Fail to remove flow steering resources\n"); 4509329159Shselasky goto out; 4510329159Shselasky } 4511329159Shselasky 4512255932Salfred err = mlx4_cmd(dev, vhcr->in_param, 0, 0, 4513255932Salfred MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 4514255932Salfred MLX4_CMD_NATIVE); 4515329159Shselasky if (!err) 4516272027Shselasky atomic_dec(&rqp->ref_count); 4517272027Shselaskyout: 4518272027Shselasky put_res(dev, slave, rrule->qpn, RES_QP); 4519255932Salfred return 
err; 4520255932Salfred} 4521255932Salfred 4522255932Salfredenum { 4523255932Salfred BUSY_MAX_RETRIES = 10 4524255932Salfred}; 4525255932Salfred 4526255932Salfredint mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, 4527255932Salfred struct mlx4_vhcr *vhcr, 4528255932Salfred struct mlx4_cmd_mailbox *inbox, 4529255932Salfred struct mlx4_cmd_mailbox *outbox, 4530255932Salfred struct mlx4_cmd_info *cmd) 4531255932Salfred{ 4532255932Salfred int err; 4533329159Shselasky int index = vhcr->in_modifier & 0xffff; 4534255932Salfred 4535329159Shselasky err = get_res(dev, slave, index, RES_COUNTER, NULL); 4536329159Shselasky if (err) 4537329159Shselasky return err; 4538329159Shselasky 4539272027Shselasky err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 4540329159Shselasky put_res(dev, slave, index, RES_COUNTER); 4541255932Salfred return err; 4542255932Salfred} 4543255932Salfred 4544255932Salfredstatic void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) 4545255932Salfred{ 4546255932Salfred struct res_gid *rgid; 4547255932Salfred struct res_gid *tmp; 4548255932Salfred struct mlx4_qp qp; /* dummy for calling attach/detach */ 4549255932Salfred 4550255932Salfred list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { 4551272027Shselasky switch (dev->caps.steering_mode) { 4552272027Shselasky case MLX4_STEERING_MODE_DEVICE_MANAGED: 4553272027Shselasky mlx4_flow_detach(dev, rgid->reg_id); 4554272027Shselasky break; 4555272027Shselasky case MLX4_STEERING_MODE_B0: 4556272027Shselasky qp.qpn = rqp->local_qpn; 4557272027Shselasky (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, 4558272027Shselasky rgid->prot, rgid->steer); 4559272027Shselasky break; 4560272027Shselasky } 4561255932Salfred list_del(&rgid->list); 4562255932Salfred kfree(rgid); 4563255932Salfred } 4564255932Salfred} 4565255932Salfred 4566255932Salfredstatic int _move_all_busy(struct mlx4_dev *dev, int slave, 4567255932Salfred enum mlx4_resource type, int print) 4568255932Salfred{ 
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	/* Single sweep: mark every idle resource of this slave as
	 * BUSY+removing; count (and optionally log) the ones some other
	 * path still holds busy.  Returns the number still busy. */
	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 resource_str(type),
							 (long long)r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

/* Repeatedly try to claim all of a slave's resources of one type for
 * removal, yielding between attempts, for up to 5 seconds.  A final pass
 * logs whatever is still busy.  Returns 0 when everything was claimed.
 */
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
/* Tear down all QPs still owned by a slave (slave shutdown/reset path). */
static void rem_slave_qps(struct mlx4_dev
			  *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		/* the lock is dropped while issuing FW commands and retaken
		 * before advancing the iterator */
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			/* walk the QP back down its state machine:
			 * HW -> MAPPED -> RESERVED -> freed */
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					/* force the QP to reset before
					 * releasing its dependencies */
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					/* drop the references the QP held on
					 * its CQs, MTT and optional SRQ */
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

/* Tear down all SRQs still owned by a slave (slave shutdown/reset path). */
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			/* walk the SRQ back down: HW -> ALLOCATED -> freed */
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					/* hand the SRQ back to SW ownership */
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

/* Tear down all CQs still owned by a slave (slave shutdown/reset path).
 * CQs still referenced by QPs/SRQs (ref_count != 0) are skipped; their
 * references are dropped by rem_slave_qps()/rem_slave_srqs() first.
 */
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					/* hand the CQ back to SW ownership */
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

/* Tear down all MPTs (memory regions) still owned by a slave. */
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			/* walk the MPT back down:
			 * HW -> MAPPED -> RESERVED -> freed */
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					/* hand the MPT back to SW ownership */
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

/* Tear down all MTT ranges still owned by a slave.  MTTs have a single
 * tracked state (ALLOCATED); ranges are released by order (1 << order
 * entries).
 */
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

/* Replay a rule's saved attach mailbox (already rewritten to the other
 * port) so bonded devices carry the rule on both ports.
 * NOTE(review): the !fs_rule->mirr_mbox early return below appears to leak
 * the command mailbox allocated above it (no mlx4_free_cmd_mailbox on that
 * path) — the function continues past this view, so confirm against the
 * full source before changing.
 */
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct res_fs_rule *mirr_rule;
	u64 reg_id;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (!fs_rule->mirr_mbox) {
		mlx4_err(dev, "rule mirroring mailbox is null\n");
		return -EINVAL;
	}
	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 4961329159Shselasky MLX4_CMD_NATIVE); 4962329159Shselasky mlx4_free_cmd_mailbox(dev, mailbox); 4963329159Shselasky 4964329159Shselasky if (err) 4965329159Shselasky goto err; 4966329159Shselasky 4967329159Shselasky err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn); 4968329159Shselasky if (err) 4969329159Shselasky goto err_detach; 4970329159Shselasky 4971329159Shselasky err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule); 4972329159Shselasky if (err) 4973329159Shselasky goto err_rem; 4974329159Shselasky 4975329159Shselasky fs_rule->mirr_rule_id = reg_id; 4976329159Shselasky mirr_rule->mirr_rule_id = 0; 4977329159Shselasky mirr_rule->mirr_mbox_size = 0; 4978329159Shselasky mirr_rule->mirr_mbox = NULL; 4979329159Shselasky put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE); 4980329159Shselasky 4981329159Shselasky return 0; 4982329159Shselaskyerr_rem: 4983329159Shselasky rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0); 4984329159Shselaskyerr_detach: 4985329159Shselasky mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, 4986329159Shselasky MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 4987329159Shselaskyerr: 4988329159Shselasky return err; 4989329159Shselasky} 4990329159Shselasky 4991329159Shselaskystatic int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond) 4992329159Shselasky{ 4993329159Shselasky struct mlx4_priv *priv = mlx4_priv(dev); 4994329159Shselasky struct mlx4_resource_tracker *tracker = 4995329159Shselasky &priv->mfunc.master.res_tracker; 4996329159Shselasky struct rb_root *root = &tracker->res_tree[RES_FS_RULE]; 4997329159Shselasky struct rb_node *p; 4998329159Shselasky struct res_fs_rule *fs_rule; 4999329159Shselasky int err = 0; 5000329159Shselasky LIST_HEAD(mirr_list); 5001329159Shselasky 5002329159Shselasky for (p = rb_first(root); p; p = rb_next(p)) { 5003329159Shselasky fs_rule = rb_entry(p, struct res_fs_rule, com.node); 
5004329159Shselasky if ((bond && fs_rule->mirr_mbox_size) || 5005329159Shselasky (!bond && !fs_rule->mirr_mbox_size)) 5006329159Shselasky list_add_tail(&fs_rule->mirr_list, &mirr_list); 5007329159Shselasky } 5008329159Shselasky 5009329159Shselasky list_for_each_entry(fs_rule, &mirr_list, mirr_list) { 5010329159Shselasky if (bond) 5011329159Shselasky err += mlx4_do_mirror_rule(dev, fs_rule); 5012329159Shselasky else 5013329159Shselasky err += mlx4_undo_mirror_rule(dev, fs_rule); 5014329159Shselasky } 5015329159Shselasky return err; 5016329159Shselasky} 5017329159Shselasky 5018329159Shselaskyint mlx4_bond_fs_rules(struct mlx4_dev *dev) 5019329159Shselasky{ 5020329159Shselasky return mlx4_mirror_fs_rules(dev, true); 5021329159Shselasky} 5022329159Shselasky 5023329159Shselaskyint mlx4_unbond_fs_rules(struct mlx4_dev *dev) 5024329159Shselasky{ 5025329159Shselasky return mlx4_mirror_fs_rules(dev, false); 5026329159Shselasky} 5027329159Shselasky 5028255932Salfredstatic void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) 5029255932Salfred{ 5030255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 5031255932Salfred struct mlx4_resource_tracker *tracker = 5032255932Salfred &priv->mfunc.master.res_tracker; 5033255932Salfred struct list_head *fs_rule_list = 5034255932Salfred &tracker->slave_list[slave].res_list[RES_FS_RULE]; 5035255932Salfred struct res_fs_rule *fs_rule; 5036255932Salfred struct res_fs_rule *tmp; 5037255932Salfred int state; 5038255932Salfred u64 base; 5039255932Salfred int err; 5040255932Salfred 5041255932Salfred err = move_all_busy(dev, slave, RES_FS_RULE); 5042255932Salfred if (err) 5043255932Salfred mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n", 5044255932Salfred slave); 5045255932Salfred 5046255932Salfred spin_lock_irq(mlx4_tlock(dev)); 5047255932Salfred list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) { 5048255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 5049255932Salfred if (fs_rule->com.owner == 
slave) { 5050255932Salfred base = fs_rule->com.res_id; 5051255932Salfred state = fs_rule->com.from_state; 5052255932Salfred while (state != 0) { 5053255932Salfred switch (state) { 5054255932Salfred case RES_FS_RULE_ALLOCATED: 5055255932Salfred /* detach rule */ 5056255932Salfred err = mlx4_cmd(dev, base, 0, 0, 5057255932Salfred MLX4_QP_FLOW_STEERING_DETACH, 5058255932Salfred MLX4_CMD_TIME_CLASS_A, 5059255932Salfred MLX4_CMD_NATIVE); 5060255932Salfred 5061255932Salfred spin_lock_irq(mlx4_tlock(dev)); 5062255932Salfred rb_erase(&fs_rule->com.node, 5063255932Salfred &tracker->res_tree[RES_FS_RULE]); 5064255932Salfred list_del(&fs_rule->com.list); 5065255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 5066255932Salfred kfree(fs_rule); 5067255932Salfred state = 0; 5068255932Salfred break; 5069255932Salfred 5070255932Salfred default: 5071255932Salfred state = 0; 5072255932Salfred } 5073255932Salfred } 5074255932Salfred } 5075255932Salfred spin_lock_irq(mlx4_tlock(dev)); 5076255932Salfred } 5077255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 5078255932Salfred} 5079255932Salfred 5080255932Salfredstatic void rem_slave_eqs(struct mlx4_dev *dev, int slave) 5081255932Salfred{ 5082255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 5083255932Salfred struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 5084255932Salfred struct list_head *eq_list = 5085255932Salfred &tracker->slave_list[slave].res_list[RES_EQ]; 5086255932Salfred struct res_eq *eq; 5087255932Salfred struct res_eq *tmp; 5088255932Salfred int err; 5089255932Salfred int state; 5090255932Salfred LIST_HEAD(tlist); 5091255932Salfred int eqn; 5092255932Salfred 5093255932Salfred err = move_all_busy(dev, slave, RES_EQ); 5094255932Salfred if (err) 5095329159Shselasky mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", 5096329159Shselasky slave); 5097255932Salfred 5098255932Salfred spin_lock_irq(mlx4_tlock(dev)); 5099255932Salfred list_for_each_entry_safe(eq, tmp, eq_list, 
com.list) { 5100255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 5101255932Salfred if (eq->com.owner == slave) { 5102255932Salfred eqn = eq->com.res_id; 5103255932Salfred state = eq->com.from_state; 5104255932Salfred while (state != 0) { 5105255932Salfred switch (state) { 5106255932Salfred case RES_EQ_RESERVED: 5107255932Salfred spin_lock_irq(mlx4_tlock(dev)); 5108255932Salfred rb_erase(&eq->com.node, 5109255932Salfred &tracker->res_tree[RES_EQ]); 5110255932Salfred list_del(&eq->com.list); 5111255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 5112255932Salfred kfree(eq); 5113255932Salfred state = 0; 5114255932Salfred break; 5115255932Salfred 5116255932Salfred case RES_EQ_HW: 5117329159Shselasky err = mlx4_cmd(dev, slave, eqn & 0x3ff, 5118329159Shselasky 1, MLX4_CMD_HW2SW_EQ, 5119329159Shselasky MLX4_CMD_TIME_CLASS_A, 5120329159Shselasky MLX4_CMD_NATIVE); 5121255932Salfred if (err) 5122329159Shselasky mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", 5123329159Shselasky slave, eqn & 0x3ff); 5124255932Salfred atomic_dec(&eq->mtt->ref_count); 5125255932Salfred state = RES_EQ_RESERVED; 5126255932Salfred break; 5127255932Salfred 5128255932Salfred default: 5129255932Salfred state = 0; 5130255932Salfred } 5131255932Salfred } 5132255932Salfred } 5133255932Salfred spin_lock_irq(mlx4_tlock(dev)); 5134255932Salfred } 5135255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 5136255932Salfred} 5137255932Salfred 5138255932Salfredstatic void rem_slave_counters(struct mlx4_dev *dev, int slave) 5139255932Salfred{ 5140329159Shselasky struct mlx4_priv *priv = mlx4_priv(dev); 5141329159Shselasky struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 5142329159Shselasky struct list_head *counter_list = 5143329159Shselasky &tracker->slave_list[slave].res_list[RES_COUNTER]; 5144329159Shselasky struct res_counter *counter; 5145329159Shselasky struct res_counter *tmp; 5146329159Shselasky int err; 5147329159Shselasky int *counters_arr = NULL; 
5148329159Shselasky int i, j; 5149329159Shselasky 5150329159Shselasky err = move_all_busy(dev, slave, RES_COUNTER); 5151329159Shselasky if (err) 5152329159Shselasky mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", 5153329159Shselasky slave); 5154329159Shselasky 5155329159Shselasky counters_arr = kmalloc_array(dev->caps.max_counters, 5156329159Shselasky sizeof(*counters_arr), GFP_KERNEL); 5157329159Shselasky if (!counters_arr) 5158329159Shselasky return; 5159329159Shselasky 5160329159Shselasky do { 5161329159Shselasky i = 0; 5162329159Shselasky j = 0; 5163329159Shselasky spin_lock_irq(mlx4_tlock(dev)); 5164329159Shselasky list_for_each_entry_safe(counter, tmp, counter_list, com.list) { 5165329159Shselasky if (counter->com.owner == slave) { 5166329159Shselasky counters_arr[i++] = counter->com.res_id; 5167329159Shselasky rb_erase(&counter->com.node, 5168329159Shselasky &tracker->res_tree[RES_COUNTER]); 5169329159Shselasky list_del(&counter->com.list); 5170329159Shselasky kfree(counter); 5171329159Shselasky } 5172329159Shselasky } 5173329159Shselasky spin_unlock_irq(mlx4_tlock(dev)); 5174329159Shselasky 5175329159Shselasky while (j < i) { 5176329159Shselasky __mlx4_counter_free(dev, counters_arr[j++]); 5177329159Shselasky mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 5178329159Shselasky } 5179329159Shselasky } while (i); 5180329159Shselasky 5181329159Shselasky kfree(counters_arr); 5182255932Salfred} 5183255932Salfred 5184255932Salfredstatic void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) 5185255932Salfred{ 5186255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 5187255932Salfred struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 5188255932Salfred struct list_head *xrcdn_list = 5189255932Salfred &tracker->slave_list[slave].res_list[RES_XRCD]; 5190255932Salfred struct res_xrcdn *xrcd; 5191255932Salfred struct res_xrcdn *tmp; 5192255932Salfred int err; 5193255932Salfred int xrcdn; 
5194255932Salfred 5195255932Salfred err = move_all_busy(dev, slave, RES_XRCD); 5196255932Salfred if (err) 5197329159Shselasky mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", 5198329159Shselasky slave); 5199255932Salfred 5200255932Salfred spin_lock_irq(mlx4_tlock(dev)); 5201255932Salfred list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { 5202255932Salfred if (xrcd->com.owner == slave) { 5203255932Salfred xrcdn = xrcd->com.res_id; 5204255932Salfred rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]); 5205255932Salfred list_del(&xrcd->com.list); 5206255932Salfred kfree(xrcd); 5207255932Salfred __mlx4_xrcd_free(dev, xrcdn); 5208255932Salfred } 5209255932Salfred } 5210255932Salfred spin_unlock_irq(mlx4_tlock(dev)); 5211255932Salfred} 5212255932Salfred 5213255932Salfredvoid mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) 5214255932Salfred{ 5215255932Salfred struct mlx4_priv *priv = mlx4_priv(dev); 5216329159Shselasky mlx4_reset_roce_gids(dev, slave); 5217255932Salfred mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 5218329159Shselasky rem_slave_vlans(dev, slave); 5219255932Salfred rem_slave_macs(dev, slave); 5220272027Shselasky rem_slave_fs_rule(dev, slave); 5221255932Salfred rem_slave_qps(dev, slave); 5222255932Salfred rem_slave_srqs(dev, slave); 5223255932Salfred rem_slave_cqs(dev, slave); 5224255932Salfred rem_slave_mrs(dev, slave); 5225255932Salfred rem_slave_eqs(dev, slave); 5226255932Salfred rem_slave_mtts(dev, slave); 5227255932Salfred rem_slave_counters(dev, slave); 5228255932Salfred rem_slave_xrcdns(dev, slave); 5229255932Salfred mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 5230255932Salfred} 5231272027Shselasky 5232272027Shselaskyvoid mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) 5233272027Shselasky{ 5234272027Shselasky struct mlx4_vf_immed_vlan_work *work = 5235272027Shselasky container_of(_work, struct mlx4_vf_immed_vlan_work, 
work); 5236272027Shselasky struct mlx4_cmd_mailbox *mailbox; 5237272027Shselasky struct mlx4_update_qp_context *upd_context; 5238272027Shselasky struct mlx4_dev *dev = &work->priv->dev; 5239272027Shselasky struct mlx4_resource_tracker *tracker = 5240272027Shselasky &work->priv->mfunc.master.res_tracker; 5241272027Shselasky struct list_head *qp_list = 5242272027Shselasky &tracker->slave_list[work->slave].res_list[RES_QP]; 5243272027Shselasky struct res_qp *qp; 5244272027Shselasky struct res_qp *tmp; 5245272027Shselasky u64 qp_path_mask_vlan_ctrl = 5246272027Shselasky ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | 5247272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | 5248272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | 5249272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | 5250272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | 5251272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED)); 5252272027Shselasky 5253272027Shselasky u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | 5254272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) | 5255272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_CV) | 5256329159Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_SV) | 5257272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) | 5258272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) | 5259272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) | 5260272027Shselasky (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); 5261272027Shselasky 5262272027Shselasky int err; 5263272027Shselasky int port, errors = 0; 5264272027Shselasky u8 vlan_control; 5265272027Shselasky 5266272027Shselasky if (mlx4_is_slave(dev)) { 5267272027Shselasky mlx4_warn(dev, "Trying to update-qp in slave %d\n", 5268272027Shselasky work->slave); 5269272027Shselasky goto out; 5270272027Shselasky } 5271272027Shselasky 5272272027Shselasky mailbox = mlx4_alloc_cmd_mailbox(dev); 5273272027Shselasky if 
(IS_ERR(mailbox)) 5274272027Shselasky goto out; 5275329159Shselasky if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */ 5276272027Shselasky vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 5277329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | 5278329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | 5279329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 5280329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | 5281272027Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; 5282329159Shselasky else if (!work->vlan_id) 5283272027Shselasky vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 5284329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; 5285329159Shselasky else if (work->vlan_proto == htons(ETH_P_8021AD)) 5286329159Shselasky vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | 5287329159Shselasky MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 5288272027Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 5289272027Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; 5290329159Shselasky else /* vst 802.1Q */ 5291329159Shselasky vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | 5292329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | 5293329159Shselasky MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; 5294272027Shselasky 5295272027Shselasky upd_context = mailbox->buf; 5296329159Shselasky upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); 5297272027Shselasky 5298272027Shselasky spin_lock_irq(mlx4_tlock(dev)); 5299272027Shselasky list_for_each_entry_safe(qp, tmp, qp_list, com.list) { 5300272027Shselasky spin_unlock_irq(mlx4_tlock(dev)); 5301272027Shselasky if (qp->com.owner == work->slave) { 5302272027Shselasky if (qp->com.from_state != RES_QP_HW || 5303272027Shselasky !qp->sched_queue || /* no INIT2RTR trans yet */ 5304272027Shselasky mlx4_is_qp_reserved(dev, qp->local_qpn) || 5305272027Shselasky qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { 5306272027Shselasky spin_lock_irq(mlx4_tlock(dev)); 5307272027Shselasky continue; 
5308272027Shselasky } 5309272027Shselasky port = (qp->sched_queue >> 6 & 1) + 1; 5310272027Shselasky if (port != work->port) { 5311272027Shselasky spin_lock_irq(mlx4_tlock(dev)); 5312272027Shselasky continue; 5313272027Shselasky } 5314272027Shselasky if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff)) 5315272027Shselasky upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask); 5316272027Shselasky else 5317272027Shselasky upd_context->primary_addr_path_mask = 5318272027Shselasky cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl); 5319272027Shselasky if (work->vlan_id == MLX4_VGT) { 5320272027Shselasky upd_context->qp_context.param3 = qp->param3; 5321272027Shselasky upd_context->qp_context.pri_path.vlan_control = qp->vlan_control; 5322272027Shselasky upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx; 5323272027Shselasky upd_context->qp_context.pri_path.vlan_index = qp->vlan_index; 5324272027Shselasky upd_context->qp_context.pri_path.fl = qp->pri_path_fl; 5325272027Shselasky upd_context->qp_context.pri_path.feup = qp->feup; 5326272027Shselasky upd_context->qp_context.pri_path.sched_queue = 5327272027Shselasky qp->sched_queue; 5328272027Shselasky } else { 5329272027Shselasky upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN); 5330272027Shselasky upd_context->qp_context.pri_path.vlan_control = vlan_control; 5331272027Shselasky upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; 5332272027Shselasky upd_context->qp_context.pri_path.fvl_rx = 5333272027Shselasky qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN; 5334272027Shselasky upd_context->qp_context.pri_path.fl = 5335329159Shselasky qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN; 5336329159Shselasky if (work->vlan_proto == htons(ETH_P_8021AD)) 5337329159Shselasky upd_context->qp_context.pri_path.fl |= MLX4_FL_SV; 5338329159Shselasky else 5339329159Shselasky upd_context->qp_context.pri_path.fl |= MLX4_FL_CV; 5340272027Shselasky upd_context->qp_context.pri_path.feup = 5341272027Shselasky 
qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; 5342272027Shselasky upd_context->qp_context.pri_path.sched_queue = 5343272027Shselasky qp->sched_queue & 0xC7; 5344272027Shselasky upd_context->qp_context.pri_path.sched_queue |= 5345272027Shselasky ((work->qos & 0x7) << 3); 5346329159Shselasky upd_context->qp_mask |= 5347329159Shselasky cpu_to_be64(1ULL << 5348329159Shselasky MLX4_UPD_QP_MASK_QOS_VPP); 5349329159Shselasky upd_context->qp_context.qos_vport = 5350329159Shselasky work->qos_vport; 5351272027Shselasky } 5352272027Shselasky 5353272027Shselasky err = mlx4_cmd(dev, mailbox->dma, 5354272027Shselasky qp->local_qpn & 0xffffff, 5355272027Shselasky 0, MLX4_CMD_UPDATE_QP, 5356272027Shselasky MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 5357272027Shselasky if (err) { 5358329159Shselasky mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", 5359329159Shselasky work->slave, port, qp->local_qpn, err); 5360272027Shselasky errors++; 5361272027Shselasky } 5362272027Shselasky } 5363272027Shselasky spin_lock_irq(mlx4_tlock(dev)); 5364272027Shselasky } 5365272027Shselasky spin_unlock_irq(mlx4_tlock(dev)); 5366272027Shselasky mlx4_free_cmd_mailbox(dev, mailbox); 5367272027Shselasky 5368272027Shselasky if (errors) 5369272027Shselasky mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n", 5370272027Shselasky errors, work->slave, work->port); 5371272027Shselasky 5372272027Shselasky /* unregister previous vlan_id if needed and we had no errors 5373272027Shselasky * while updating the QPs 5374272027Shselasky */ 5375272027Shselasky if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && 5376272027Shselasky NO_INDX != work->orig_vlan_ix) 5377272027Shselasky __mlx4_unregister_vlan(&work->priv->dev, work->port, 5378272027Shselasky work->orig_vlan_id); 5379272027Shselaskyout: 5380272027Shselasky kfree(work); 5381272027Shselasky return; 5382272027Shselasky} 5383272027Shselasky 5384