/* mlx5_cq.c — revision 306233 */
1139749Simp/*- 285810Sobrien * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 385810Sobrien * 485810Sobrien * Redistribution and use in source and binary forms, with or without 585810Sobrien * modification, are permitted provided that the following conditions 685810Sobrien * are met: 785810Sobrien * 1. Redistributions of source code must retain the above copyright 885810Sobrien * notice, this list of conditions and the following disclaimer. 985810Sobrien * 2. Redistributions in binary form must reproduce the above copyright 1085810Sobrien * notice, this list of conditions and the following disclaimer in the 1185810Sobrien * documentation and/or other materials provided with the distribution. 1285810Sobrien * 1385810Sobrien * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 1485810Sobrien * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1585810Sobrien * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1685810Sobrien * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 1785810Sobrien * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1885810Sobrien * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 1985810Sobrien * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2085810Sobrien * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2185810Sobrien * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2285810Sobrien * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2385810Sobrien * SUCH DAMAGE. 
2485810Sobrien * 2585810Sobrien * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_cq.c 306233 2016-09-23 08:17:51Z hselasky $ 2685810Sobrien */ 2785810Sobrien 2885810Sobrien#include <linux/kernel.h> 2985810Sobrien#include <linux/module.h> 3085810Sobrien#include <linux/hardirq.h> 3185810Sobrien#include <dev/mlx5/driver.h> 3285810Sobrien#include <rdma/ib_verbs.h> 3385810Sobrien#include <dev/mlx5/cq.h> 3485810Sobrien#include "mlx5_core.h" 3585810Sobrien 3685810Sobrienvoid mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) 3785810Sobrien{ 38170837Smarius struct mlx5_core_cq *cq; 3985810Sobrien struct mlx5_cq_table *table = &dev->priv.cq_table; 4085810Sobrien 4185810Sobrien if (cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) { 42 struct mlx5_cq_linear_array_entry *entry; 43 44 entry = &table->linear_array[cqn]; 45 spin_lock(&entry->lock); 46 cq = entry->cq; 47 if (cq == NULL) { 48 mlx5_core_warn(dev, 49 "Completion event for bogus CQ 0x%x\n", cqn); 50 } else { 51 ++cq->arm_sn; 52 cq->comp(cq); 53 } 54 spin_unlock(&entry->lock); 55 return; 56 } 57 58 spin_lock(&table->lock); 59 cq = radix_tree_lookup(&table->tree, cqn); 60 if (likely(cq)) 61 atomic_inc(&cq->refcount); 62 spin_unlock(&table->lock); 63 64 if (!cq) { 65 mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); 66 return; 67 } 68 69 ++cq->arm_sn; 70 71 cq->comp(cq); 72 73 if (atomic_dec_and_test(&cq->refcount)) 74 complete(&cq->free); 75} 76 77void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) 78{ 79 struct mlx5_cq_table *table = &dev->priv.cq_table; 80 struct mlx5_core_cq *cq; 81 82 spin_lock(&table->lock); 83 84 cq = radix_tree_lookup(&table->tree, cqn); 85 if (cq) 86 atomic_inc(&cq->refcount); 87 88 spin_unlock(&table->lock); 89 90 if (!cq) { 91 mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); 92 return; 93 } 94 95 cq->event(cq, event_type); 96 97 if (atomic_dec_and_test(&cq->refcount)) 98 complete(&cq->free); 99} 100 101 102int mlx5_core_create_cq(struct mlx5_core_dev *dev, 
struct mlx5_core_cq *cq, 103 struct mlx5_create_cq_mbox_in *in, int inlen) 104{ 105 int err; 106 struct mlx5_cq_table *table = &dev->priv.cq_table; 107 struct mlx5_create_cq_mbox_out out; 108 struct mlx5_destroy_cq_mbox_in din; 109 struct mlx5_destroy_cq_mbox_out dout; 110 111 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); 112 memset(&out, 0, sizeof(out)); 113 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 114 if (err) 115 return err; 116 117 if (out.hdr.status) 118 return mlx5_cmd_status_to_err(&out.hdr); 119 120 cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; 121 cq->cons_index = 0; 122 cq->arm_sn = 0; 123 atomic_set(&cq->refcount, 1); 124 init_completion(&cq->free); 125 126 spin_lock_irq(&table->lock); 127 err = radix_tree_insert(&table->tree, cq->cqn, cq); 128 spin_unlock_irq(&table->lock); 129 if (err) 130 goto err_cmd; 131 132 if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) { 133 struct mlx5_cq_linear_array_entry *entry; 134 135 entry = &table->linear_array[cq->cqn]; 136 spin_lock_irq(&entry->lock); 137 entry->cq = cq; 138 spin_unlock_irq(&entry->lock); 139 } 140 141 cq->pid = curthread->td_proc->p_pid; 142 143 return 0; 144 145err_cmd: 146 memset(&din, 0, sizeof(din)); 147 memset(&dout, 0, sizeof(dout)); 148 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); 149 din.cqn = cpu_to_be32(cq->cqn); 150 mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); 151 return err; 152} 153EXPORT_SYMBOL(mlx5_core_create_cq); 154 155int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) 156{ 157 struct mlx5_cq_table *table = &dev->priv.cq_table; 158 struct mlx5_destroy_cq_mbox_in in; 159 struct mlx5_destroy_cq_mbox_out out; 160 struct mlx5_core_cq *tmp; 161 int err; 162 163 if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) { 164 struct mlx5_cq_linear_array_entry *entry; 165 166 entry = &table->linear_array[cq->cqn]; 167 spin_lock_irq(&entry->lock); 168 entry->cq = NULL; 169 spin_unlock_irq(&entry->lock); 170 } 171 172 spin_lock_irq(&table->lock); 173 
tmp = radix_tree_delete(&table->tree, cq->cqn); 174 spin_unlock_irq(&table->lock); 175 if (!tmp) { 176 mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); 177 return -EINVAL; 178 } 179 if (tmp != cq) { 180 mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); 181 return -EINVAL; 182 } 183 184 memset(&in, 0, sizeof(in)); 185 memset(&out, 0, sizeof(out)); 186 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); 187 in.cqn = cpu_to_be32(cq->cqn); 188 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 189 if (err) 190 goto out; 191 192 if (out.hdr.status) { 193 err = mlx5_cmd_status_to_err(&out.hdr); 194 goto out; 195 } 196 197 synchronize_irq(cq->irqn); 198 199 if (atomic_dec_and_test(&cq->refcount)) 200 complete(&cq->free); 201 wait_for_completion(&cq->free); 202 203out: 204 205 return err; 206} 207EXPORT_SYMBOL(mlx5_core_destroy_cq); 208 209int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 210 struct mlx5_query_cq_mbox_out *out) 211{ 212 struct mlx5_query_cq_mbox_in in; 213 int err; 214 215 memset(&in, 0, sizeof(in)); 216 memset(out, 0, sizeof(*out)); 217 218 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); 219 in.cqn = cpu_to_be32(cq->cqn); 220 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); 221 if (err) 222 return err; 223 224 if (out->hdr.status) 225 return mlx5_cmd_status_to_err(&out->hdr); 226 227 return err; 228} 229EXPORT_SYMBOL(mlx5_core_query_cq); 230 231 232int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 233 struct mlx5_modify_cq_mbox_in *in, int in_sz) 234{ 235 struct mlx5_modify_cq_mbox_out out; 236 int err; 237 238 memset(&out, 0, sizeof(out)); 239 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); 240 err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); 241 if (err) 242 return err; 243 244 if (out.hdr.status) 245 return mlx5_cmd_status_to_err(&out.hdr); 246 247 return 0; 248} 249EXPORT_SYMBOL(mlx5_core_modify_cq); 250 251int 
mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, 252 struct mlx5_core_cq *cq, 253 u16 cq_period, 254 u16 cq_max_count) 255{ 256 struct mlx5_modify_cq_mbox_in in; 257 258 memset(&in, 0, sizeof(in)); 259 260 in.cqn = cpu_to_be32(cq->cqn); 261 in.ctx.cq_period = cpu_to_be16(cq_period); 262 in.ctx.cq_max_count = cpu_to_be16(cq_max_count); 263 in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | 264 MLX5_CQ_MODIFY_COUNT); 265 266 return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); 267} 268 269int mlx5_init_cq_table(struct mlx5_core_dev *dev) 270{ 271 struct mlx5_cq_table *table = &dev->priv.cq_table; 272 int err; 273 int x; 274 275 spin_lock_init(&table->lock); 276 for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++) 277 spin_lock_init(&table->linear_array[x].lock); 278 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); 279 err = 0; 280 281 return err; 282} 283 284void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) 285{ 286} 287