/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
24314564Sdim * 25314564Sdim * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_cq.c 306233 2016-09-23 08:17:51Z hselasky $ 26314564Sdim */ 27314564Sdim 28314564Sdim#include <linux/kernel.h> 29314564Sdim#include <linux/module.h> 30341825Sdim#include <linux/hardirq.h> 31341825Sdim#include <dev/mlx5/driver.h> 32314564Sdim#include <rdma/ib_verbs.h> 33314564Sdim#include <dev/mlx5/cq.h> 34314564Sdim#include "mlx5_core.h" 35314564Sdim 36314564Sdimvoid mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) 37314564Sdim{ 38314564Sdim struct mlx5_core_cq *cq; 39314564Sdim struct mlx5_cq_table *table = &dev->priv.cq_table; 40314564Sdim 41314564Sdim if (cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) { 42314564Sdim struct mlx5_cq_linear_array_entry *entry; 43314564Sdim 44314564Sdim entry = &table->linear_array[cqn]; 45314564Sdim spin_lock(&entry->lock); 46314564Sdim cq = entry->cq; 47314564Sdim if (cq == NULL) { 48254721Semaste mlx5_core_warn(dev, 49314564Sdim "Completion event for bogus CQ 0x%x\n", cqn); 50314564Sdim } else { 51314564Sdim ++cq->arm_sn; 52314564Sdim cq->comp(cq); 53314564Sdim } 54314564Sdim spin_unlock(&entry->lock); 55314564Sdim return; 56314564Sdim } 57314564Sdim 58254721Semaste spin_lock(&table->lock); 59314564Sdim cq = radix_tree_lookup(&table->tree, cqn); 60341825Sdim if (likely(cq)) 61314564Sdim atomic_inc(&cq->refcount); 62314564Sdim spin_unlock(&table->lock); 63314564Sdim 64254721Semaste if (!cq) { 65314564Sdim mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); 66314564Sdim return; 67314564Sdim } 68314564Sdim 69314564Sdim ++cq->arm_sn; 70314564Sdim 71314564Sdim cq->comp(cq); 72314564Sdim 73254721Semaste if (atomic_dec_and_test(&cq->refcount)) 74314564Sdim complete(&cq->free); 75254721Semaste} 76314564Sdim 77314564Sdimvoid mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) 78254721Semaste{ 79 struct mlx5_cq_table *table = &dev->priv.cq_table; 80 struct mlx5_core_cq *cq; 81 82 spin_lock(&table->lock); 83 84 cq = 
radix_tree_lookup(&table->tree, cqn); 85 if (cq) 86 atomic_inc(&cq->refcount); 87 88 spin_unlock(&table->lock); 89 90 if (!cq) { 91 mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); 92 return; 93 } 94 95 cq->event(cq, event_type); 96 97 if (atomic_dec_and_test(&cq->refcount)) 98 complete(&cq->free); 99} 100 101 102int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 103 struct mlx5_create_cq_mbox_in *in, int inlen) 104{ 105 int err; 106 struct mlx5_cq_table *table = &dev->priv.cq_table; 107 struct mlx5_create_cq_mbox_out out; 108 struct mlx5_destroy_cq_mbox_in din; 109 struct mlx5_destroy_cq_mbox_out dout; 110 111 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); 112 memset(&out, 0, sizeof(out)); 113 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 114 if (err) 115 return err; 116 117 if (out.hdr.status) 118 return mlx5_cmd_status_to_err(&out.hdr); 119 120 cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; 121 cq->cons_index = 0; 122 cq->arm_sn = 0; 123 atomic_set(&cq->refcount, 1); 124 init_completion(&cq->free); 125 126 spin_lock_irq(&table->lock); 127 err = radix_tree_insert(&table->tree, cq->cqn, cq); 128 spin_unlock_irq(&table->lock); 129 if (err) 130 goto err_cmd; 131 132 if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) { 133 struct mlx5_cq_linear_array_entry *entry; 134 135 entry = &table->linear_array[cq->cqn]; 136 spin_lock_irq(&entry->lock); 137 entry->cq = cq; 138 spin_unlock_irq(&entry->lock); 139 } 140 141 cq->pid = curthread->td_proc->p_pid; 142 143 return 0; 144 145err_cmd: 146 memset(&din, 0, sizeof(din)); 147 memset(&dout, 0, sizeof(dout)); 148 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); 149 din.cqn = cpu_to_be32(cq->cqn); 150 mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); 151 return err; 152} 153EXPORT_SYMBOL(mlx5_core_create_cq); 154 155int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) 156{ 157 struct mlx5_cq_table *table = &dev->priv.cq_table; 158 struct 
mlx5_destroy_cq_mbox_in in; 159 struct mlx5_destroy_cq_mbox_out out; 160 struct mlx5_core_cq *tmp; 161 int err; 162 163 if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) { 164 struct mlx5_cq_linear_array_entry *entry; 165 166 entry = &table->linear_array[cq->cqn]; 167 spin_lock_irq(&entry->lock); 168 entry->cq = NULL; 169 spin_unlock_irq(&entry->lock); 170 } 171 172 spin_lock_irq(&table->lock); 173 tmp = radix_tree_delete(&table->tree, cq->cqn); 174 spin_unlock_irq(&table->lock); 175 if (!tmp) { 176 mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); 177 return -EINVAL; 178 } 179 if (tmp != cq) { 180 mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); 181 return -EINVAL; 182 } 183 184 memset(&in, 0, sizeof(in)); 185 memset(&out, 0, sizeof(out)); 186 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); 187 in.cqn = cpu_to_be32(cq->cqn); 188 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 189 if (err) 190 goto out; 191 192 if (out.hdr.status) { 193 err = mlx5_cmd_status_to_err(&out.hdr); 194 goto out; 195 } 196 197 synchronize_irq(cq->irqn); 198 199 if (atomic_dec_and_test(&cq->refcount)) 200 complete(&cq->free); 201 wait_for_completion(&cq->free); 202 203out: 204 205 return err; 206} 207EXPORT_SYMBOL(mlx5_core_destroy_cq); 208 209int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 210 struct mlx5_query_cq_mbox_out *out) 211{ 212 struct mlx5_query_cq_mbox_in in; 213 int err; 214 215 memset(&in, 0, sizeof(in)); 216 memset(out, 0, sizeof(*out)); 217 218 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); 219 in.cqn = cpu_to_be32(cq->cqn); 220 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); 221 if (err) 222 return err; 223 224 if (out->hdr.status) 225 return mlx5_cmd_status_to_err(&out->hdr); 226 227 return err; 228} 229EXPORT_SYMBOL(mlx5_core_query_cq); 230 231 232int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 233 struct mlx5_modify_cq_mbox_in *in, int in_sz) 234{ 235 struct 
mlx5_modify_cq_mbox_out out; 236 int err; 237 238 memset(&out, 0, sizeof(out)); 239 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); 240 err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); 241 if (err) 242 return err; 243 244 if (out.hdr.status) 245 return mlx5_cmd_status_to_err(&out.hdr); 246 247 return 0; 248} 249EXPORT_SYMBOL(mlx5_core_modify_cq); 250 251int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, 252 struct mlx5_core_cq *cq, 253 u16 cq_period, 254 u16 cq_max_count) 255{ 256 struct mlx5_modify_cq_mbox_in in; 257 258 memset(&in, 0, sizeof(in)); 259 260 in.cqn = cpu_to_be32(cq->cqn); 261 in.ctx.cq_period = cpu_to_be16(cq_period); 262 in.ctx.cq_max_count = cpu_to_be16(cq_max_count); 263 in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | 264 MLX5_CQ_MODIFY_COUNT); 265 266 return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); 267} 268 269int mlx5_init_cq_table(struct mlx5_core_dev *dev) 270{ 271 struct mlx5_cq_table *table = &dev->priv.cq_table; 272 int err; 273 int x; 274 275 spin_lock_init(&table->lock); 276 for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++) 277 spin_lock_init(&table->linear_array[x].lock); 278 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); 279 err = 0; 280 281 return err; 282} 283 284void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) 285{ 286} 287