mlx5_cmd.c revision 306233
1/*- 2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_cmd.c 306233 2016-09-23 08:17:51Z hselasky $ 26 */ 27 28#include <linux/module.h> 29#include <linux/errno.h> 30#include <linux/pci.h> 31#include <linux/dma-mapping.h> 32#include <linux/slab.h> 33#include <linux/delay.h> 34#include <linux/random.h> 35#include <linux/io-mapping.h> 36#include <linux/hardirq.h> 37#include <linux/ktime.h> 38#include <dev/mlx5/driver.h> 39 40#include "mlx5_core.h" 41 42enum { 43 CMD_IF_REV = 5, 44}; 45 46enum { 47 CMD_MODE_POLLING, 48 CMD_MODE_EVENTS 49}; 50 51enum { 52 NUM_LONG_LISTS = 2, 53 NUM_MED_LISTS = 64, 54 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + 55 MLX5_CMD_DATA_BLOCK_SIZE, 56 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, 57}; 58 59enum { 60 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 61 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 62 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, 63 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, 64 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, 65 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, 66 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, 67 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, 68 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, 69 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, 70 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 71}; 72 73static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, 74 struct mlx5_cmd_msg *in, 75 struct mlx5_cmd_msg *out, 76 void *uout, int uout_size, 77 mlx5_cmd_cbk_t cbk, 78 void *context, int page_queue) 79{ 80 gfp_t alloc_flags = cbk ? 
GFP_ATOMIC : GFP_KERNEL; 81 struct mlx5_cmd_work_ent *ent; 82 83 ent = kzalloc(sizeof(*ent), alloc_flags); 84 if (!ent) 85 return ERR_PTR(-ENOMEM); 86 87 ent->in = in; 88 ent->out = out; 89 ent->uout = uout; 90 ent->uout_size = uout_size; 91 ent->callback = cbk; 92 ent->context = context; 93 ent->cmd = cmd; 94 ent->page_queue = page_queue; 95 96 return ent; 97} 98 99static u8 alloc_token(struct mlx5_cmd *cmd) 100{ 101 u8 token; 102 103 spin_lock(&cmd->token_lock); 104 cmd->token++; 105 if (cmd->token == 0) 106 cmd->token++; 107 token = cmd->token; 108 spin_unlock(&cmd->token_lock); 109 110 return token; 111} 112 113static int alloc_ent(struct mlx5_cmd *cmd) 114{ 115 unsigned long flags; 116 int ret; 117 118 spin_lock_irqsave(&cmd->alloc_lock, flags); 119 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); 120 if (ret < cmd->max_reg_cmds) 121 clear_bit(ret, &cmd->bitmask); 122 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 123 124 return ret < cmd->max_reg_cmds ? ret : -1; 125} 126 127static void free_ent(struct mlx5_cmd *cmd, int idx) 128{ 129 unsigned long flags; 130 131 spin_lock_irqsave(&cmd->alloc_lock, flags); 132 set_bit(idx, &cmd->bitmask); 133 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 134} 135 136static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 137{ 138 return cmd->cmd_buf + (idx << cmd->log_stride); 139} 140 141static u8 xor8_buf(void *buf, int len) 142{ 143 u8 *ptr = buf; 144 u8 sum = 0; 145 int i; 146 147 for (i = 0; i < len; i++) 148 sum ^= ptr[i]; 149 150 return sum; 151} 152 153static int verify_block_sig(struct mlx5_cmd_prot_block *block) 154{ 155 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) 156 return -EINVAL; 157 158 if (xor8_buf(block, sizeof(*block)) != 0xff) 159 return -EINVAL; 160 161 return 0; 162} 163 164static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, 165 int csum) 166{ 167 block->token = token; 168 if (csum) { 169 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - 170 sizeof(block->data) - 2); 171 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 172 } 173} 174 175static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) 176{ 177 struct mlx5_cmd_mailbox *next = msg->next; 178 179 while (next) { 180 calc_block_sig(next->buf, token, csum); 181 next = next->next; 182 } 183} 184 185static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 186{ 187 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 188 calc_chain_sig(ent->in, ent->token, csum); 189 calc_chain_sig(ent->out, ent->token, csum); 190} 191 192static void poll_timeout(struct mlx5_cmd_work_ent *ent) 193{ 194 int poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); 195 u8 own; 196 197 do { 198 own = ent->lay->status_own; 199 if (!(own & CMD_OWNER_HW)) { 200 ent->ret = 0; 201 return; 202 } 203 usleep_range(5000, 10000); 204 } while (time_before(jiffies, poll_end)); 205 206 ent->ret = -ETIMEDOUT; 207} 208 209static void free_cmd(struct mlx5_cmd_work_ent *ent) 210{ 211 kfree(ent); 212} 213 214 215static int verify_signature(struct mlx5_cmd_work_ent *ent) 216{ 217 struct mlx5_cmd_mailbox *next = ent->out->next; 218 int err; 219 u8 sig; 220 221 sig = xor8_buf(ent->lay, sizeof(*ent->lay)); 222 if (sig != 0xff) 223 return -EINVAL; 224 225 while (next) { 226 err = verify_block_sig(next->buf); 227 if (err) 228 return err; 229 230 next = next->next; 231 } 232 233 return 0; 234} 235 236static void dump_buf(void *buf, int size, int data_only, int offset) 237{ 238 __be32 *p = buf; 
239 int i; 240 241 for (i = 0; i < size; i += 16) { 242 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), 243 be32_to_cpu(p[1]), be32_to_cpu(p[2]), 244 be32_to_cpu(p[3])); 245 p += 4; 246 offset += 16; 247 } 248 if (!data_only) 249 pr_debug("\n"); 250} 251 252const char *mlx5_command_str(int command) 253{ 254 switch (command) { 255 case MLX5_CMD_OP_QUERY_HCA_CAP: 256 return "QUERY_HCA_CAP"; 257 258 case MLX5_CMD_OP_SET_HCA_CAP: 259 return "SET_HCA_CAP"; 260 261 case MLX5_CMD_OP_QUERY_ADAPTER: 262 return "QUERY_ADAPTER"; 263 264 case MLX5_CMD_OP_INIT_HCA: 265 return "INIT_HCA"; 266 267 case MLX5_CMD_OP_TEARDOWN_HCA: 268 return "TEARDOWN_HCA"; 269 270 case MLX5_CMD_OP_ENABLE_HCA: 271 return "MLX5_CMD_OP_ENABLE_HCA"; 272 273 case MLX5_CMD_OP_DISABLE_HCA: 274 return "MLX5_CMD_OP_DISABLE_HCA"; 275 276 case MLX5_CMD_OP_QUERY_PAGES: 277 return "QUERY_PAGES"; 278 279 case MLX5_CMD_OP_MANAGE_PAGES: 280 return "MANAGE_PAGES"; 281 282 case MLX5_CMD_OP_QUERY_ISSI: 283 return "QUERY_ISSI"; 284 285 case MLX5_CMD_OP_SET_ISSI: 286 return "SET_ISSI"; 287 288 case MLX5_CMD_OP_CREATE_MKEY: 289 return "CREATE_MKEY"; 290 291 case MLX5_CMD_OP_QUERY_MKEY: 292 return "QUERY_MKEY"; 293 294 case MLX5_CMD_OP_DESTROY_MKEY: 295 return "DESTROY_MKEY"; 296 297 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 298 return "QUERY_SPECIAL_CONTEXTS"; 299 300 case MLX5_CMD_OP_PAGE_FAULT_RESUME: 301 return "PAGE_FAULT_RESUME"; 302 303 case MLX5_CMD_OP_CREATE_EQ: 304 return "CREATE_EQ"; 305 306 case MLX5_CMD_OP_DESTROY_EQ: 307 return "DESTROY_EQ"; 308 309 case MLX5_CMD_OP_QUERY_EQ: 310 return "QUERY_EQ"; 311 312 case MLX5_CMD_OP_GEN_EQE: 313 return "GEN_EQE"; 314 315 case MLX5_CMD_OP_CREATE_CQ: 316 return "CREATE_CQ"; 317 318 case MLX5_CMD_OP_DESTROY_CQ: 319 return "DESTROY_CQ"; 320 321 case MLX5_CMD_OP_QUERY_CQ: 322 return "QUERY_CQ"; 323 324 case MLX5_CMD_OP_MODIFY_CQ: 325 return "MODIFY_CQ"; 326 327 case MLX5_CMD_OP_CREATE_QP: 328 return "CREATE_QP"; 329 330 case MLX5_CMD_OP_DESTROY_QP: 331 return "DESTROY_QP"; 332 333 case MLX5_CMD_OP_RST2INIT_QP: 334 return "RST2INIT_QP"; 335 336 case MLX5_CMD_OP_INIT2RTR_QP: 337 return "INIT2RTR_QP"; 338 339 case MLX5_CMD_OP_RTR2RTS_QP: 340 return "RTR2RTS_QP"; 341 342 case MLX5_CMD_OP_RTS2RTS_QP: 343 return "RTS2RTS_QP"; 344 345 case MLX5_CMD_OP_SQERR2RTS_QP: 346 return "SQERR2RTS_QP"; 347 348 case MLX5_CMD_OP_2ERR_QP: 349 return "2ERR_QP"; 350 351 case MLX5_CMD_OP_2RST_QP: 352 return "2RST_QP"; 353 354 case MLX5_CMD_OP_QUERY_QP: 355 return "QUERY_QP"; 356 357 case MLX5_CMD_OP_SQD_RTS_QP: 358 return "SQD_RTS_QP"; 359 360 case MLX5_CMD_OP_MAD_IFC: 361 return "MAD_IFC"; 362 363 case MLX5_CMD_OP_INIT2INIT_QP: 364 return "INIT2INIT_QP"; 365 366 case MLX5_CMD_OP_CREATE_PSV: 367 return "CREATE_PSV"; 368 369 case MLX5_CMD_OP_DESTROY_PSV: 370 return "DESTROY_PSV"; 371 372 case MLX5_CMD_OP_CREATE_SRQ: 373 return "CREATE_SRQ"; 374 375 case MLX5_CMD_OP_DESTROY_SRQ: 376 return "DESTROY_SRQ"; 377 378 case MLX5_CMD_OP_QUERY_SRQ: 379 return "QUERY_SRQ"; 380 381 case MLX5_CMD_OP_ARM_RQ: 382 return "ARM_RQ"; 383 384 case MLX5_CMD_OP_CREATE_XRC_SRQ: 385 return "CREATE_XRC_SRQ"; 386 387 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 388 return "DESTROY_XRC_SRQ"; 389 390 case MLX5_CMD_OP_QUERY_XRC_SRQ: 391 return "QUERY_XRC_SRQ"; 392 393 case MLX5_CMD_OP_ARM_XRC_SRQ: 394 return "ARM_XRC_SRQ"; 395 396 case MLX5_CMD_OP_CREATE_DCT: 397 return "CREATE_DCT"; 398 399 case MLX5_CMD_OP_SET_DC_CNAK_TRACE: 400 return "SET_DC_CNAK_TRACE"; 401 402 case MLX5_CMD_OP_DESTROY_DCT: 403 return "DESTROY_DCT"; 404 405 case 
MLX5_CMD_OP_DRAIN_DCT: 406 return "DRAIN_DCT"; 407 408 case MLX5_CMD_OP_QUERY_DCT: 409 return "QUERY_DCT"; 410 411 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 412 return "ARM_DCT_FOR_KEY_VIOLATION"; 413 414 case MLX5_CMD_OP_QUERY_VPORT_STATE: 415 return "QUERY_VPORT_STATE"; 416 417 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 418 return "MODIFY_VPORT_STATE"; 419 420 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 421 return "QUERY_ESW_VPORT_CONTEXT"; 422 423 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 424 return "MODIFY_ESW_VPORT_CONTEXT"; 425 426 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 427 return "QUERY_NIC_VPORT_CONTEXT"; 428 429 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 430 return "MODIFY_NIC_VPORT_CONTEXT"; 431 432 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 433 return "QUERY_ROCE_ADDRESS"; 434 435 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 436 return "SET_ROCE_ADDRESS"; 437 438 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 439 return "QUERY_HCA_VPORT_CONTEXT"; 440 441 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 442 return "MODIFY_HCA_VPORT_CONTEXT"; 443 444 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 445 return "QUERY_HCA_VPORT_GID"; 446 447 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 448 return "QUERY_HCA_VPORT_PKEY"; 449 450 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 451 return "QUERY_VPORT_COUNTER"; 452 453 case MLX5_CMD_OP_SET_WOL_ROL: 454 return "SET_WOL_ROL"; 455 456 case MLX5_CMD_OP_QUERY_WOL_ROL: 457 return "QUERY_WOL_ROL"; 458 459 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 460 return "ALLOC_Q_COUNTER"; 461 462 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 463 return "DEALLOC_Q_COUNTER"; 464 465 case MLX5_CMD_OP_QUERY_Q_COUNTER: 466 return "QUERY_Q_COUNTER"; 467 468 case MLX5_CMD_OP_ALLOC_PD: 469 return "ALLOC_PD"; 470 471 case MLX5_CMD_OP_DEALLOC_PD: 472 return "DEALLOC_PD"; 473 474 case MLX5_CMD_OP_ALLOC_UAR: 475 return "ALLOC_UAR"; 476 477 case MLX5_CMD_OP_DEALLOC_UAR: 478 return "DEALLOC_UAR"; 479 480 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 481 return "CONFIG_INT_MODERATION"; 482 483 case MLX5_CMD_OP_ATTACH_TO_MCG: 484 return "ATTACH_TO_MCG"; 485 486 case MLX5_CMD_OP_DETACH_FROM_MCG: 487 return "DETACH_FROM_MCG"; 488 489 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 490 return "GET_DROPPED_PACKET_LOG"; 491 492 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 493 return "QUERY_MAD_DEMUX"; 494 495 case MLX5_CMD_OP_SET_MAD_DEMUX: 496 return "SET_MAD_DEMUX"; 497 498 case MLX5_CMD_OP_NOP: 499 return "NOP"; 500 501 case MLX5_CMD_OP_ALLOC_XRCD: 502 return "ALLOC_XRCD"; 503 504 case MLX5_CMD_OP_DEALLOC_XRCD: 505 return "DEALLOC_XRCD"; 506 507 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 508 return "ALLOC_TRANSPORT_DOMAIN"; 509 510 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 511 return "DEALLOC_TRANSPORT_DOMAIN"; 512 513 case MLX5_CMD_OP_QUERY_CONG_STATUS: 514 return "QUERY_CONG_STATUS"; 515 516 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 517 return "MODIFY_CONG_STATUS"; 518 519 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 520 return "QUERY_CONG_PARAMS"; 521 522 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 523 return "MODIFY_CONG_PARAMS"; 524 525 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 526 return "QUERY_CONG_STATISTICS"; 527 528 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 529 return "ADD_VXLAN_UDP_DPORT"; 530 531 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 532 return "DELETE_VXLAN_UDP_DPORT"; 533 534 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 535 return "SET_L2_TABLE_ENTRY"; 536 537 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 538 return "QUERY_L2_TABLE_ENTRY"; 539 540 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 541 return "DELETE_L2_TABLE_ENTRY"; 542 543 case MLX5_CMD_OP_CREATE_RMP: 544 return "CREATE_RMP"; 545 546 
case MLX5_CMD_OP_MODIFY_RMP: 547 return "MODIFY_RMP"; 548 549 case MLX5_CMD_OP_DESTROY_RMP: 550 return "DESTROY_RMP"; 551 552 case MLX5_CMD_OP_QUERY_RMP: 553 return "QUERY_RMP"; 554 555 case MLX5_CMD_OP_CREATE_RQT: 556 return "CREATE_RQT"; 557 558 case MLX5_CMD_OP_MODIFY_RQT: 559 return "MODIFY_RQT"; 560 561 case MLX5_CMD_OP_DESTROY_RQT: 562 return "DESTROY_RQT"; 563 564 case MLX5_CMD_OP_QUERY_RQT: 565 return "QUERY_RQT"; 566 567 case MLX5_CMD_OP_ACCESS_REG: 568 return "MLX5_CMD_OP_ACCESS_REG"; 569 570 case MLX5_CMD_OP_CREATE_SQ: 571 return "CREATE_SQ"; 572 573 case MLX5_CMD_OP_MODIFY_SQ: 574 return "MODIFY_SQ"; 575 576 case MLX5_CMD_OP_DESTROY_SQ: 577 return "DESTROY_SQ"; 578 579 case MLX5_CMD_OP_QUERY_SQ: 580 return "QUERY_SQ"; 581 582 case MLX5_CMD_OP_CREATE_RQ: 583 return "CREATE_RQ"; 584 585 case MLX5_CMD_OP_MODIFY_RQ: 586 return "MODIFY_RQ"; 587 588 case MLX5_CMD_OP_DESTROY_RQ: 589 return "DESTROY_RQ"; 590 591 case MLX5_CMD_OP_QUERY_RQ: 592 return "QUERY_RQ"; 593 594 case MLX5_CMD_OP_CREATE_TIR: 595 return "CREATE_TIR"; 596 597 case MLX5_CMD_OP_MODIFY_TIR: 598 return "MODIFY_TIR"; 599 600 case MLX5_CMD_OP_DESTROY_TIR: 601 return "DESTROY_TIR"; 602 603 case MLX5_CMD_OP_QUERY_TIR: 604 return "QUERY_TIR"; 605 606 case MLX5_CMD_OP_CREATE_TIS: 607 return "CREATE_TIS"; 608 609 case MLX5_CMD_OP_MODIFY_TIS: 610 return "MODIFY_TIS"; 611 612 case MLX5_CMD_OP_DESTROY_TIS: 613 return "DESTROY_TIS"; 614 615 case MLX5_CMD_OP_QUERY_TIS: 616 return "QUERY_TIS"; 617 618 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 619 return "CREATE_FLOW_TABLE"; 620 621 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 622 return "DESTROY_FLOW_TABLE"; 623 624 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 625 return "QUERY_FLOW_TABLE"; 626 627 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 628 return "CREATE_FLOW_GROUP"; 629 630 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 631 return "DESTROY_FLOW_GROUP"; 632 633 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 634 return "QUERY_FLOW_GROUP"; 635 636 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 637 return "SET_FLOW_TABLE_ENTRY"; 638 639 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 640 return "QUERY_FLOW_TABLE_ENTRY"; 641 642 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 643 return "DELETE_FLOW_TABLE_ENTRY"; 644 645 case MLX5_CMD_OP_SET_DIAGNOSTICS: 646 return "MLX5_CMD_OP_SET_DIAGNOSTICS"; 647 648 case MLX5_CMD_OP_QUERY_DIAGNOSTICS: 649 return "MLX5_CMD_OP_QUERY_DIAGNOSTICS"; 650 651 default: return "unknown command opcode"; 652 } 653} 654 655static void dump_command(struct mlx5_core_dev *dev, 656 struct mlx5_cmd_work_ent *ent, int input) 657{ 658 u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); 659 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 660 struct mlx5_cmd_mailbox *next = msg->next; 661 int data_only; 662 u32 offset = 0; 663 int dump_len; 664 665 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 666 667 if (data_only) 668 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 669 "dump command data %s(0x%x) %s\n", 670 mlx5_command_str(op), op, 671 input ? "INPUT" : "OUTPUT"); 672 else 673 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", 674 mlx5_command_str(op), op, 675 input ? 
"INPUT" : "OUTPUT"); 676 677 if (data_only) { 678 if (input) { 679 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); 680 offset += sizeof(ent->lay->in); 681 } else { 682 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); 683 offset += sizeof(ent->lay->out); 684 } 685 } else { 686 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); 687 offset += sizeof(*ent->lay); 688 } 689 690 while (next && offset < msg->len) { 691 if (data_only) { 692 dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); 693 dump_buf(next->buf, dump_len, 1, offset); 694 offset += MLX5_CMD_DATA_BLOCK_SIZE; 695 } else { 696 mlx5_core_dbg(dev, "command block:\n"); 697 dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); 698 offset += sizeof(struct mlx5_cmd_prot_block); 699 } 700 next = next->next; 701 } 702 703 if (data_only) 704 pr_debug("\n"); 705} 706 707static void cmd_work_handler(struct work_struct *work) 708{ 709 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 710 struct mlx5_cmd *cmd = ent->cmd; 711 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 712 struct mlx5_cmd_layout *lay; 713 struct semaphore *sem; 714 715 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 716 if (cmd->moving_to_polling) { 717 mlx5_core_warn(dev, "not expecting command execution, ignoring...\n"); 718 return; 719 } 720 721 down(sem); 722 if (!ent->page_queue) { 723 ent->idx = alloc_ent(cmd); 724 if (ent->idx < 0) { 725 mlx5_core_err(dev, "failed to allocate command entry\n"); 726 up(sem); 727 return; 728 } 729 } else { 730 ent->idx = cmd->max_reg_cmds; 731 } 732 733 ent->token = alloc_token(cmd); 734 cmd->ent_arr[ent->idx] = ent; 735 lay = get_inst(cmd, ent->idx); 736 ent->lay = lay; 737 memset(lay, 0, sizeof(*lay)); 738 memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); 739 ent->op = be32_to_cpu(lay->in[0]) >> 16; 740 if (ent->in->next) 741 lay->in_ptr = cpu_to_be64(ent->in->next->dma); 742 lay->inlen = cpu_to_be32(ent->in->len); 743 if (ent->out->next) 744 lay->out_ptr = cpu_to_be64(ent->out->next->dma); 745 lay->outlen = cpu_to_be32(ent->out->len); 746 lay->type = MLX5_PCI_CMD_XPORT; 747 lay->token = ent->token; 748 lay->status_own = CMD_OWNER_HW; 749 set_signature(ent, !cmd->checksum_disabled); 750 dump_command(dev, ent, 1); 751 ent->ts1 = ktime_get_ns(); 752 753 /* ring doorbell after the descriptor is valid */ 754 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 755 wmb(); 756 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 757 mmiowb(); 758 /* if not in polling don't use ent after this point*/ 759 if (cmd->mode == CMD_MODE_POLLING) { 760 poll_timeout(ent); 761 /* make sure we read the descriptor after ownership is SW */ 762 rmb(); 763 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 764 } 765} 766 767static const char *deliv_status_to_str(u8 status) 768{ 769 switch (status) { 770 case MLX5_CMD_DELIVERY_STAT_OK: 771 return "no errors"; 772 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 773 return "signature error"; 774 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 775 return "token error"; 776 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 777 return "bad block number"; 778 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 779 return "output pointer not aligned to block size"; 780 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 781 return "input pointer not aligned to block size"; 782 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 783 return "firmware internal error"; 784 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 785 return "command input 
length error"; 786 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 787 return "command ouput length error"; 788 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 789 return "reserved fields not cleared"; 790 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 791 return "bad command descriptor type"; 792 default: 793 return "unknown status code"; 794 } 795} 796 797static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 798{ 799 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); 800 801 return be16_to_cpu(hdr->opcode); 802} 803 804static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 805{ 806 int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 807 struct mlx5_cmd *cmd = &dev->cmd; 808 int err; 809 810 if (cmd->mode == CMD_MODE_POLLING) { 811 wait_for_completion(&ent->done); 812 err = ent->ret; 813 } else { 814 if (!wait_for_completion_timeout(&ent->done, timeout)) 815 err = -ETIMEDOUT; 816 else 817 err = 0; 818 } 819 if (err == -ETIMEDOUT) { 820 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 821 mlx5_command_str(msg_to_opcode(ent->in)), 822 msg_to_opcode(ent->in)); 823 } 824 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 825 err, deliv_status_to_str(ent->status), ent->status); 826 827 return err; 828} 829 830/* Notes: 831 * 1. Callback functions may not sleep 832 * 2. page queue commands do not support asynchrous completion 833 */ 834static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 835 struct mlx5_cmd_msg *out, void *uout, int uout_size, 836 mlx5_cmd_cbk_t callback, 837 void *context, int page_queue, u8 *status) 838{ 839 struct mlx5_cmd *cmd = &dev->cmd; 840 struct mlx5_cmd_work_ent *ent; 841 struct mlx5_cmd_stats *stats; 842 int err = 0; 843 s64 ds; 844 u16 op; 845 846 if (callback && page_queue) 847 return -EINVAL; 848 849 ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, 850 page_queue); 851 if (IS_ERR(ent)) 852 return PTR_ERR(ent); 853 854 if (!callback) 855 init_completion(&ent->done); 856 857 INIT_WORK(&ent->work, cmd_work_handler); 858 if (page_queue) { 859 cmd_work_handler(&ent->work); 860 } else if (!queue_work(cmd->wq, &ent->work)) { 861 mlx5_core_warn(dev, "failed to queue work\n"); 862 err = -ENOMEM; 863 goto out_free; 864 } 865 866 if (!callback) { 867 err = wait_func(dev, ent); 868 if (err == -ETIMEDOUT) 869 goto out; 870 871 ds = ent->ts2 - ent->ts1; 872 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 873 if (op < ARRAY_SIZE(cmd->stats)) { 874 stats = &cmd->stats[op]; 875 spin_lock_irq(&stats->lock); 876 stats->sum += ds; 877 ++stats->n; 878 spin_unlock_irq(&stats->lock); 879 } 880 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 881 "fw exec time for %s is %lld nsec\n", 882 mlx5_command_str(op), (long long)ds); 883 *status = ent->status; 884 free_cmd(ent); 885 } 886 887 return err; 888 889out_free: 890 free_cmd(ent); 891out: 892 return err; 893} 894 895static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) 896{ 897 struct mlx5_cmd_prot_block *block; 898 struct mlx5_cmd_mailbox *next; 899 int copy; 900 901 if (!to || !from) 902 return -ENOMEM; 903 904 copy = min_t(int, size, sizeof(to->first.data)); 905 memcpy(to->first.data, from, copy); 906 size -= copy; 907 from += copy; 908 909 next = to->next; 910 while (size) { 911 if (!next) { 912 /* this is a BUG */ 913 return -ENOMEM; 914 } 915 916 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 917 block = next->buf; 918 memcpy(block->data, from, copy); 919 from += copy; 920 
size -= copy; 921 next = next->next; 922 } 923 924 return 0; 925} 926 927static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 928{ 929 struct mlx5_cmd_prot_block *block; 930 struct mlx5_cmd_mailbox *next; 931 int copy; 932 933 if (!to || !from) 934 return -ENOMEM; 935 936 copy = min_t(int, size, sizeof(from->first.data)); 937 memcpy(to, from->first.data, copy); 938 size -= copy; 939 to += copy; 940 941 next = from->next; 942 while (size) { 943 if (!next) { 944 /* this is a BUG */ 945 return -ENOMEM; 946 } 947 948 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 949 block = next->buf; 950 951 memcpy(to, block->data, copy); 952 to += copy; 953 size -= copy; 954 next = next->next; 955 } 956 957 return 0; 958} 959 960static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, 961 gfp_t flags) 962{ 963 struct mlx5_cmd_mailbox *mailbox; 964 965 mailbox = kmalloc(sizeof(*mailbox), flags); 966 if (!mailbox) 967 return ERR_PTR(-ENOMEM); 968 969 mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, 970 &mailbox->dma); 971 if (!mailbox->buf) { 972 mlx5_core_dbg(dev, "failed allocation\n"); 973 kfree(mailbox); 974 return ERR_PTR(-ENOMEM); 975 } 976 memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); 977 mailbox->next = NULL; 978 979 return mailbox; 980} 981 982static void free_cmd_box(struct mlx5_core_dev *dev, 983 struct mlx5_cmd_mailbox *mailbox) 984{ 985 pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); 986 kfree(mailbox); 987} 988 989static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, 990 gfp_t flags, int size) 991{ 992 struct mlx5_cmd_mailbox *tmp, *head = NULL; 993 struct mlx5_cmd_prot_block *block; 994 struct mlx5_cmd_msg *msg; 995 int blen; 996 int err; 997 int n; 998 int i; 999 1000 msg = kzalloc(sizeof(*msg), flags); 1001 if (!msg) 1002 return ERR_PTR(-ENOMEM); 1003 1004 blen = size - min_t(int, sizeof(msg->first.data), size); 1005 n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; 1006 1007 for (i = 0; i < n; i++) { 1008 tmp = alloc_cmd_box(dev, flags); 1009 if (IS_ERR(tmp)) { 1010 mlx5_core_warn(dev, "failed allocating block\n"); 1011 err = PTR_ERR(tmp); 1012 goto err_alloc; 1013 } 1014 1015 block = tmp->buf; 1016 tmp->next = head; 1017 block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); 1018 block->block_num = cpu_to_be32(n - i - 1); 1019 head = tmp; 1020 } 1021 msg->next = head; 1022 msg->len = size; 1023 return msg; 1024 1025err_alloc: 1026 while (head) { 1027 tmp = head->next; 1028 free_cmd_box(dev, head); 1029 head = tmp; 1030 } 1031 kfree(msg); 1032 1033 return ERR_PTR(err); 1034} 1035 1036static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 1037 struct mlx5_cmd_msg *msg) 1038{ 1039 struct mlx5_cmd_mailbox *head = msg->next; 1040 struct mlx5_cmd_mailbox *next; 1041 1042 while (head) { 1043 next = head->next; 1044 free_cmd_box(dev, head); 1045 head = next; 1046 } 1047 kfree(msg); 1048} 1049 1050static void set_wqname(struct mlx5_core_dev *dev) 1051{ 1052 struct mlx5_cmd *cmd = &dev->cmd; 1053 1054 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1055 dev_name(&dev->pdev->dev)); 1056} 1057 1058static void clean_debug_files(struct mlx5_core_dev *dev) 1059{ 1060} 1061 1062 1063void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1064{ 1065 struct mlx5_cmd *cmd = &dev->cmd; 1066 int i; 1067 1068 for (i = 0; i < cmd->max_reg_cmds; i++) 1069 down(&cmd->sem); 1070 1071 down(&cmd->pages_sem); 1072 1073 flush_workqueue(cmd->wq); 1074 1075 cmd->mode = CMD_MODE_EVENTS; 1076 1077 up(&cmd->pages_sem); 1078 for (i = 0; i < cmd->max_reg_cmds; i++) 1079 up(&cmd->sem); 1080} 1081 1082void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1083{ 1084 struct mlx5_cmd *cmd = &dev->cmd; 1085 1086 synchronize_irq(dev->priv.eq_table.pages_eq.irqn); 1087 flush_workqueue(dev->priv.pg_wq); 1088 cmd->moving_to_polling = 1; 1089 flush_workqueue(cmd->wq); 1090 cmd->mode = CMD_MODE_POLLING; 1091 cmd->moving_to_polling = 0; 1092} 1093 1094static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1095{ 1096 unsigned long flags; 1097 1098 if (msg->cache) { 1099 spin_lock_irqsave(&msg->cache->lock, flags); 1100 list_add_tail(&msg->list, &msg->cache->head); 1101 spin_unlock_irqrestore(&msg->cache->lock, flags); 1102 } else { 1103 mlx5_free_cmd_msg(dev, msg); 1104 } 1105} 1106 1107void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) 1108{ 1109 struct mlx5_cmd *cmd = &dev->cmd; 1110 struct mlx5_cmd_work_ent *ent; 1111 mlx5_cmd_cbk_t callback; 1112 void *context; 1113 int err; 1114 int i; 1115 s64 ds; 1116 struct mlx5_cmd_stats *stats; 1117 unsigned long flags; 1118 1119 for (i = 0; i < (1 << cmd->log_sz); i++) { 1120 if (test_bit(i, &vector)) { 1121 struct semaphore *sem; 1122 1123 ent = cmd->ent_arr[i]; 1124 if (ent->page_queue) 1125 sem = &cmd->pages_sem; 1126 else 1127 sem = &cmd->sem; 1128 ent->ts2 = ktime_get_ns(); 1129 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); 1130 dump_command(dev, ent, 0); 1131 if (!ent->ret) { 1132 if (!cmd->checksum_disabled) 1133 ent->ret = verify_signature(ent); 1134 else 1135 ent->ret = 0; 1136 ent->status = ent->lay->status_own >> 1; 1137 mlx5_core_dbg(dev, "command completed. 
ret 0x%x, delivery status %s(0x%x)\n", 1138 ent->ret, deliv_status_to_str(ent->status), ent->status); 1139 } 1140 free_ent(cmd, ent->idx); 1141 if (ent->callback) { 1142 ds = ent->ts2 - ent->ts1; 1143 if (ent->op < ARRAY_SIZE(cmd->stats)) { 1144 stats = &cmd->stats[ent->op]; 1145 spin_lock_irqsave(&stats->lock, flags); 1146 stats->sum += ds; 1147 ++stats->n; 1148 spin_unlock_irqrestore(&stats->lock, flags); 1149 } 1150 1151 callback = ent->callback; 1152 context = ent->context; 1153 err = ent->ret; 1154 if (!err) 1155 err = mlx5_copy_from_msg(ent->uout, 1156 ent->out, 1157 ent->uout_size); 1158 1159 mlx5_free_cmd_msg(dev, ent->out); 1160 free_msg(dev, ent->in); 1161 1162 free_cmd(ent); 1163 callback(err, context); 1164 } else { 1165 complete(&ent->done); 1166 } 1167 up(sem); 1168 } 1169 } 1170} 1171EXPORT_SYMBOL(mlx5_cmd_comp_handler); 1172 1173static int status_to_err(u8 status) 1174{ 1175 return status ? -1 : 0; /* TBD more meaningful codes */ 1176} 1177 1178static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, 1179 gfp_t gfp) 1180{ 1181 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1182 struct mlx5_cmd *cmd = &dev->cmd; 1183 struct cache_ent *ent = NULL; 1184 1185 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) 1186 ent = &cmd->cache.large; 1187 else if (in_size > 16 && in_size <= MED_LIST_SIZE) 1188 ent = &cmd->cache.med; 1189 1190 if (ent) { 1191 spin_lock_irq(&ent->lock); 1192 if (!list_empty(&ent->head)) { 1193 msg = list_entry(ent->head.next, struct mlx5_cmd_msg, 1194 list); 1195 /* For cached lists, we must explicitly state what is 1196 * the real size 1197 */ 1198 msg->len = in_size; 1199 list_del(&msg->list); 1200 } 1201 spin_unlock_irq(&ent->lock); 1202 } 1203 1204 if (IS_ERR(msg)) 1205 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); 1206 1207 return msg; 1208} 1209 1210static int is_manage_pages(struct mlx5_inbox_hdr *in) 1211{ 1212 return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; 1213} 1214 1215static int cmd_exec_helper(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1216 int out_size, mlx5_cmd_cbk_t callback, void *context) 1217{ 1218 struct mlx5_cmd_msg *inb; 1219 struct mlx5_cmd_msg *outb; 1220 int pages_queue; 1221 gfp_t gfp; 1222 int err; 1223 u8 status = 0; 1224 1225 pages_queue = is_manage_pages(in); 1226 gfp = callback ? 
GFP_ATOMIC : GFP_KERNEL; 1227 1228 inb = alloc_msg(dev, in_size, gfp); 1229 if (IS_ERR(inb)) { 1230 err = PTR_ERR(inb); 1231 return err; 1232 } 1233 1234 err = mlx5_copy_to_msg(inb, in, in_size); 1235 if (err) { 1236 mlx5_core_warn(dev, "err %d\n", err); 1237 goto out_in; 1238 } 1239 1240 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); 1241 if (IS_ERR(outb)) { 1242 err = PTR_ERR(outb); 1243 goto out_in; 1244 } 1245 1246 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, 1247 pages_queue, &status); 1248 if (err) 1249 goto out_out; 1250 1251 mlx5_core_dbg(dev, "err %d, status %d\n", err, status); 1252 if (status) { 1253 err = status_to_err(status); 1254 goto out_out; 1255 } 1256 1257 if (callback) 1258 return err; 1259 1260 err = mlx5_copy_from_msg(out, outb, out_size); 1261 1262out_out: 1263 mlx5_free_cmd_msg(dev, outb); 1264 1265out_in: 1266 free_msg(dev, inb); 1267 return err; 1268} 1269 1270int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1271 int out_size) 1272{ 1273 return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL); 1274} 1275EXPORT_SYMBOL(mlx5_cmd_exec); 1276 1277int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 1278 void *out, int out_size, mlx5_cmd_cbk_t callback, 1279 void *context) 1280{ 1281 return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context); 1282} 1283EXPORT_SYMBOL(mlx5_cmd_exec_cb); 1284 1285static void destroy_msg_cache(struct mlx5_core_dev *dev) 1286{ 1287 struct mlx5_cmd *cmd = &dev->cmd; 1288 struct mlx5_cmd_msg *msg; 1289 struct mlx5_cmd_msg *n; 1290 1291 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { 1292 list_del(&msg->list); 1293 mlx5_free_cmd_msg(dev, msg); 1294 } 1295 1296 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { 1297 list_del(&msg->list); 1298 mlx5_free_cmd_msg(dev, msg); 1299 } 1300} 1301 1302static int create_msg_cache(struct mlx5_core_dev *dev) 1303{ 1304 struct mlx5_cmd *cmd = &dev->cmd; 1305 struct mlx5_cmd_msg *msg; 1306 int err; 1307 int i; 1308 1309 spin_lock_init(&cmd->cache.large.lock); 1310 INIT_LIST_HEAD(&cmd->cache.large.head); 1311 spin_lock_init(&cmd->cache.med.lock); 1312 INIT_LIST_HEAD(&cmd->cache.med.head); 1313 1314 for (i = 0; i < NUM_LONG_LISTS; i++) { 1315 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); 1316 if (IS_ERR(msg)) { 1317 err = PTR_ERR(msg); 1318 goto ex_err; 1319 } 1320 msg->cache = &cmd->cache.large; 1321 list_add_tail(&msg->list, &cmd->cache.large.head); 1322 } 1323 1324 for (i = 0; i < NUM_MED_LISTS; i++) { 1325 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); 1326 if (IS_ERR(msg)) { 1327 err = PTR_ERR(msg); 1328 goto ex_err; 1329 } 1330 msg->cache = &cmd->cache.med; 1331 list_add_tail(&msg->list, &cmd->cache.med.head); 1332 } 1333 1334 return 0; 1335 1336ex_err: 1337 destroy_msg_cache(dev); 1338 return err; 1339} 1340 1341static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1342{ 1343 struct device *ddev = &dev->pdev->dev; 1344 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1345 &cmd->alloc_dma, GFP_KERNEL); 1346 if (!cmd->cmd_alloc_buf) 1347 return -ENOMEM; 1348 1349 /* make sure it is aligned to 4K */ 1350 if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { 1351 cmd->cmd_buf = cmd->cmd_alloc_buf; 1352 cmd->dma = cmd->alloc_dma; 1353 cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; 1354 return 0; 1355 } 1356 1357 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, cmd->alloc_dma); 1358 
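/* The first allocation was not 4K aligned; retry with one page of slack so the buffer and DMA address can be aligned by hand below. */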
cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1359 &cmd->alloc_dma, GFP_KERNEL); 1360 if (!cmd->cmd_alloc_buf) 1361 return -ENOMEM; 1362 1363 cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); 1364 cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); 1365 cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; 1366 return 0; 1367} 1368 1369static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1370{ 1371 struct device *ddev = &dev->pdev->dev; 1372 dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, cmd->alloc_dma); 1373} 1374 1375int mlx5_cmd_init(struct mlx5_core_dev *dev) 1376{ 1377 int size = sizeof(struct mlx5_cmd_prot_block); 1378 int align = roundup_pow_of_two(size); 1379 struct mlx5_cmd *cmd = &dev->cmd; 1380 u32 cmd_h, cmd_l; 1381 u16 cmd_if_rev; 1382 int err; 1383 int i; 1384 1385 cmd_if_rev = cmdif_rev_get(dev); 1386 if (cmd_if_rev != CMD_IF_REV) { 1387 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev); 1388 return -EINVAL; 1389 } 1390 1391 cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); 1392 if (!cmd->pool) 1393 return -ENOMEM; 1394 1395 err = alloc_cmd_page(dev, cmd); 1396 if (err) 1397 goto err_free_pool; 1398 1399 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 1400 cmd->log_sz = cmd_l >> 4 & 0xf; 1401 cmd->log_stride = cmd_l & 0xf; 1402 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { 1403 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz); 1404 err = -EINVAL; 1405 goto err_free_page; 1406 } 1407 1408 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 1409 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n"); 1410 err = -EINVAL; 1411 goto err_free_page; 1412 } 1413 1414 cmd->checksum_disabled = 1; 1415 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1416 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1417 1418 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 1419 if (cmd->cmdif_rev > CMD_IF_REV) { 1420 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. 
driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); 1421 err = -ENOTSUPP; 1422 goto err_free_page; 1423 } 1424 1425 spin_lock_init(&cmd->alloc_lock); 1426 spin_lock_init(&cmd->token_lock); 1427 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) 1428 spin_lock_init(&cmd->stats[i].lock); 1429 1430 sema_init(&cmd->sem, cmd->max_reg_cmds); 1431 sema_init(&cmd->pages_sem, 1); 1432 1433 cmd_h = (u32)((u64)(cmd->dma) >> 32); 1434 cmd_l = (u32)(cmd->dma); 1435 if (cmd_l & 0xfff) { 1436 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n"); 1437 err = -ENOMEM; 1438 goto err_free_page; 1439 } 1440 1441 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 1442 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 1443 1444 /* Make sure firmware sees the complete address before we proceed */ 1445 wmb(); 1446 1447 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 1448 1449 cmd->mode = CMD_MODE_POLLING; 1450 1451 err = create_msg_cache(dev); 1452 if (err) { 1453 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n"); 1454 goto err_free_page; 1455 } 1456 1457 set_wqname(dev); 1458 cmd->wq = create_singlethread_workqueue(cmd->wq_name); 1459 if (!cmd->wq) { 1460 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n"); 1461 err = -ENOMEM; 1462 goto err_cache; 1463 } 1464 1465 return 0; 1466 1467err_cache: 1468 destroy_msg_cache(dev); 1469 1470err_free_page: 1471 free_cmd_page(dev, cmd); 1472 1473err_free_pool: 1474 pci_pool_destroy(cmd->pool); 1475 1476 return err; 1477} 1478EXPORT_SYMBOL(mlx5_cmd_init); 1479 1480void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 1481{ 1482 struct mlx5_cmd *cmd = &dev->cmd; 1483 1484 clean_debug_files(dev); 1485 destroy_workqueue(cmd->wq); 1486 destroy_msg_cache(dev); 1487 free_cmd_page(dev, cmd); 1488 pci_pool_destroy(cmd->pool); 1489} 1490EXPORT_SYMBOL(mlx5_cmd_cleanup); 1491 1492static const char *cmd_status_str(u8 status) 1493{ 1494 switch (status) { 1495 case MLX5_CMD_STAT_OK: 1496 return "OK"; 1497 case MLX5_CMD_STAT_INT_ERR: 1498 return "internal error"; 1499 case MLX5_CMD_STAT_BAD_OP_ERR: 1500 return "bad operation"; 1501 case MLX5_CMD_STAT_BAD_PARAM_ERR: 1502 return "bad parameter"; 1503 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 1504 return "bad system state"; 1505 case MLX5_CMD_STAT_BAD_RES_ERR: 1506 return "bad resource"; 1507 case MLX5_CMD_STAT_RES_BUSY: 1508 return "resource busy"; 1509 case MLX5_CMD_STAT_LIM_ERR: 1510 return "limits exceeded"; 1511 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 1512 return "bad resource state"; 1513 case MLX5_CMD_STAT_IX_ERR: 1514 return "bad index"; 1515 case MLX5_CMD_STAT_NO_RES_ERR: 1516 return "no resources"; 1517 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 1518 return "bad input length"; 1519 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 1520 return "bad output length"; 1521 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 1522 return "bad QP state"; 1523 case MLX5_CMD_STAT_BAD_PKT_ERR: 1524 return "bad packet (discarded)"; 1525 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 1526 return "bad size too many outstanding CQEs"; 1527 default: 1528 return "unknown status"; 1529 } 1530} 1531 1532static int cmd_status_to_err_helper(u8 status) 1533{ 1534 switch (status) { 1535 case MLX5_CMD_STAT_OK: return 0; 1536 case MLX5_CMD_STAT_INT_ERR: return -EIO; 1537 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 1538 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 1539 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 1540 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 1541 
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 1542 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 1543 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 1544 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 1545 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 1546 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 1547 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 1548 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 1549 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 1550 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 1551 default: return -EIO; 1552 } 1553} 1554 1555/* this will be available till all the commands use set/get macros */ 1556int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) 1557{ 1558 if (!hdr->status) 1559 return 0; 1560 1561 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome)); 1562 1563 return cmd_status_to_err_helper(hdr->status); 1564} 1565 1566int mlx5_cmd_status_to_err_v2(void *ptr) 1567{ 1568 u32 syndrome; 1569 u8 status; 1570 1571 status = be32_to_cpu(*(__be32 *)ptr) >> 24; 1572 if (!status) 1573 return 0; 1574 1575 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); 1576 1577 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome); 1578 1579 return cmd_status_to_err_helper(status); 1580} 1581
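/*
 * Usage sketch (illustrative only): a minimal example of how a caller
 * elsewhere in mlx5_core is expected to drive the command interface
 * implemented above.  The helper name and the inline mailbox layout are
 * assumptions made for the example; real commands use the mailbox
 * structures declared in the mlx5 headers.
 */
#if 0
static int example_nop_cmd(struct mlx5_core_dev *dev)
{
	struct {
		struct mlx5_inbox_hdr	hdr;
		u8			rsvd[8];
	} in;
	struct {
		struct mlx5_outbox_hdr	hdr;
		u8			rsvd[8];
	} out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_NOP);

	/* Synchronous execution; completion is polled or event driven
	 * depending on the current command mode.
	 */
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	/* Translate the firmware status byte in the output header to an errno. */
	return mlx5_cmd_status_to_err(&out.hdr);
}
#endif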