/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <linux/route.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_vq.h"

/* Device capabilities */
#define C2_MIN_PAGESIZE  1024

#define C2_MAX_MRS       32768
#define C2_MAX_QPS       16000
#define C2_MAX_WQE_SZ    256
#define C2_MAX_QP_WR     ((128*1024)/C2_MAX_WQE_SZ)
#define C2_MAX_SGES      4
#define C2_MAX_SGE_RD    1
#define C2_MAX_CQS       32768
#define C2_MAX_CQES      4096
#define C2_MAX_PDS       16384

/*
 * Send the adapter INIT message to the amso1100
 */
static int c2_adapter_init(struct c2_dev *c2dev)
{
        struct c2wr_init_req wr;

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_INIT);
        wr.hdr.context = 0;
        wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
        wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
        wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
        wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
        wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
        wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);

        /* Post the init message */
        return vq_send_wr(c2dev, (union c2wr *) &wr);
}
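
/*
 * Note: INIT (above) and TERM (below) are one-way messages.  They are
 * posted with vq_send_wr() alone, hdr.context stays 0, and no reply is
 * awaited.  Every other exchange in this file is request/reply.
 */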

/*
 * Send the adapter TERM message to the amso1100
 */
static void c2_adapter_term(struct c2_dev *c2dev)
{
        struct c2wr_init_req wr;

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_TERM);
        wr.hdr.context = 0;

        /* Post the TERM message */
        vq_send_wr(c2dev, (union c2wr *) &wr);
        c2dev->init = 0;
}

/*
 * Query the adapter
 */
static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
{
        struct c2_vq_req *vq_req;
        struct c2wr_rnic_query_req wr;
        struct c2wr_rnic_query_rep *reply;
        int err;

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail1;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail1;

        reply =
            (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
        if (!reply) {
                err = -ENOMEM;
                goto bail1;
        }

        err = c2_errno(reply);
        if (err)
                goto bail2;

        props->fw_ver =
                ((u64) be32_to_cpu(reply->fw_ver_major) << 32) |
                ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
                (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
        memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
        props->max_mr_size = 0xFFFFFFFF;
        props->page_size_cap = ~(C2_MIN_PAGESIZE - 1);
        props->vendor_id = be32_to_cpu(reply->vendor_id);
        props->vendor_part_id = be32_to_cpu(reply->part_number);
        props->hw_ver = be32_to_cpu(reply->hw_version);
        props->max_qp = be32_to_cpu(reply->max_qps);
        props->max_qp_wr = be32_to_cpu(reply->max_qp_depth);
        props->device_cap_flags = c2dev->device_cap_flags;
        props->max_sge = C2_MAX_SGES;
        props->max_sge_rd = C2_MAX_SGE_RD;
        props->max_cq = be32_to_cpu(reply->max_cqs);
        props->max_cqe = be32_to_cpu(reply->max_cq_depth);
        props->max_mr = be32_to_cpu(reply->max_mrs);
        props->max_pd = be32_to_cpu(reply->max_pds);
        props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird);
        props->max_ee_rd_atom = 0;
        props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird);
        props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
        props->max_ee_init_rd_atom = 0;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->max_ee = 0;
        props->max_rdd = 0;
        props->max_mw = be32_to_cpu(reply->max_mws);
        props->max_raw_ipv6_qp = 0;
        props->max_raw_ethy_qp = 0;
        props->max_mcast_grp = 0;
        props->max_mcast_qp_attach = 0;
        props->max_total_mcast_qp_attach = 0;
        props->max_ah = 0;
        props->max_fmr = 0;
        props->max_map_per_fmr = 0;
        props->max_srq = 0;
        props->max_srq_wr = 0;
        props->max_srq_sge = 0;
        props->max_pkeys = 0;
        props->local_ca_ack_delay = 0;

 bail2:
        vq_repbuf_free(c2dev, reply);

 bail1:
        vq_req_free(c2dev, vq_req);
        return err;
}
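
/*
 * Every request/reply exchange below follows the same verbs-queue
 * recipe as c2_rnic_query() above (error unwinding elided):
 *
 *      vq_req = vq_req_alloc(c2dev);                 // track the exchange
 *      c2_wr_set_id(&wr, CCWR_...);                  // stamp the request
 *      wr.hdr.context = (unsigned long) vq_req;      // reply-match cookie
 *      vq_req_get(c2dev, vq_req);                    // hold across the wait
 *      err = vq_send_wr(c2dev, (union c2wr *) &wr);  // post to the adapter
 *      err = vq_wait_for_reply(c2dev, vq_req);       // sleep for the reply
 *      reply = ... vq_req->reply_msg;                // matched via context
 *      err = c2_errno(reply);                        // adapter status code
 *      vq_repbuf_free(c2dev, reply);                 // release reply buffer
 *      vq_req_free(c2dev, vq_req);
 *
 * vq_req_put() drops the reference again when the post itself fails.
 */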

/*
 * Add an IP address to the RNIC interface
 */
int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
{
        struct c2_vq_req *vq_req;
        struct c2wr_rnic_setconfig_req *wr;
        struct c2wr_rnic_setconfig_rep *reply;
        struct c2_netaddr netaddr;
        int err, len;

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        len = sizeof(struct c2_netaddr);
        wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
        if (!wr) {
                err = -ENOMEM;
                goto bail0;
        }

        c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
        wr->hdr.context = (unsigned long) vq_req;
        wr->rnic_handle = c2dev->adapter_handle;
        wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);

        netaddr.ip_addr = inaddr;
        netaddr.netmask = inmask;
        netaddr.mtu = 0;

        memcpy(wr->data, &netaddr, len);

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, (union c2wr *) wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail1;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail1;

        reply =
            (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
        if (!reply) {
                err = -ENOMEM;
                goto bail1;
        }

        err = c2_errno(reply);
        vq_repbuf_free(c2dev, reply);

 bail1:
        kfree(wr);
 bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}

/*
 * Delete an IP address from the RNIC interface
 */
int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
{
        struct c2_vq_req *vq_req;
        struct c2wr_rnic_setconfig_req *wr;
        struct c2wr_rnic_setconfig_rep *reply;
        struct c2_netaddr netaddr;
        int err, len;

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        len = sizeof(struct c2_netaddr);
        wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
        if (!wr) {
                err = -ENOMEM;
                goto bail0;
        }

        c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
        wr->hdr.context = (unsigned long) vq_req;
        wr->rnic_handle = c2dev->adapter_handle;
        wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);

        netaddr.ip_addr = inaddr;
        netaddr.netmask = inmask;
        netaddr.mtu = 0;

        memcpy(wr->data, &netaddr, len);

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, (union c2wr *) wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail1;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail1;

        reply =
            (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
        if (!reply) {
                err = -ENOMEM;
                goto bail1;
        }

        err = c2_errno(reply);
        vq_repbuf_free(c2dev, reply);

 bail1:
        kfree(wr);
 bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}
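
/*
 * Example (hypothetical caller): mirroring an IPv4 address change on
 * the companion netdev into the RNIC, e.g. from an inetaddr notifier.
 * "event" and "ptr" are assumed to come from the notifier; the fields
 * read are the real struct in_ifaddr ones:
 *
 *      struct in_ifaddr *ifa = ptr;
 *
 *      if (event == NETDEV_UP)
 *              c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
 *      else if (event == NETDEV_DOWN)
 *              c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
 */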

/*
 * Open a single RNIC instance to use with all
 * low level openib calls
 */
static int c2_rnic_open(struct c2_dev *c2dev)
{
        struct c2_vq_req *vq_req;
        union c2wr wr;
        struct c2wr_rnic_open_rep *reply;
        int err;

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
        wr.rnic_open.req.hdr.context = (unsigned long) vq_req;
        wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
        wr.rnic_open.req.port_num = cpu_to_be16(0);
        wr.rnic_open.req.user_context = (unsigned long) c2dev;

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
        if (!reply) {
                err = -ENOMEM;
                goto bail0;
        }

        err = c2_errno(reply);
        if (err)
                goto bail1;

        c2dev->adapter_handle = reply->rnic_handle;

 bail1:
        vq_repbuf_free(c2dev, reply);
 bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}

/*
 * Close the RNIC instance
 */
static int c2_rnic_close(struct c2_dev *c2dev)
{
        struct c2_vq_req *vq_req;
        union c2wr wr;
        struct c2wr_rnic_close_rep *reply;
        int err;

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
        wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
        wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
        if (!reply) {
                err = -ENOMEM;
                goto bail0;
        }

        err = c2_errno(reply);
        if (err)
                goto bail1;

        c2dev->adapter_handle = 0;

 bail1:
        vq_repbuf_free(c2dev, reply);
 bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}
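
/*
 * c2_rnic_open() and c2_rnic_close() bracket the lifetime of
 * c2dev->adapter_handle: open stores the handle returned by the
 * adapter, every subsequent work request carries it, and close zeroes
 * it again.  The query and setconfig calls above therefore require a
 * successful open first.
 */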

/*
 * Called by c2_probe to initialize the RNIC. This principally
 * involves initializing the various limits and resource pools that
 * comprise the RNIC instance.
 */
int __devinit c2_rnic_init(struct c2_dev *c2dev)
{
        int err;
        u32 qsize, msgsize;
        void *q1_pages;
        void *q2_pages;
        void __iomem *mmio_regs;

        /* Device capabilities */
        c2dev->device_cap_flags =
            (IB_DEVICE_RESIZE_MAX_WR |
             IB_DEVICE_CURR_QP_STATE_MOD |
             IB_DEVICE_SYS_IMAGE_GUID |
             IB_DEVICE_LOCAL_DMA_LKEY |
             IB_DEVICE_MEM_WINDOW);

        /* Allocate the qptr_array */
        c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
        if (!c2dev->qptr_array)
                return -ENOMEM;

        /* Initialize the qptr_array */
        memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
        c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
        c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
        c2dev->qptr_array[2] = (void *) &c2dev->aeq;

        /* Initialize data structures */
        init_waitqueue_head(&c2dev->req_vq_wo);
        spin_lock_init(&c2dev->vqlock);
        spin_lock_init(&c2dev->lock);

        /* Allocate MQ shared pointer pool for kernel clients. User
         * mode client pools are hung off the user context
         */
        err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
        if (err)
                goto bail0;

        /* Allocate shared pointers for Q0, Q1, and Q2 from
         * the shared pointer pool.
         */
        c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                                          &c2dev->hint_count_dma,
                                          GFP_KERNEL);
        c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                                             &c2dev->req_vq.shared_dma,
                                             GFP_KERNEL);
        c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                                             &c2dev->rep_vq.shared_dma,
                                             GFP_KERNEL);
        c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                                          &c2dev->aeq.shared_dma, GFP_KERNEL);
        if (!c2dev->hint_count || !c2dev->req_vq.shared ||
            !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
                err = -ENOMEM;
                goto bail1;
        }

        mmio_regs = c2dev->kva;
        /* Initialize the Verbs Request Queue */
        c2_mq_req_init(&c2dev->req_vq, 0,
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
                       mmio_regs +
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
                       mmio_regs +
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
                       C2_MQ_ADAPTER_TARGET);

        /* Initialize the Verbs Reply Queue */
        qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
        msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
        q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
                                      &c2dev->rep_vq.host_dma, GFP_KERNEL);
        if (!q1_pages) {
                err = -ENOMEM;
                goto bail1;
        }
        dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
        pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
                 (unsigned long long) c2dev->rep_vq.host_dma);
        c2_mq_rep_init(&c2dev->rep_vq,
                       1,
                       qsize,
                       msgsize,
                       q1_pages,
                       mmio_regs +
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
                       C2_MQ_HOST_TARGET);

        /* Initialize the Asynchronous Event Queue */
        qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
        msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
        q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
                                      &c2dev->aeq.host_dma, GFP_KERNEL);
        if (!q2_pages) {
                err = -ENOMEM;
                goto bail2;
        }
        dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
        pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
                 (unsigned long long) c2dev->aeq.host_dma);
        c2_mq_rep_init(&c2dev->aeq,
                       2,
                       qsize,
                       msgsize,
                       q2_pages,
                       mmio_regs +
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
                       C2_MQ_HOST_TARGET);

        /* Initialize the verbs request allocator */
        err = vq_init(c2dev);
        if (err)
                goto bail3;

        /* Enable interrupts on the adapter */
        writel(0, c2dev->regs + C2_IDIS);

        /* Send the WR init message to the adapter */
        err = c2_adapter_init(c2dev);
        if (err)
                goto bail4;
        c2dev->init++;

        /* Open an adapter instance */
        err = c2_rnic_open(c2dev);
        if (err)
                goto bail4;

        /* Initialize the cached adapter limits */
        err = c2_rnic_query(c2dev, &c2dev->props);
        if (err)
                goto bail5;

        /* Initialize the PD pool */
        err = c2_init_pd_table(c2dev);
        if (err)
                goto bail5;

        /* Initialize the QP pool */
        c2_init_qp_table(c2dev);
        return 0;

 bail5:
        c2_rnic_close(c2dev);
 bail4:
        vq_term(c2dev);
 bail3:
        dma_free_coherent(&c2dev->pcidev->dev,
                          c2dev->aeq.q_size * c2dev->aeq.msg_size,
                          q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
 bail2:
        dma_free_coherent(&c2dev->pcidev->dev,
                          c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
                          q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
 bail1:
        c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
 bail0:
        vfree(c2dev->qptr_array);

        return err;
}
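
/*
 * Pairing sketch (from the function comments: c2_probe() calls
 * c2_rnic_init(), c2_remove() calls c2_rnic_term()); "bail_out" is a
 * hypothetical label in the caller:
 *
 *      err = c2_rnic_init(c2dev);      // in c2_probe()
 *      if (err)
 *              goto bail_out;
 *      ...
 *      c2_rnic_term(c2dev);            // in c2_remove()
 */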

/*
 * Called by c2_remove to clean up the RNIC resources.
 */
void __devexit c2_rnic_term(struct c2_dev *c2dev)
{
        /* Close the open adapter instance */
        c2_rnic_close(c2dev);

        /* Send the TERM message to the adapter */
        c2_adapter_term(c2dev);

        /* Disable interrupts on the adapter */
        writel(1, c2dev->regs + C2_IDIS);

        /* Free the QP pool */
        c2_cleanup_qp_table(c2dev);

        /* Free the PD pool */
        c2_cleanup_pd_table(c2dev);

        /* Free the verbs request allocator */
        vq_term(c2dev);

        /* Free the asynchronous event queue */
        dma_free_coherent(&c2dev->pcidev->dev,
                          c2dev->aeq.q_size * c2dev->aeq.msg_size,
                          c2dev->aeq.msg_pool.host,
                          dma_unmap_addr(&c2dev->aeq, mapping));

        /* Free the verbs reply queue */
        dma_free_coherent(&c2dev->pcidev->dev,
                          c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
                          c2dev->rep_vq.msg_pool.host,
                          dma_unmap_addr(&c2dev->rep_vq, mapping));

        /* Free the MQ shared pointer pool */
        c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);

        /* Free the qptr_array */
        vfree(c2dev->qptr_array);
}