1/*- 2 * Copyright (c) 2009-2012 Microsoft Corp. 3 * Copyright (c) 2012 NetApp Inc. 4 * Copyright (c) 2012 Citrix Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: releng/10.3/sys/dev/hyperv/vmbus/hv_channel_mgmt.c 303984 2016-08-12 04:01:16Z glebius $"); 31 32#include <sys/param.h> 33#include <sys/mbuf.h> 34 35#include "hv_vmbus_priv.h" 36 37/* 38 * Internal functions 39 */ 40 41static void vmbus_channel_on_offer(hv_vmbus_channel_msg_header* hdr); 42static void vmbus_channel_on_open_result(hv_vmbus_channel_msg_header* hdr); 43static void vmbus_channel_on_offer_rescind(hv_vmbus_channel_msg_header* hdr); 44static void vmbus_channel_on_gpadl_created(hv_vmbus_channel_msg_header* hdr); 45static void vmbus_channel_on_gpadl_torndown(hv_vmbus_channel_msg_header* hdr); 46static void vmbus_channel_on_offers_delivered(hv_vmbus_channel_msg_header* hdr); 47static void vmbus_channel_on_version_response(hv_vmbus_channel_msg_header* hdr); 48 49/** 50 * Channel message dispatch table 51 */ 52hv_vmbus_channel_msg_table_entry 53 g_channel_message_table[HV_CHANNEL_MESSAGE_COUNT] = { 54 { HV_CHANNEL_MESSAGE_INVALID, 55 0, NULL }, 56 { HV_CHANNEL_MESSAGE_OFFER_CHANNEL, 57 0, vmbus_channel_on_offer }, 58 { HV_CHANNEL_MESSAGE_RESCIND_CHANNEL_OFFER, 59 0, vmbus_channel_on_offer_rescind }, 60 { HV_CHANNEL_MESSAGE_REQUEST_OFFERS, 61 0, NULL }, 62 { HV_CHANNEL_MESSAGE_ALL_OFFERS_DELIVERED, 63 1, vmbus_channel_on_offers_delivered }, 64 { HV_CHANNEL_MESSAGE_OPEN_CHANNEL, 65 0, NULL }, 66 { HV_CHANNEL_MESSAGE_OPEN_CHANNEL_RESULT, 67 1, vmbus_channel_on_open_result }, 68 { HV_CHANNEL_MESSAGE_CLOSE_CHANNEL, 69 0, NULL }, 70 { HV_CHANNEL_MESSAGEL_GPADL_HEADER, 71 0, NULL }, 72 { HV_CHANNEL_MESSAGE_GPADL_BODY, 73 0, NULL }, 74 { HV_CHANNEL_MESSAGE_GPADL_CREATED, 75 1, vmbus_channel_on_gpadl_created }, 76 { HV_CHANNEL_MESSAGE_GPADL_TEARDOWN, 77 0, NULL }, 78 { HV_CHANNEL_MESSAGE_GPADL_TORNDOWN, 79 1, vmbus_channel_on_gpadl_torndown }, 80 { HV_CHANNEL_MESSAGE_REL_ID_RELEASED, 81 0, NULL }, 82 { HV_CHANNEL_MESSAGE_INITIATED_CONTACT, 83 0, NULL }, 84 { HV_CHANNEL_MESSAGE_VERSION_RESPONSE, 85 1, 
vmbus_channel_on_version_response }, 86 { HV_CHANNEL_MESSAGE_UNLOAD, 87 0, NULL } 88}; 89 90 91/** 92 * Implementation of the work abstraction. 93 */ 94static void 95work_item_callback(void *work, int pending) 96{ 97 struct hv_work_item *w = (struct hv_work_item *)work; 98 99 /* 100 * Serialize work execution. 101 */ 102 if (w->wq->work_sema != NULL) { 103 sema_wait(w->wq->work_sema); 104 } 105 106 w->callback(w->context); 107 108 if (w->wq->work_sema != NULL) { 109 sema_post(w->wq->work_sema); 110 } 111 112 free(w, M_DEVBUF); 113} 114 115struct hv_work_queue* 116hv_work_queue_create(char* name) 117{ 118 static unsigned int qid = 0; 119 char qname[64]; 120 int pri; 121 struct hv_work_queue* wq; 122 123 wq = malloc(sizeof(struct hv_work_queue), M_DEVBUF, M_NOWAIT | M_ZERO); 124 KASSERT(wq != NULL, ("Error VMBUS: Failed to allocate work_queue\n")); 125 if (wq == NULL) 126 return (NULL); 127 128 /* 129 * We use work abstraction to handle messages 130 * coming from the host and these are typically offers. 131 * Some FreeBsd drivers appear to have a concurrency issue 132 * where probe/attach needs to be serialized. We ensure that 133 * by having only one thread process work elements in a 134 * specific queue by serializing work execution. 135 * 136 */ 137 if (strcmp(name, "vmbusQ") == 0) { 138 pri = PI_DISK; 139 } else { /* control */ 140 pri = PI_NET; 141 /* 142 * Initialize semaphore for this queue by pointing 143 * to the globale semaphore used for synchronizing all 144 * control messages. 145 */ 146 wq->work_sema = &hv_vmbus_g_connection.control_sema; 147 } 148 149 sprintf(qname, "hv_%s_%u", name, qid); 150 151 /* 152 * Fixme: FreeBSD 8.2 has a different prototype for 153 * taskqueue_create(), and for certain other taskqueue functions. 154 * We need to research the implications of these changes. 155 * Fixme: Not sure when the changes were introduced. 
156 */ 157 wq->queue = taskqueue_create(qname, M_NOWAIT, taskqueue_thread_enqueue, 158 &wq->queue 159 #if __FreeBSD_version < 800000 160 , &wq->proc 161 #endif 162 ); 163 164 if (wq->queue == NULL) { 165 free(wq, M_DEVBUF); 166 return (NULL); 167 } 168 169 if (taskqueue_start_threads(&wq->queue, 1, pri, "%s taskq", qname)) { 170 taskqueue_free(wq->queue); 171 free(wq, M_DEVBUF); 172 return (NULL); 173 } 174 175 qid++; 176 177 return (wq); 178} 179 180void 181hv_work_queue_close(struct hv_work_queue *wq) 182{ 183 /* 184 * KYS: Need to drain the taskqueue 185 * before we close the hv_work_queue. 186 */ 187 /*KYS: taskqueue_drain(wq->tq, ); */ 188 taskqueue_free(wq->queue); 189 free(wq, M_DEVBUF); 190} 191 192/** 193 * @brief Create work item 194 */ 195int 196hv_queue_work_item( 197 struct hv_work_queue *wq, 198 void (*callback)(void *), void *context) 199{ 200 struct hv_work_item *w = malloc(sizeof(struct hv_work_item), 201 M_DEVBUF, M_NOWAIT | M_ZERO); 202 KASSERT(w != NULL, ("Error VMBUS: Failed to allocate WorkItem\n")); 203 if (w == NULL) 204 return (ENOMEM); 205 206 w->callback = callback; 207 w->context = context; 208 w->wq = wq; 209 210 TASK_INIT(&w->work, 0, work_item_callback, w); 211 212 return (taskqueue_enqueue(wq->queue, &w->work)); 213} 214 215 216/** 217 * @brief Allocate and initialize a vmbus channel object 218 */ 219hv_vmbus_channel* 220hv_vmbus_allocate_channel(void) 221{ 222 hv_vmbus_channel* channel; 223 224 channel = (hv_vmbus_channel*) malloc( 225 sizeof(hv_vmbus_channel), 226 M_DEVBUF, 227 M_NOWAIT | M_ZERO); 228 KASSERT(channel != NULL, ("Error VMBUS: Failed to allocate channel!")); 229 if (channel == NULL) 230 return (NULL); 231 232 mtx_init(&channel->inbound_lock, "channel inbound", NULL, MTX_DEF); 233 mtx_init(&channel->sc_lock, "vmbus multi channel", NULL, MTX_DEF); 234 235 TAILQ_INIT(&channel->sc_list_anchor); 236 237 return (channel); 238} 239 240/** 241 * @brief Release the vmbus channel object itself 242 */ 243static inline void 
244ReleaseVmbusChannel(void *context) 245{ 246 hv_vmbus_channel* channel = (hv_vmbus_channel*) context; 247 free(channel, M_DEVBUF); 248} 249 250/** 251 * @brief Release the resources used by the vmbus channel object 252 */ 253void 254hv_vmbus_free_vmbus_channel(hv_vmbus_channel* channel) 255{ 256 mtx_destroy(&channel->sc_lock); 257 mtx_destroy(&channel->inbound_lock); 258 /* 259 * We have to release the channel's workqueue/thread in 260 * the vmbus's workqueue/thread context 261 * ie we can't destroy ourselves 262 */ 263 hv_queue_work_item(hv_vmbus_g_connection.work_queue, 264 ReleaseVmbusChannel, (void *) channel); 265} 266 267/** 268 * @brief Process the offer by creating a channel/device 269 * associated with this offer 270 */ 271static void 272vmbus_channel_process_offer(hv_vmbus_channel *new_channel) 273{ 274 boolean_t f_new; 275 hv_vmbus_channel* channel; 276 int ret; 277 uint32_t relid; 278 279 f_new = TRUE; 280 channel = NULL; 281 relid = new_channel->offer_msg.child_rel_id; 282 /* 283 * Make sure this is a new offer 284 */ 285 mtx_lock(&hv_vmbus_g_connection.channel_lock); 286 hv_vmbus_g_connection.channels[relid] = new_channel; 287 288 TAILQ_FOREACH(channel, &hv_vmbus_g_connection.channel_anchor, 289 list_entry) 290 { 291 if (memcmp(&channel->offer_msg.offer.interface_type, 292 &new_channel->offer_msg.offer.interface_type, 293 sizeof(hv_guid)) == 0 && 294 memcmp(&channel->offer_msg.offer.interface_instance, 295 &new_channel->offer_msg.offer.interface_instance, 296 sizeof(hv_guid)) == 0) { 297 f_new = FALSE; 298 break; 299 } 300 } 301 302 if (f_new) { 303 /* Insert at tail */ 304 TAILQ_INSERT_TAIL( 305 &hv_vmbus_g_connection.channel_anchor, 306 new_channel, 307 list_entry); 308 } 309 mtx_unlock(&hv_vmbus_g_connection.channel_lock); 310 311 /*XXX add new channel to percpu_list */ 312 313 if (!f_new) { 314 /* 315 * Check if this is a sub channel. 
316 */ 317 if (new_channel->offer_msg.offer.sub_channel_index != 0) { 318 /* 319 * It is a sub channel offer, process it. 320 */ 321 new_channel->primary_channel = channel; 322 mtx_lock(&channel->sc_lock); 323 TAILQ_INSERT_TAIL( 324 &channel->sc_list_anchor, 325 new_channel, 326 sc_list_entry); 327 mtx_unlock(&channel->sc_lock); 328 329 /* Insert new channel into channel_anchor. */ 330 printf("VMBUS get multi-channel offer, rel=%u,sub=%u\n", 331 new_channel->offer_msg.child_rel_id, 332 new_channel->offer_msg.offer.sub_channel_index); 333 mtx_lock(&hv_vmbus_g_connection.channel_lock); 334 TAILQ_INSERT_TAIL(&hv_vmbus_g_connection.channel_anchor, 335 new_channel, list_entry); 336 mtx_unlock(&hv_vmbus_g_connection.channel_lock); 337 338 if(bootverbose) 339 printf("VMBUS: new multi-channel offer <%p>, " 340 "its primary channel is <%p>.\n", 341 new_channel, new_channel->primary_channel); 342 343 /*XXX add it to percpu_list */ 344 345 new_channel->state = HV_CHANNEL_OPEN_STATE; 346 if (channel->sc_creation_callback != NULL) { 347 channel->sc_creation_callback(new_channel); 348 } 349 return; 350 } 351 352 hv_vmbus_free_vmbus_channel(new_channel); 353 return; 354 } 355 356 new_channel->state = HV_CHANNEL_OPEN_STATE; 357 358 /* 359 * Start the process of binding this offer to the driver 360 * (We need to set the device field before calling 361 * hv_vmbus_child_device_add()) 362 */ 363 new_channel->device = hv_vmbus_child_device_create( 364 new_channel->offer_msg.offer.interface_type, 365 new_channel->offer_msg.offer.interface_instance, new_channel); 366 367 /* 368 * Add the new device to the bus. This will kick off device-driver 369 * binding which eventually invokes the device driver's AddDevice() 370 * method. 
371 */ 372 ret = hv_vmbus_child_device_register(new_channel->device); 373 if (ret != 0) { 374 mtx_lock(&hv_vmbus_g_connection.channel_lock); 375 TAILQ_REMOVE( 376 &hv_vmbus_g_connection.channel_anchor, 377 new_channel, 378 list_entry); 379 mtx_unlock(&hv_vmbus_g_connection.channel_lock); 380 hv_vmbus_free_vmbus_channel(new_channel); 381 } 382} 383 384/** 385 * Array of device guids that are performance critical. We try to distribute 386 * the interrupt load for these devices across all online cpus. 387 */ 388static const hv_guid high_perf_devices[] = { 389 {HV_NIC_GUID, }, 390 {HV_IDE_GUID, }, 391 {HV_SCSI_GUID, }, 392}; 393 394enum { 395 PERF_CHN_NIC = 0, 396 PERF_CHN_IDE, 397 PERF_CHN_SCSI, 398 MAX_PERF_CHN, 399}; 400 401/* 402 * We use this static number to distribute the channel interrupt load. 403 */ 404static uint32_t next_vcpu; 405 406/** 407 * Starting with Win8, we can statically distribute the incoming 408 * channel interrupt load by binding a channel to VCPU. We 409 * implement here a simple round robin scheme for distributing 410 * the interrupt load. 411 * We will bind channels that are not performance critical to cpu 0 and 412 * performance critical channels (IDE, SCSI and Network) will be uniformly 413 * distributed across all available CPUs. 
 */
static void
vmbus_channel_select_cpu(hv_vmbus_channel *channel, hv_guid *guid)
{
	uint32_t current_cpu;
	int i;
	boolean_t is_perf_channel = FALSE;

	/* Is this one of the performance-critical device classes? */
	for (i = PERF_CHN_NIC; i < MAX_PERF_CHN; i++) {
		if (memcmp(guid->data, high_perf_devices[i].data,
		    sizeof(hv_guid)) == 0) {
			is_perf_channel = TRUE;
			break;
		}
	}

	/*
	 * Pre-Win8 hosts (WS2008/WIN7) and non-performance channels are
	 * all bound to cpu/vcpu 0.
	 */
	if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) ||
	    (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7) ||
	    (!is_perf_channel)) {
		/* Host's view of guest cpu */
		channel->target_vcpu = 0;
		/* Guest's own view of cpu */
		channel->target_cpu = 0;
		return;
	}
	/*
	 * Round-robin assignment.  mp_ncpus should have the number of
	 * cpus currently online.  NOTE(review): next_vcpu is a plain
	 * static counter with no locking -- presumably offers arrive
	 * serialized on one worker thread; confirm before relying on it.
	 */
	current_cpu = (++next_vcpu % mp_ncpus);
	channel->target_cpu = current_cpu;
	channel->target_vcpu =
	    hv_vmbus_g_context.hv_vcpu_index[current_cpu];
	if (bootverbose)
		printf("VMBUS: Total online cpus %d, assign perf channel %d "
		    "to vcpu %d, cpu %d\n", mp_ncpus, i, channel->target_vcpu,
		    current_cpu);
}

/**
 * @brief Handler for channel offers from Hyper-V/Azure
 *
 * Handler for channel offers from vmbus in parent partition.  For each
 * offer we allocate a channel object, record the offer message, choose
 * a target cpu/vcpu, and hand the channel to
 * vmbus_channel_process_offer() for device creation.
 */
static void
vmbus_channel_on_offer(hv_vmbus_channel_msg_header* hdr)
{
	hv_vmbus_channel_offer_channel* offer;
	hv_vmbus_channel* new_channel;

	offer = (hv_vmbus_channel_offer_channel*) hdr;

	/* NOTE(review): guidType/guidInstance are set but never used here. */
	hv_guid *guidType;
	hv_guid *guidInstance;

	guidType = &offer->offer.interface_type;
	guidInstance = &offer->offer.interface_instance;

	/* Allocate the channel object and save this offer */
	new_channel = hv_vmbus_allocate_channel();
	if (new_channel == NULL)
	    return;

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	new_channel->batched_reading = TRUE;

	/*
	 * The hypercall parameter must be suitably aligned, so point
	 * signal_event_param at the aligned region inside the channel's
	 * own signal_event_buffer.
	 */
	new_channel->signal_event_param =
	    (hv_vmbus_input_signal_event *)
	    (HV_ALIGN_UP((unsigned long)
		&new_channel->signal_event_buffer,
		HV_HYPERCALL_PARAM_ALIGN));

	new_channel->signal_event_param->connection_id.as_uint32_t = 0;
	new_channel->signal_event_param->connection_id.u.id =
	    HV_VMBUS_EVENT_CONNECTION_ID;
	new_channel->signal_event_param->flag_number = 0;
	new_channel->signal_event_param->rsvd_z = 0;

	/* Post-WS2008 hosts supply a per-channel connection id. */
	if (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) {
		new_channel->is_dedicated_interrupt =
		    (offer->is_dedicated_interrupt != 0);
		new_channel->signal_event_param->connection_id.u.id =
		    offer->connection_id;
	}

	/*
	 * Bind the channel to a chosen cpu.
	 */
	vmbus_channel_select_cpu(new_channel,
	    &offer->offer.interface_type);

	/* Keep a private copy of the whole offer message. */
	memcpy(&new_channel->offer_msg, offer,
	    sizeof(hv_vmbus_channel_offer_channel));
	new_channel->monitor_group = (uint8_t) offer->monitor_id / 32;
	new_channel->monitor_bit = (uint8_t) offer->monitor_id % 32;

	vmbus_channel_process_offer(new_channel);
}

/**
 * @brief Rescind offer handler.
 *
 * Looks up the channel by the rescinded relid, unregisters its child
 * device and clears the relid slot under channel_lock.
 */
static void
vmbus_channel_on_offer_rescind(hv_vmbus_channel_msg_header* hdr)
{
	hv_vmbus_channel_rescind_offer*	rescind;
	hv_vmbus_channel*		channel;

	rescind = (hv_vmbus_channel_rescind_offer*) hdr;

	channel = hv_vmbus_g_connection.channels[rescind->child_rel_id];
	if (channel == NULL)
	    return;

	/*
	 * NOTE(review): the device is unregistered before the relid slot
	 * is cleared, and the lookup above is done without channel_lock
	 * -- verify against the interrupt path that no new reference can
	 * be taken in between.
	 */
	hv_vmbus_child_device_unregister(channel->device);
	mtx_lock(&hv_vmbus_g_connection.channel_lock);
	hv_vmbus_g_connection.channels[rescind->child_rel_id] = NULL;
	mtx_unlock(&hv_vmbus_g_connection.channel_lock);
}

/**
 * @brief Invoked when all offers have been delivered.  Intentionally a
 * no-op; the dispatch table still routes the message here.
 */
static void
vmbus_channel_on_offers_delivered(hv_vmbus_channel_msg_header* hdr)
{
}

/**
 * @brief Open result handler.
 *
 * This is invoked when we received a response
 * to our channel open request. Find the matching request, copy the
 * response and signal the requesting thread.
 */
static void
vmbus_channel_on_open_result(hv_vmbus_channel_msg_header* hdr)
{
	hv_vmbus_channel_open_result*	result;
	hv_vmbus_channel_msg_info*	msg_info;
	hv_vmbus_channel_msg_header*	requestHeader;
	hv_vmbus_channel_open_channel*	openMsg;

	result = (hv_vmbus_channel_open_result*) hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event.
	 * The pending request is matched by (child_rel_id, open_id).
	 */
	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);

	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
	    msg_list_entry) {
	    requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;

	    if (requestHeader->message_type ==
		    HV_CHANNEL_MESSAGE_OPEN_CHANNEL) {
		openMsg = (hv_vmbus_channel_open_channel*) msg_info->msg;
		if (openMsg->child_rel_id == result->child_rel_id
		    && openMsg->open_id == result->open_id) {
		    /* Hand the response to the waiter, then wake it. */
		    memcpy(&msg_info->response.open_result, result,
			sizeof(hv_vmbus_channel_open_result));
		    sema_post(&msg_info->wait_sema);
		    break;
		}
	    }
	}
	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);

}

/**
 * @brief GPADL created handler.
 *
 * This is invoked when we received a response
 * to our gpadl create request. Find the matching request, copy the
 * response and signal the requesting thread.
 */
static void
vmbus_channel_on_gpadl_created(hv_vmbus_channel_msg_header* hdr)
{
	hv_vmbus_channel_gpadl_created*		gpadl_created;
	hv_vmbus_channel_msg_info*		msg_info;
	hv_vmbus_channel_msg_header*		request_header;
	hv_vmbus_channel_gpadl_header*		gpadl_header;

	gpadl_created = (hv_vmbus_channel_gpadl_created*) hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock
	 * the wait event.  Matched by (child_rel_id, gpadl).
	 */
	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
	    msg_list_entry) {
	    request_header = (hv_vmbus_channel_msg_header*) msg_info->msg;
	    if (request_header->message_type ==
		    HV_CHANNEL_MESSAGEL_GPADL_HEADER) {
		gpadl_header =
		    (hv_vmbus_channel_gpadl_header*) request_header;

		if ((gpadl_created->child_rel_id == gpadl_header->child_rel_id)
		    && (gpadl_created->gpadl == gpadl_header->gpadl)) {
		    memcpy(&msg_info->response.gpadl_created,
			gpadl_created,
			sizeof(hv_vmbus_channel_gpadl_created));
		    sema_post(&msg_info->wait_sema);
		    break;
		}
	    }
	}
	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
}

/**
 * @brief GPADL torndown handler.
 *
 * This is invoked when we received a response
 * to our gpadl teardown request. Find the matching request, copy the
 * response and signal the requesting thread.
 */
static void
vmbus_channel_on_gpadl_torndown(hv_vmbus_channel_msg_header* hdr)
{
	hv_vmbus_channel_gpadl_torndown*	gpadl_torndown;
	hv_vmbus_channel_msg_info*		msg_info;
	hv_vmbus_channel_msg_header*		requestHeader;
	hv_vmbus_channel_gpadl_teardown*	gpadlTeardown;

	gpadl_torndown = (hv_vmbus_channel_gpadl_torndown*)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the
	 * wait event.  Matched by gpadl handle only.
	 */

	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);

	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
	    msg_list_entry) {
	    requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;

	    if (requestHeader->message_type
		    == HV_CHANNEL_MESSAGE_GPADL_TEARDOWN) {
		gpadlTeardown =
		    (hv_vmbus_channel_gpadl_teardown*) requestHeader;

		if (gpadl_torndown->gpadl == gpadlTeardown->gpadl) {
		    memcpy(&msg_info->response.gpadl_torndown,
			gpadl_torndown,
			sizeof(hv_vmbus_channel_gpadl_torndown));
		    sema_post(&msg_info->wait_sema);
		    break;
		}
	    }
	}
	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);
}

/**
 * @brief Version response handler.
 *
 * This is invoked when we received a response
 * to our initiate contact request. Find the matching request, copy the
 * response and signal the requesting thread.
 *
 * NOTE(review): unlike the other handlers this loop does not break
 * after posting -- every pending INITIATED_CONTACT request receives
 * this response.  Confirm that is intentional.
 */
static void
vmbus_channel_on_version_response(hv_vmbus_channel_msg_header* hdr)
{
	hv_vmbus_channel_msg_info*		msg_info;
	hv_vmbus_channel_msg_header*		requestHeader;
	hv_vmbus_channel_initiate_contact*	initiate;
	hv_vmbus_channel_version_response*	versionResponse;

	versionResponse = (hv_vmbus_channel_version_response*)hdr;

	mtx_lock(&hv_vmbus_g_connection.channel_msg_lock);
	TAILQ_FOREACH(msg_info, &hv_vmbus_g_connection.channel_msg_anchor,
	    msg_list_entry) {
	    requestHeader = (hv_vmbus_channel_msg_header*) msg_info->msg;
	    if (requestHeader->message_type
		    == HV_CHANNEL_MESSAGE_INITIATED_CONTACT) {
		initiate =
		    (hv_vmbus_channel_initiate_contact*) requestHeader;
		memcpy(&msg_info->response.version_response,
		    versionResponse,
		    sizeof(hv_vmbus_channel_version_response));
		sema_post(&msg_info->wait_sema);
	    }
	}
	mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock);

}

/**
 * @brief Handler for channel protocol messages.
716 * 717 * This is invoked in the vmbus worker thread context. 718 */ 719void 720hv_vmbus_on_channel_message(void *context) 721{ 722 hv_vmbus_message* msg; 723 hv_vmbus_channel_msg_header* hdr; 724 int size; 725 726 msg = (hv_vmbus_message*) context; 727 hdr = (hv_vmbus_channel_msg_header*) msg->u.payload; 728 size = msg->header.payload_size; 729 730 if (hdr->message_type >= HV_CHANNEL_MESSAGE_COUNT) { 731 free(msg, M_DEVBUF); 732 return; 733 } 734 735 if (g_channel_message_table[hdr->message_type].messageHandler) { 736 g_channel_message_table[hdr->message_type].messageHandler(hdr); 737 } 738 739 /* Free the msg that was allocated in VmbusOnMsgDPC() */ 740 free(msg, M_DEVBUF); 741} 742 743/** 744 * @brief Send a request to get all our pending offers. 745 */ 746int 747hv_vmbus_request_channel_offers(void) 748{ 749 int ret; 750 hv_vmbus_channel_msg_header* msg; 751 hv_vmbus_channel_msg_info* msg_info; 752 753 msg_info = (hv_vmbus_channel_msg_info *) 754 malloc(sizeof(hv_vmbus_channel_msg_info) 755 + sizeof(hv_vmbus_channel_msg_header), M_DEVBUF, M_NOWAIT); 756 757 if (msg_info == NULL) { 758 if(bootverbose) 759 printf("Error VMBUS: malloc failed for Request Offers\n"); 760 return (ENOMEM); 761 } 762 763 msg = (hv_vmbus_channel_msg_header*) msg_info->msg; 764 msg->message_type = HV_CHANNEL_MESSAGE_REQUEST_OFFERS; 765 766 ret = hv_vmbus_post_message(msg, sizeof(hv_vmbus_channel_msg_header)); 767 768 if (msg_info) 769 free(msg_info, M_DEVBUF); 770 771 return (ret); 772} 773 774/** 775 * @brief Release channels that are unattached/unconnected (i.e., no drivers associated) 776 */ 777void 778hv_vmbus_release_unattached_channels(void) 779{ 780 hv_vmbus_channel *channel; 781 782 mtx_lock(&hv_vmbus_g_connection.channel_lock); 783 784 while (!TAILQ_EMPTY(&hv_vmbus_g_connection.channel_anchor)) { 785 channel = TAILQ_FIRST(&hv_vmbus_g_connection.channel_anchor); 786 TAILQ_REMOVE(&hv_vmbus_g_connection.channel_anchor, 787 channel, list_entry); 788 789 
hv_vmbus_child_device_unregister(channel->device); 790 hv_vmbus_free_vmbus_channel(channel); 791 } 792 bzero(hv_vmbus_g_connection.channels, 793 sizeof(hv_vmbus_channel*) * HV_CHANNEL_MAX_COUNT); 794 mtx_unlock(&hv_vmbus_g_connection.channel_lock); 795} 796 797/** 798 * @brief Select the best outgoing channel 799 * 800 * The channel whose vcpu binding is closest to the currect vcpu will 801 * be selected. 802 * If no multi-channel, always select primary channel 803 * 804 * @param primary - primary channel 805 */ 806struct hv_vmbus_channel * 807vmbus_select_outgoing_channel(struct hv_vmbus_channel *primary) 808{ 809 hv_vmbus_channel *new_channel = NULL; 810 hv_vmbus_channel *outgoing_channel = primary; 811 int old_cpu_distance = 0; 812 int new_cpu_distance = 0; 813 int cur_vcpu = 0; 814 int smp_pro_id = PCPU_GET(cpuid); 815 816 if (TAILQ_EMPTY(&primary->sc_list_anchor)) { 817 return outgoing_channel; 818 } 819 820 if (smp_pro_id >= MAXCPU) { 821 return outgoing_channel; 822 } 823 824 cur_vcpu = hv_vmbus_g_context.hv_vcpu_index[smp_pro_id]; 825 826 TAILQ_FOREACH(new_channel, &primary->sc_list_anchor, sc_list_entry) { 827 if (new_channel->state != HV_CHANNEL_OPENED_STATE){ 828 continue; 829 } 830 831 if (new_channel->target_vcpu == cur_vcpu){ 832 return new_channel; 833 } 834 835 old_cpu_distance = ((outgoing_channel->target_vcpu > cur_vcpu) ? 836 (outgoing_channel->target_vcpu - cur_vcpu) : 837 (cur_vcpu - outgoing_channel->target_vcpu)); 838 839 new_cpu_distance = ((new_channel->target_vcpu > cur_vcpu) ? 840 (new_channel->target_vcpu - cur_vcpu) : 841 (cur_vcpu - new_channel->target_vcpu)); 842 843 if (old_cpu_distance < new_cpu_distance) { 844 continue; 845 } 846 847 outgoing_channel = new_channel; 848 } 849 850 return(outgoing_channel); 851} 852