/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */

#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "irq.h"
#include "i8254.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

/* Compute with 96 bit intermediate result: (a*b)/c */
static u64 muldiv64(u64 a, u32 b, u32 c)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} u, res;
	u64 rl, rh;

	u.ll = a;
	rl = (u64)u.l.low * (u64)b;
	rh = (u64)u.l.high * (u64)b;
	rh += (rl >> 32);
	res.l.high = div64_u64(rh, c);
	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
	return res.ll;
}
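/*
 * A worked example of the arithmetic above, assuming the nominal i8254
 * input clock of 1193182 Hz (KVM_PIT_FREQ): elapsed nanoseconds are
 * converted to PIT ticks as muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC),
 * and a reload value back to nanoseconds as
 * muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ). With b = 10^9 the naive
 * product a*b overflows 64 bits once a exceeds ~18.4 seconds, hence the
 * split a*b = (a_hi*b << 32) + a_lo*b, where each partial product fits
 * in 64 bits. The maximum reload of 0x10000 ticks works out to
 * 65536 * 10^9 / 1193182 =~ 54.9 ms, the familiar ~18.2 Hz PC timer tick.
 */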
static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	switch (c->mode) {
	default:
	case 0:
	case 4:
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	return kvm->arch.vpit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm *kvm)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	if (!ps->pit_timer.period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads
	 * itself with the initial count and continues counting
	 * from there.
	 */
	remaining = hrtimer_get_remaining(&ps->pit_timer.timer);
	elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
	elapsed = mod_64(elapsed, ps->pit_timer.period);

	return elapsed;
}

static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(kvm);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

static int pit_get_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

static int pit_get_out(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int out;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}
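/*
 * The mode switch above computes the sampled OUT level from the elapsed
 * tick count d: mode 0 reads 1 once the programmed count has expired
 * (d >= count); mode 1 reads 1 while the one-shot is still in flight;
 * mode 2 reads 1 only at the period boundaries where the hardware emits
 * its one-clock pulse; mode 3 approximates the square wave, 1 for the
 * rounded-up first half of each period; modes 4 and 5 strobe exactly at
 * d == count. E.g. in mode 3 with count = 6, ticks 0-2 of each period
 * read 1 and ticks 3-5 read 0, since (6 + 1) >> 1 == 3.
 */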
static void pit_latch_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->count_latched) {
		c->latched_count = pit_get_count(kvm, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(kvm, channel) << 7) |
				(c->rw_mode << 4) |
				(c->mode << 1) |
				c->bcd);
		c->status_latched = 1;
	}
}

int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;

	if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
		return atomic_read(&pit->pit_state.pit_timer.pending);
	return 0;
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	int value;

	spin_lock(&ps->inject_lock);
	value = atomic_dec_return(&ps->pit_timer.pending);
	if (value < 0)
		/*
		 * Spurious acks can be generated, for example, while the
		 * PIC is being reset. Handle them gracefully here.
		 */
		atomic_inc(&ps->pit_timer.pending);
	else if (value > 0)
		/*
		 * We had multiple outstanding PIT interrupts that still
		 * need injecting; requeue the work to reinject.
		 */
		queue_work(ps->pit->wq, &ps->pit->expired);
	ps->irq_ack = 1;
	spin_unlock(&ps->inject_lock);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.pit_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.pit_timer.timer);
	cancel_work_sync(&pit->expired);
}

static bool kpit_is_periodic(struct kvm_timer *ktimer)
{
	struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
						 pit_timer);
	return ps->is_periodic;
}

static struct kvm_timer_ops kpit_ops = {
	.is_periodic = kpit_is_periodic,
};
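/*
 * Interrupt delivery pipeline: pit_timer_fn() (below) bumps 'pending'
 * and queues the 'expired' work; pit_do_work() pulses GSI 0 high then
 * low, but only once the previous interrupt has been acked; the guest's
 * EOI then lands in kvm_pit_ack_irq() (above), which requeues the work
 * if further ticks accumulated in the meantime. This keeps at most one
 * PIT interrupt in flight while preserving the tick count for
 * reinjection.
 */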
static void pit_do_work(struct work_struct *work)
{
	struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
	struct kvm *kvm = pit->kvm;
	struct kvm_vcpu *vcpu;
	int i;
	struct kvm_kpit_state *ps = &pit->pit_state;
	int inject = 0;

	/*
	 * Try to inject pending interrupts only once the last
	 * one has been acked.
	 */
	spin_lock(&ps->inject_lock);
	if (ps->irq_ack) {
		ps->irq_ack = 0;
		inject = 1;
	}
	spin_unlock(&ps->inject_lock);
	if (inject) {
		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);

		/*
		 * Provides NMI watchdog support via Virtual Wire mode.
		 * The route is: PIT -> PIC -> LVT0 in NMI mode.
		 *
		 * Note: Our Virtual Wire implementation is simplified, only
		 * propagating PIT interrupts to all VCPUs when they have set
		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
		 * VCPU0, and only if its LVT0 is in EXTINT mode.
		 */
		if (kvm->arch.vapics_in_nmi_mode > 0)
			kvm_for_each_vcpu(i, vcpu, kvm)
				kvm_apic_nmi_wd_deliver(vcpu);
	}
}

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_pit *pt = ktimer->kvm->arch.vpit;

	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
		atomic_inc(&ktimer->pending);
		queue_work(pt->wq, &pt->expired);
	}

	if (ktimer->t_ops->is_periodic(ktimer)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
{
	struct kvm_timer *pt = &ps->pit_timer;
	s64 interval;

	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO: The new value should only take effect once retriggered. */
	hrtimer_cancel(&pt->timer);
	cancel_work_sync(&ps->pit->expired);
	pt->period = interval;
	ps->is_periodic = is_period;

	pt->timer.function = pit_timer_fn;
	pt->t_ops = &kpit_ops;
	pt->kvm = ps->pit->kvm;

	atomic_set(&pt->pending, 0);
	ps->irq_ack = 1;

	hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	WARN_ON(!mutex_is_locked(&ps->lock));

	pr_debug("load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/*
	 * Channel 0 is backed by a real timer: modes 0, 1 and 4 arm a
	 * one-shot timer, modes 2 and 3 a periodic one; any other mode
	 * tears the timer down.
	 */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
	case 4:
		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY))
			create_pit_timer(ps, val, 0);
		break;
	case 2:
	case 3:
		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY))
			create_pit_timer(ps, val, 1);
		break;
	default:
		destroy_pit_timer(kvm->arch.vpit);
	}
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val,
			int hpet_legacy_start)
{
	u8 saved_mode;

	if (hpet_legacy_start) {
		/* Save the existing mode and disable the timer while loading. */
		saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
		kvm->arch.vpit->pit_state.channels[0].mode = 0xff;
		pit_load_count(kvm, channel, val);
		kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
	} else {
		pit_load_count(kvm, channel, val);
	}
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}
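/*
 * Guest I/O decoding: the PIT occupies KVM_PIT_MEM_LENGTH consecutive
 * ports at KVM_PIT_BASE_ADDRESS (ports 0x40-0x43 on a PC), with the
 * three channel data ports at offsets 0-2 and the mode/command register
 * at offset 3. The handlers below mask the address with
 * KVM_PIT_CHANNEL_MASK, so offset 3 is treated as a command and
 * anything else as a count byte for that channel.
 */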
static int pit_ioport_write(struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(kvm, channel);
					if (!(val & 0x10))
						pit_latch_status(kvm, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(kvm, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(kvm, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(kvm, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(kvm, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int pit_ioport_read(struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	if (addr == 3)
		return 0;

	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	u32 val = *(u32 *) data;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(kvm, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}
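/*
 * "Speaker" port 0x61, as emulated here: on write, bit 0 gates PIT
 * channel 2 and bit 1 is the speaker-data enable; on read, those bits
 * come back along with channel 2's OUT level in bit 5 and the toggling
 * refresh clock in bit 4, which DOS-era software polls for short delays.
 */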
static int speaker_ioport_read(struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	unsigned int refresh_clock;
	int ret;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
		(pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}

void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	mutex_lock(&pit->pit_state.lock);
	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit->kvm, i, 0);
	}
	mutex_unlock(&pit->pit_state.lock);

	atomic_set(&pit->pit_state.pit_timer.pending, 0);
	pit->pit_state.irq_ack = 1;
}

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask) {
		atomic_set(&pit->pit_state.pit_timer.pending, 0);
		pit->pit_state.irq_ack = 1;
	}
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read = pit_ioport_read,
	.write = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read = speaker_ioport_read,
	.write = speaker_ioport_write,
};

/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0) {
		kfree(pit);
		return NULL;
	}

	mutex_init(&pit->pit_state.lock);
	mutex_lock(&pit->pit_state.lock);
	spin_lock_init(&pit->pit_state.inject_lock);

	pit->wq = create_singlethread_workqueue("kvm-pit-wq");
	if (!pit->wq) {
		mutex_unlock(&pit->pit_state.lock);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
		return NULL;
	}
	INIT_WORK(&pit->expired, pit_do_work);

	kvm->arch.vpit = pit;
	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	pit_state->pit = pit;
	hrtimer_init(&pit_state->pit_timer.timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	pit_state->pit_timer.reinject = true;
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset(pit);

	pit->mask_notifier.func = pit_mask_notifier;
	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->dev);
	if (ret < 0)
		goto fail;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
					      &pit->speaker_dev);
		if (ret < 0)
			goto fail_unregister;
	}

	return pit;

fail_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);

fail:
	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	kvm_free_irq_source_id(kvm, pit->irq_source_id);
	destroy_workqueue(pit->wq);
	kfree(pit);
	return NULL;
}
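/*
 * kvm_create_pit() is reached from userspace via the KVM_CREATE_PIT and
 * KVM_CREATE_PIT2 vm ioctls; KVM_PIT_SPEAKER_DUMMY (settable through
 * KVM_CREATE_PIT2's struct kvm_pit_config flags) additionally exposes
 * the dummy speaker port. kvm_free_pit() below unwinds in the opposite
 * order: the devices and notifiers go first, then the timer and its
 * queued work, and finally the workqueue and the pit itself.
 */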
void kvm_free_pit(struct kvm *kvm)
{
	struct hrtimer *timer;

	if (kvm->arch.vpit) {
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
					  &kvm->arch.vpit->dev);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
					  &kvm->arch.vpit->speaker_dev);
		kvm_unregister_irq_mask_notifier(kvm, 0,
						 &kvm->arch.vpit->mask_notifier);
		kvm_unregister_irq_ack_notifier(kvm,
				&kvm->arch.vpit->pit_state.irq_ack_notifier);
		mutex_lock(&kvm->arch.vpit->pit_state.lock);
		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
		hrtimer_cancel(timer);
		cancel_work_sync(&kvm->arch.vpit->expired);
		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
		destroy_workqueue(kvm->arch.vpit->wq);
		kfree(kvm->arch.vpit);
	}
}