Lines matching refs: vm in drivers/virtio/virtio_mem.c (each entry is prefixed with its line number in the source file; non-matching continuation lines are omitted)

278 static void virtio_mem_retry(struct virtio_mem *vm);
279 static int virtio_mem_create_resource(struct virtio_mem *vm);
280 static void virtio_mem_delete_resource(struct virtio_mem *vm);
286 static int register_virtio_mem_device(struct virtio_mem *vm)
295 list_add_rcu(&vm->next, &virtio_mem_devices);
305 static void unregister_virtio_mem_device(struct virtio_mem *vm)
309 list_del_rcu(&vm->next);
336 static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
339 return addr / vm->bbm.bb_size;
345 static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
348 return bb_id * vm->bbm.bb_size;
354 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
360 return (addr - mb_addr) / vm->sbm.sb_size;
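The id/address helpers above are plain integer arithmetic over fixed, power-of-two block sizes. Below is a minimal userspace sketch, assuming a 64-bit build; the sizes are illustrative stand-ins for vm->bbm.bb_size, vm->sbm.sb_size and the memory block size:

#include <assert.h>
#include <stdint.h>

#define MB_SIZE (128ULL << 20)  /* memory block size (stand-in) */
#define SB_SIZE (2ULL << 20)    /* subblock size (stand-in) */
#define BB_SIZE (1ULL << 30)    /* big block size (stand-in) */

static unsigned long phys_to_bb_id(uint64_t addr)
{
        return addr / BB_SIZE;
}

static uint64_t bb_id_to_phys(unsigned long bb_id)
{
        return bb_id * BB_SIZE;
}

static unsigned long phys_to_sb_id(uint64_t addr)
{
        uint64_t mb_addr = addr & ~(MB_SIZE - 1); /* start of the containing memory block */

        return (addr - mb_addr) / SB_SIZE;
}

int main(void)
{
        assert(phys_to_bb_id(3 * BB_SIZE + 42) == 3);
        assert(bb_id_to_phys(3) == 3 * BB_SIZE);
        assert(phys_to_sb_id(MB_SIZE + 5 * SB_SIZE) == 5);
        return 0;
}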
366 static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
370 const unsigned long idx = bb_id - vm->bbm.first_bb_id;
373 old_state = vm->bbm.bb_states[idx];
374 vm->bbm.bb_states[idx] = state;
376 BUG_ON(vm->bbm.bb_count[old_state] == 0);
377 vm->bbm.bb_count[old_state]--;
378 vm->bbm.bb_count[state]++;
384 static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
387 return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
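The state setter above pairs a per-block state byte with a per-state counter, so the iteration helpers further down can stop early once a state's count reaches zero. A userspace model of that bookkeeping (names and array size are illustrative, not the driver's):

#include <assert.h>

enum bb_state { BB_UNUSED, BB_PLUGGED, BB_ADDED, BB_STATE_COUNT };

static unsigned char bb_states[16];
static unsigned long bb_count[BB_STATE_COUNT] = { [BB_UNUSED] = 16 };

static void set_bb_state(unsigned long idx, enum bb_state state)
{
        enum bb_state old = bb_states[idx];

        bb_states[idx] = state;
        assert(bb_count[old] > 0);      /* mirrors the BUG_ON() above */
        bb_count[old]--;
        bb_count[state]++;
}

int main(void)
{
        set_bb_state(3, BB_PLUGGED);
        assert(bb_count[BB_UNUSED] == 15 && bb_count[BB_PLUGGED] == 1);
        return 0;
}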
393 static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
395 unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
401 if (vm->bbm.bb_states && old_pages == new_pages)
408 mutex_lock(&vm->hotplug_mutex);
409 if (vm->bbm.bb_states)
410 memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
411 vfree(vm->bbm.bb_states);
412 vm->bbm.bb_states = new_array;
413 mutex_unlock(&vm->hotplug_mutex);
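prepare_next_bb grows the state array one page at a time: a new buffer is allocated outside the lock, the old contents are copied, and the pointer is swapped under the hotplug mutex. The same grow-by-a-page idiom is reused for sbm.mb_states and sbm.sb_states below. A userspace sketch of the pattern, with calloc/free standing in for vzalloc/vfree:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

static uint8_t *states;         /* one state byte per id */
static unsigned long nr_ids;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int prepare_next_id(void)
{
        unsigned long old_pages = PFN_UP(nr_ids);
        unsigned long new_pages = PFN_UP(nr_ids + 1);
        uint8_t *new_array;

        if (states && old_pages == new_pages)
                return 0;       /* still room in the current page */

        new_array = calloc(new_pages, PAGE_SIZE);
        if (!new_array)
                return -1;

        pthread_mutex_lock(&lock);
        if (states)
                memcpy(new_array, states, old_pages * PAGE_SIZE);
        free(states);
        states = new_array;
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        for (nr_ids = 0; nr_ids < 10000; nr_ids++)
                if (prepare_next_id())
                        return 1;
        return 0;
}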
419 for (_bb_id = _vm->bbm.first_bb_id; \
420 _bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
425 for (_bb_id = _vm->bbm.next_bb_id - 1; \
426 _bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
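The for-each macros iterate the id range, bail out early once bb_count[_state] reaches zero, and filter individual ids with a trailing if that guards the loop body. A simplified userspace version showing the shape:

#include <stdio.h>

#define NR_BBS 8
static int bb_states[NR_BBS];
static int bb_count[4];

#define for_each_bb(_bb_id, _state) \
        for (_bb_id = 0; \
             _bb_id < NR_BBS && bb_count[(_state)]; \
             _bb_id++) \
                if (bb_states[_bb_id] == (_state))

int main(void)
{
        unsigned long id;

        bb_states[2] = bb_states[5] = 1;
        bb_count[1] = 2;
        bb_count[0] = NR_BBS - 2;

        for_each_bb(id, 1)
                printf("bb %lu is in state 1\n", id);
        return 0;
}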
433 static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
436 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
439 old_state = vm->sbm.mb_states[idx];
440 vm->sbm.mb_states[idx] = state;
442 BUG_ON(vm->sbm.mb_count[old_state] == 0);
443 vm->sbm.mb_count[old_state]--;
444 vm->sbm.mb_count[state]++;
450 static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
453 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
455 return vm->sbm.mb_states[idx];
461 static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
463 int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
464 int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
467 if (vm->sbm.mb_states && old_pages == new_pages)
474 mutex_lock(&vm->hotplug_mutex);
475 if (vm->sbm.mb_states)
476 memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
477 vfree(vm->sbm.mb_states);
478 vm->sbm.mb_states = new_array;
479 mutex_unlock(&vm->hotplug_mutex);
500 static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
503 return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
511 static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
515 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
517 __bitmap_set(vm->sbm.sb_states, bit, count);
525 static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
529 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
531 __bitmap_clear(vm->sbm.sb_states, bit, count);
537 static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
541 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
544 return test_bit(bit, vm->sbm.sb_states);
547 return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >= bit + count;
554 static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
558 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
561 return find_next_bit(vm->sbm.sb_states, bit + count, bit) >= bit + count;
566 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is none.
569 static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
572 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
574 return find_next_zero_bit(vm->sbm.sb_states,
575 bit + vm->sbm.sbs_per_mb, bit) - bit;
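Subblock plug state is one bit per subblock, packed memory block after memory block; "fully plugged" means no zero bit in the block's range. A userspace model using a hand-rolled bitmap instead of the kernel bitmap API:

#include <assert.h>
#include <stdbool.h>

#define SBS_PER_MB 64
static unsigned long long sb_bits[4];   /* covers 4 memory blocks */

static int sb_bit_nr(unsigned long mb_idx, int sb_id)
{
        return mb_idx * SBS_PER_MB + sb_id;
}

static void set_sb_plugged(unsigned long mb_idx, int sb_id)
{
        sb_bits[sb_bit_nr(mb_idx, sb_id) / 64] |=
                1ULL << (sb_bit_nr(mb_idx, sb_id) % 64);
}

static bool test_sb_plugged(unsigned long mb_idx, int sb_id)
{
        return sb_bits[sb_bit_nr(mb_idx, sb_id) / 64] &
                (1ULL << (sb_bit_nr(mb_idx, sb_id) % 64));
}

static int first_unplugged_sb(unsigned long mb_idx)
{
        int sb_id;

        for (sb_id = 0; sb_id < SBS_PER_MB; sb_id++)
                if (!test_sb_plugged(mb_idx, sb_id))
                        return sb_id;
        return SBS_PER_MB;      /* mirrors "returns vm->sbm.sbs_per_mb" */
}

int main(void)
{
        set_sb_plugged(1, 0);
        assert(first_unplugged_sb(1) == 1);
        assert(first_unplugged_sb(0) == 0);
        return 0;
}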
581 static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
583 const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
584 const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
585 const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
590 if (vm->sbm.sb_states && old_pages == new_pages)
597 mutex_lock(&vm->hotplug_mutex);
598 if (vm->sbm.sb_states)
599 memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
601 old_bitmap = vm->sbm.sb_states;
602 vm->sbm.sb_states = new_bitmap;
603 mutex_unlock(&vm->hotplug_mutex);
613 static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
615 if (WARN_ON_ONCE(size > vm->offline_threshold))
618 return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
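virtio_mem_could_add_memory() caps how much memory may sit added-but-still-offline at once, so unplugged-but-not-removed memory cannot pile up. A tiny userspace model of the check (the threshold value is made up):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t offline_size;
static const uint64_t offline_threshold = 2ULL << 30;

static bool could_add_memory(uint64_t size)
{
        if (size > offline_threshold)   /* WARN_ON_ONCE() in the driver */
                return false;
        return offline_size + size <= offline_threshold;
}

int main(void)
{
        assert(could_add_memory(1ULL << 30));
        offline_size = 2ULL << 30;
        assert(!could_add_memory(1ULL << 30));
        return 0;
}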
624 * Must not be called with the vm->hotplug_mutex held (possible deadlock with onlining code).
629 static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
638 if (!vm->resource_name) {
639 vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
641 if (!vm->resource_name)
645 dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
648 atomic64_add(size, &vm->offline_size);
649 rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
652 atomic64_sub(size, &vm->offline_size);
653 dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
665 static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
670 return virtio_mem_add_memory(vm, addr, size);
676 static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
678 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
679 const uint64_t size = vm->bbm.bb_size;
681 return virtio_mem_add_memory(vm, addr, size);
688 * Must not be called with the vm->hotplug_mutex held (possible deadlock with onlining code).
693 static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
698 dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
702 atomic64_sub(size, &vm->offline_size);
707 virtio_mem_retry(vm);
709 dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
717 static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
722 return virtio_mem_remove_memory(vm, addr, size);
728 * Must not be called with the vm->hotplug_mutex held (possible deadlock with onlining code).
733 static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
739 dev_dbg(&vm->vdev->dev,
745 atomic64_sub(size, &vm->offline_size);
750 virtio_mem_retry(vm);
753 dev_dbg(&vm->vdev->dev, "offlining and removing memory failed: %d\n", rc);
766 static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
772 return virtio_mem_offline_and_remove_memory(vm, addr, size);
781 static int virtio_mem_sbm_try_remove_unplugged_mb(struct virtio_mem *vm,
790 if (!virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
794 mutex_unlock(&vm->hotplug_mutex);
795 rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
796 mutex_lock(&vm->hotplug_mutex);
798 virtio_mem_sbm_set_mb_state(vm, mb_id,
807 static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
810 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
811 const uint64_t size = vm->bbm.bb_size;
813 return virtio_mem_offline_and_remove_memory(vm, addr, size);
819 static void virtio_mem_retry(struct virtio_mem *vm)
823 spin_lock_irqsave(&vm->removal_lock, flags);
824 if (!vm->removing)
825 queue_work(system_freezable_wq, &vm->wq);
826 spin_unlock_irqrestore(&vm->removal_lock, flags);
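virtio_mem_retry() requeues the worker only while the device is not being torn down; the removing flag and the queueing are serialized by one lock so no work can sneak in after removal starts. A userspace sketch, with a pthread mutex standing in for the irq-safe spinlock and a counter standing in for queue_work():

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t removal_lock = PTHREAD_MUTEX_INITIALIZER;
static bool removing;
static int queued;

static void retry(void)
{
        pthread_mutex_lock(&removal_lock);      /* spin_lock_irqsave() in the driver */
        if (!removing)
                queued++;                       /* queue_work() in the driver */
        pthread_mutex_unlock(&removal_lock);
}

int main(void)
{
        retry();
        removing = true;
        retry();                                /* ignored: removal in progress */
        return queued == 1 ? 0 : 1;
}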
829 static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
834 if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
844 static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
847 return start < vm->addr + vm->region_size && vm->addr < start + size;
854 static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
857 return start >= vm->addr && start + size <= vm->addr + vm->region_size;
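The two range predicates are the standard half-open interval tests against the device-managed region [addr, addr + region_size). A self-contained check (the region values are made up):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static const uint64_t dev_addr = 0x100000000ULL;
static const uint64_t dev_size = 0x40000000ULL;

static bool overlaps(uint64_t start, uint64_t size)
{
        return start < dev_addr + dev_size && dev_addr < start + size;
}

static bool contains(uint64_t start, uint64_t size)
{
        return start >= dev_addr && start + size <= dev_addr + dev_size;
}

int main(void)
{
        assert(overlaps(dev_addr - 4096, 8192));        /* straddles the start */
        assert(!contains(dev_addr - 4096, 8192));       /* ...so not contained */
        assert(contains(dev_addr, dev_size));
        return 0;
}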
860 static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
863 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
870 dev_warn_ratelimited(&vm->vdev->dev,
875 static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
878 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
881 virtio_mem_sbm_set_mb_state(vm, mb_id,
886 virtio_mem_sbm_set_mb_state(vm, mb_id,
895 static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
902 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
917 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
920 static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
923 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
927 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
928 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
931 sb_id * vm->sbm.sb_size);
936 static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
939 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
943 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
944 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
947 sb_id * vm->sbm.sb_size);
952 static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
961 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
967 static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
972 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
986 struct virtio_mem *vm = container_of(nb, struct virtio_mem, memory_notifier);
994 if (!virtio_mem_overlaps_range(vm, start, size))
997 if (vm->in_sbm) {
1008 id = virtio_mem_phys_to_bb_id(vm, start);
1015 if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
1029 mutex_lock(&vm->hotplug_mutex);
1030 if (vm->removing) {
1032 mutex_unlock(&vm->hotplug_mutex);
1035 vm->hotplug_active = true;
1036 if (vm->in_sbm)
1037 virtio_mem_sbm_notify_going_offline(vm, id);
1039 virtio_mem_bbm_notify_going_offline(vm, id,
1044 mutex_lock(&vm->hotplug_mutex);
1045 if (vm->removing) {
1047 mutex_unlock(&vm->hotplug_mutex);
1050 vm->hotplug_active = true;
1051 if (vm->in_sbm)
1052 rc = virtio_mem_sbm_notify_going_online(vm, id);
1055 if (vm->in_sbm)
1056 virtio_mem_sbm_notify_offline(vm, id);
1058 atomic64_add(size, &vm->offline_size);
1064 virtio_mem_retry(vm);
1066 vm->hotplug_active = false;
1067 mutex_unlock(&vm->hotplug_mutex);
1070 if (vm->in_sbm)
1071 virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);
1073 atomic64_sub(size, &vm->offline_size);
1080 if (!atomic_read(&vm->wq_active) &&
1081 virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
1082 virtio_mem_retry(vm);
1084 vm->hotplug_active = false;
1085 mutex_unlock(&vm->hotplug_mutex);
1088 if (!vm->hotplug_active)
1090 if (vm->in_sbm)
1091 virtio_mem_sbm_notify_cancel_offline(vm, id);
1093 virtio_mem_bbm_notify_cancel_offline(vm, id,
1096 vm->hotplug_active = false;
1097 mutex_unlock(&vm->hotplug_mutex);
1100 if (!vm->hotplug_active)
1102 vm->hotplug_active = false;
1103 mutex_unlock(&vm->hotplug_mutex);
1192 static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
1211 if (atomic_read(&vm->config_changed))
1274 static void virtio_mem_online_page(struct virtio_mem *vm,
1292 if (vm->in_sbm) {
1294 sb_id = virtio_mem_phys_to_sb_id(vm, addr);
1295 count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1;
1297 if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) {
1301 virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) {
1310 order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT;
1311 do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1);
1319 id = virtio_mem_phys_to_bb_id(vm, addr);
1320 do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
1336 struct virtio_mem *vm;
1339 list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
1344 if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
1355 virtio_mem_online_page(vm, page, order);
1364 static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
1372 vm->req = *req;
1375 sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
1379 sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
1382 rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
1386 virtqueue_kick(vm->vq);
1389 wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
1391 return virtio16_to_cpu(vm->vdev, vm->resp.type);
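virtio_mem_send_request() is a synchronous round trip: post the request and response buffers to the virtqueue, kick the device, and sleep until the response buffer is handed back. A loose userspace model in which a stub "device" answers immediately; the real path uses sg_init_one(), virtqueue_add_sgs(), virtqueue_kick() and wait_event():

#include <assert.h>
#include <stdint.h>

struct req  { uint16_t type; };
struct resp { uint16_t type; };

#define REQ_PLUG  0
#define RESP_ACK  0

static struct resp device_process(const struct req *req)
{
        /* stand-in for the hypervisor side consuming the queue */
        (void)req;
        struct resp r = { .type = RESP_ACK };
        return r;
}

static uint16_t send_request(const struct req *req)
{
        /*
         * The driver instead posts both buffers with virtqueue_add_sgs(),
         * kicks, then wait_event()s until virtqueue_get_buf() returns.
         */
        struct resp resp = device_process(req);

        return resp.type;
}

int main(void)
{
        struct req req = { .type = REQ_PLUG };

        assert(send_request(&req) == RESP_ACK);
        return 0;
}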
1394 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
1397 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1399 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
1400 .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
1401 .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1405 if (atomic_read(&vm->config_changed))
1408 dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
1411 switch (virtio_mem_send_request(vm, &req)) {
1413 vm->plugged_size += size;
1428 dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
1432 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
1435 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1437 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
1438 .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
1439 .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1443 if (atomic_read(&vm->config_changed))
1446 dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
1449 switch (virtio_mem_send_request(vm, &req)) {
1451 vm->plugged_size -= size;
1463 dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
1467 static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
1470 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
1474 dev_dbg(&vm->vdev->dev, "unplugging all memory");
1476 switch (virtio_mem_send_request(vm, &req)) {
1478 vm->unplug_all_required = false;
1479 vm->plugged_size = 0;
1481 atomic_set(&vm->config_changed, 1);
1490 dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
1498 static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
1502 sb_id * vm->sbm.sb_size;
1503 const uint64_t size = count * vm->sbm.sb_size;
1506 rc = virtio_mem_send_plug_request(vm, addr, size);
1508 virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
1516 static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
1520 sb_id * vm->sbm.sb_size;
1521 const uint64_t size = count * vm->sbm.sb_size;
1524 rc = virtio_mem_send_unplug_request(vm, addr, size);
1526 virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
1535 static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
1537 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1538 const uint64_t size = vm->bbm.bb_size;
1540 return virtio_mem_send_unplug_request(vm, addr, size);
1548 static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
1550 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1551 const uint64_t size = vm->bbm.bb_size;
1553 return virtio_mem_send_plug_request(vm, addr, size);
1565 static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
1571 sb_id = vm->sbm.sbs_per_mb - 1;
1575 virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
1582 virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
1587 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1604 static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
1606 uint64_t nb_sb = vm->sbm.sbs_per_mb;
1608 return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
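virtio_mem_sbm_unplug_any_sb_raw() walks subblocks from the highest id down, skips already-unplugged ones, and coalesces adjacent plugged subblocks into a single unplug request, so fewer device round trips are needed. A userspace sketch of that scan (the plug layout is made up):

#include <stdbool.h>
#include <stdio.h>

#define SBS_PER_MB 8
static bool plugged[SBS_PER_MB] = { 1, 1, 0, 1, 1, 1, 0, 1 };

static void unplug(int sb_id, int count)
{
        printf("unplug sb %d..%d\n", sb_id, sb_id + count - 1);
        for (int i = 0; i < count; i++)
                plugged[sb_id + i] = false;
}

static void unplug_any_sb(unsigned long *nb_sb)
{
        int sb_id = SBS_PER_MB - 1;

        while (*nb_sb && sb_id >= 0) {
                while (sb_id >= 0 && !plugged[sb_id])   /* skip unplugged */
                        sb_id--;
                if (sb_id < 0)
                        break;

                int count = 1;
                while (count < *nb_sb && sb_id > 0 && plugged[sb_id - 1]) {
                        count++;                        /* grow the run downwards */
                        sb_id--;
                }
                unplug(sb_id, count);
                *nb_sb -= count;
                sb_id--;
        }
}

int main(void)
{
        unsigned long nb_sb = 4;

        unplug_any_sb(&nb_sb);  /* prints "unplug sb 7..7" then "unplug sb 3..5" */
        return 0;
}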
1614 static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
1619 if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
1623 rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
1628 rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
1632 vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
1633 *mb_id = vm->sbm.next_mb_id++;
1643 static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
1646 const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
1656 rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
1664 if (count == vm->sbm.sbs_per_mb)
1665 virtio_mem_sbm_set_mb_state(vm, mb_id,
1668 virtio_mem_sbm_set_mb_state(vm, mb_id,
1672 rc = virtio_mem_sbm_add_mb(vm, mb_id);
1676 if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
1678 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
1694 static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
1697 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1706 sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
1707 if (sb_id >= vm->sbm.sbs_per_mb)
1711 sb_id + count < vm->sbm.sbs_per_mb &&
1712 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
1715 rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
1724 sb_id * vm->sbm.sb_size);
1725 nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
1729 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1730 virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);
1735 static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1742 uint64_t nb_sb = diff / vm->sbm.sb_size;
1750 mutex_lock(&vm->hotplug_mutex);
1753 virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
1754 rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
1765 mutex_unlock(&vm->hotplug_mutex);
1768 virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
1769 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1772 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1780 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1783 rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
1786 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1794 mutex_unlock(&vm->hotplug_mutex);
1803 static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
1808 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
1812 rc = virtio_mem_bbm_plug_bb(vm, bb_id);
1815 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
1817 rc = virtio_mem_bbm_add_bb(vm, bb_id);
1819 if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
1820 virtio_mem_bbm_set_bb_state(vm, bb_id,
1824 virtio_mem_bbm_set_bb_state(vm, bb_id,
1834 static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
1839 if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
1843 rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
1847 vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
1848 *bb_id = vm->bbm.next_bb_id;
1849 vm->bbm.next_bb_id++;
1853 static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1855 uint64_t nb_bb = diff / vm->bbm.bb_size;
1863 virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
1864 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1867 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1877 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1880 rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
1883 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1897 static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
1899 if (vm->in_sbm)
1900 return virtio_mem_sbm_plug_request(vm, diff);
1901 return virtio_mem_bbm_plug_request(vm, diff);
1913 static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
1919 rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);
1922 if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1923 virtio_mem_sbm_set_mb_state(vm, mb_id,
1928 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1935 virtio_mem_sbm_set_mb_state(vm, mb_id,
1938 mutex_unlock(&vm->hotplug_mutex);
1939 rc = virtio_mem_sbm_remove_mb(vm, mb_id);
1941 mutex_lock(&vm->hotplug_mutex);
1951 static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
1955 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
1956 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1961 sb_id * vm->sbm.sb_size);
1963 rc = virtio_mem_fake_offline(vm, start_pfn, nr_pages);
1968 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1977 virtio_mem_sbm_set_mb_state(vm, mb_id,
1981 virtio_mem_sbm_set_mb_state(vm, mb_id,
1999 static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
2006 if (*nb_sb >= vm->sbm.sbs_per_mb &&
2007 virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
2008 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
2009 vm->sbm.sbs_per_mb);
2011 *nb_sb -= vm->sbm.sbs_per_mb;
2018 for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
2021 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
2026 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
2035 rc = virtio_mem_sbm_try_remove_unplugged_mb(vm, mb_id);
2037 vm->sbm.have_unplugged_mb = 1;
2054 static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
2058 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
2065 return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
2068 return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
2073 static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2083 uint64_t nb_sb = diff / vm->sbm.sb_size;
2095 mutex_lock(&vm->hotplug_mutex);
2105 virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
2106 rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
2109 mutex_unlock(&vm->hotplug_mutex);
2111 mutex_lock(&vm->hotplug_mutex);
2114 mutex_unlock(&vm->hotplug_mutex);
2119 mutex_unlock(&vm->hotplug_mutex);
2122 mutex_unlock(&vm->hotplug_mutex);
2133 static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
2136 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2137 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2143 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
2153 mutex_lock(&vm->hotplug_mutex);
2154 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
2161 rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION);
2167 mutex_unlock(&vm->hotplug_mutex);
2169 rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
2171 mutex_lock(&vm->hotplug_mutex);
2175 rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
2177 virtio_mem_bbm_set_bb_state(vm, bb_id,
2180 virtio_mem_bbm_set_bb_state(vm, bb_id,
2191 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
2192 mutex_unlock(&vm->hotplug_mutex);
2199 static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
2202 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2203 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2218 static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
2221 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2222 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2238 static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2240 uint64_t nb_bb = diff / vm->bbm.bb_size;
2252 virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
2259 if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
2261 if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
2263 rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
2281 static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
2283 if (vm->in_sbm)
2284 return virtio_mem_sbm_unplug_request(vm, diff);
2285 return virtio_mem_bbm_unplug_request(vm, diff);
2293 static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm)
2298 if (!vm->in_sbm) {
2299 virtio_mem_bbm_for_each_bb(vm, id,
2301 rc = virtio_mem_bbm_unplug_bb(vm, id);
2304 virtio_mem_bbm_set_bb_state(vm, id,
2310 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
2311 rc = virtio_mem_sbm_unplug_mb(vm, id);
2314 virtio_mem_sbm_set_mb_state(vm, id,
2318 if (!vm->sbm.have_unplugged_mb)
2325 vm->sbm.have_unplugged_mb = false;
2327 mutex_lock(&vm->hotplug_mutex);
2328 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL)
2329 rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
2330 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL)
2331 rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
2332 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
2333 rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id);
2334 mutex_unlock(&vm->hotplug_mutex);
2337 vm->sbm.have_unplugged_mb = true;
2345 static void virtio_mem_refresh_config(struct virtio_mem *vm)
2351 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2353 if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
2354 vm->plugged_size = new_plugged_size;
2357 virtio_cread_le(vm->vdev, struct virtio_mem_config,
2359 end_addr = min(vm->addr + usable_region_size - 1, pluggable_range.end);
2362 if (vm->in_sbm) {
2363 vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
2365 vm->sbm.last_usable_mb_id--;
2367 vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
2369 if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
2370 vm->bbm.last_usable_bb_id--;
2380 virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
2381 &vm->requested_size);
2383 dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
2384 dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
2392 struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
2396 if (unlikely(vm->in_kdump)) {
2397 dev_warn_once(&vm->vdev->dev,
2402 hrtimer_cancel(&vm->retry_timer);
2404 if (vm->broken)
2407 atomic_set(&vm->wq_active, 1);
2412 if (unlikely(vm->unplug_all_required))
2413 rc = virtio_mem_send_unplug_all_request(vm);
2415 if (atomic_read(&vm->config_changed)) {
2416 atomic_set(&vm->config_changed, 0);
2417 virtio_mem_refresh_config(vm);
2422 rc = virtio_mem_cleanup_pending_mb(vm);
2424 if (!rc && vm->requested_size != vm->plugged_size) {
2425 if (vm->requested_size > vm->plugged_size) {
2426 diff = vm->requested_size - vm->plugged_size;
2427 rc = virtio_mem_plug_request(vm, diff);
2429 diff = vm->plugged_size - vm->requested_size;
2430 rc = virtio_mem_unplug_request(vm, diff);
2438 if (!rc && vm->in_sbm && vm->sbm.have_unplugged_mb)
2443 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
2463 hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
2471 dev_err(&vm->vdev->dev,
2473 vm->broken = true;
2476 atomic_set(&vm->wq_active, 0);
2481 struct virtio_mem *vm = container_of(timer, struct virtio_mem, retry_timer);
2484 virtio_mem_retry(vm);
2485 vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
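On failure the work item is retried with exponential backoff: each timer expiry requeues the worker and doubles the next timeout up to a cap. A sketch with illustrative min/max values (the driver's actual constants may differ):

#include <stdio.h>

#define RETRY_TIMER_MIN_MS      50000   /* illustrative */
#define RETRY_TIMER_MAX_MS      300000  /* illustrative */

static unsigned int retry_timer_ms = RETRY_TIMER_MIN_MS;

static void timer_expired(void)
{
        /* the driver calls virtio_mem_retry(vm) here to requeue the worker */
        retry_timer_ms = retry_timer_ms * 2 < RETRY_TIMER_MAX_MS ?
                         retry_timer_ms * 2 : RETRY_TIMER_MAX_MS;
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                printf("next retry in %u ms\n", retry_timer_ms);
                timer_expired();
        }
        return 0;
}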
2492 struct virtio_mem *vm = vq->vdev->priv;
2494 wake_up(&vm->host_resp);
2497 static int virtio_mem_init_vq(struct virtio_mem *vm)
2501 vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
2505 vm->vq = vq;
2510 static int virtio_mem_init_hotplug(struct virtio_mem *vm)
2517 if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
2518 dev_warn(&vm->vdev->dev,
2520 if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
2521 dev_warn(&vm->vdev->dev,
2523 if (vm->addr < pluggable_range.start ||
2524 vm->addr + vm->region_size - 1 > pluggable_range.end)
2525 dev_warn(&vm->vdev->dev,
2529 vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
2537 sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
2541 vm->in_sbm = true;
2542 vm->sbm.sb_size = sb_size;
2543 vm->sbm.sbs_per_mb = memory_block_size_bytes() /
2544 vm->sbm.sb_size;
2547 addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2549 vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
2550 vm->sbm.next_mb_id = vm->sbm.first_mb_id;
2553 vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
2558 dev_warn(&vm->vdev->dev,
2560 } else if (bbm_block_size < vm->bbm.bb_size) {
2561 dev_warn(&vm->vdev->dev,
2564 vm->bbm.bb_size = bbm_block_size;
2569 addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2570 vm->bbm.bb_size - 1;
2571 vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
2572 vm->bbm.next_bb_id = vm->bbm.first_bb_id;
2575 vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
2576 vm->offline_threshold);
2579 dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
2581 if (vm->in_sbm)
2582 dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
2583 (unsigned long long)vm->sbm.sb_size);
2585 dev_info(&vm->vdev->dev, "big block size: 0x%llx",
2586 (unsigned long long)vm->bbm.bb_size);
2589 rc = virtio_mem_create_resource(vm);
2594 if (vm->in_sbm)
2597 unit_pages = PHYS_PFN(vm->bbm.bb_size);
2598 rc = memory_group_register_dynamic(vm->nid, unit_pages);
2601 vm->mgid = rc;
2608 if (vm->plugged_size) {
2609 vm->unplug_all_required = true;
2610 dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
2614 vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
2615 rc = register_memory_notifier(&vm->memory_notifier);
2618 rc = register_virtio_mem_device(vm);
2624 unregister_memory_notifier(&vm->memory_notifier);
2626 memory_group_unregister(vm->mgid);
2628 virtio_mem_delete_resource(vm);
2633 static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr,
2636 const uint64_t nb_vm_blocks = size / vm->device_block_size;
2638 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE),
2639 .u.state.addr = cpu_to_virtio64(vm->vdev, addr),
2640 .u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
2644 dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr,
2647 switch (virtio_mem_send_request(vm, &req)) {
2649 return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state);
2657 dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc);
2664 struct virtio_mem *vm = container_of(cb, struct virtio_mem, vmcore_cb);
2670 if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
2672 if (!vm->plugged_size)
2679 mutex_lock(&vm->hotplug_mutex);
2681 addr = ALIGN_DOWN(addr, vm->device_block_size);
2682 if (addr != vm->last_block_addr) {
2683 rc = virtio_mem_send_state_request(vm, addr,
2684 vm->device_block_size);
2687 vm->last_block_plugged = true;
2689 vm->last_block_plugged = false;
2690 vm->last_block_addr = addr;
2693 is_ram = vm->last_block_plugged;
2694 mutex_unlock(&vm->hotplug_mutex);
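The vmcore callback aligns each queried address down to the device block size and caches the last answer, so a run of pfn lookups within one block costs a single STATE request. A userspace model of the one-entry cache (block size and "device" are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE (2ULL << 20)

static uint64_t last_block_addr = ~0ULL;
static bool last_block_plugged;
static int device_queries;

static bool query_device(uint64_t addr)
{
        (void)addr;
        device_queries++;       /* stand-in for the STATE request */
        return true;            /* pretend everything is plugged */
}

static bool pfn_is_ram(uint64_t addr)
{
        addr &= ~(BLOCK_SIZE - 1);      /* ALIGN_DOWN to the device block */
        if (addr != last_block_addr) {
                last_block_plugged = query_device(addr);
                last_block_addr = addr;
        }
        return last_block_plugged;
}

int main(void)
{
        for (uint64_t off = 0; off < BLOCK_SIZE; off += 4096)
                pfn_is_ram(off);
        printf("%d device query for %llu pages\n", device_queries,
               (unsigned long long)(BLOCK_SIZE / 4096));
        return 0;
}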
2699 static int virtio_mem_init_kdump(struct virtio_mem *vm)
2702 dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
2703 vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
2704 register_vmcore_cb(&vm->vmcore_cb);
2707 dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n");
2712 static int virtio_mem_init(struct virtio_mem *vm)
2716 if (!vm->vdev->config->get) {
2717 dev_err(&vm->vdev->dev, "config access disabled\n");
2722 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2723 &vm->plugged_size);
2724 virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
2725 &vm->device_block_size);
2726 virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
2728 vm->nid = virtio_mem_translate_node_id(vm, node_id);
2729 virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
2730 virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
2731 &vm->region_size);
2734 if (vm->nid == NUMA_NO_NODE)
2735 vm->nid = memory_add_physaddr_to_nid(vm->addr);
2737 dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
2738 dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
2739 dev_info(&vm->vdev->dev, "device block size: 0x%llx",
2740 (unsigned long long)vm->device_block_size);
2741 if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
2742 dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
2748 if (vm->in_kdump)
2749 return virtio_mem_init_kdump(vm);
2750 return virtio_mem_init_hotplug(vm);
2753 static int virtio_mem_create_resource(struct virtio_mem *vm)
2759 const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
2765 vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
2768 if (!vm->parent_resource) {
2770 dev_warn(&vm->vdev->dev, "could not reserve device region\n");
2771 dev_info(&vm->vdev->dev,
2777 vm->parent_resource->flags &= ~IORESOURCE_BUSY;
2781 static void virtio_mem_delete_resource(struct virtio_mem *vm)
2785 if (!vm->parent_resource)
2788 name = vm->parent_resource->name;
2789 release_resource(vm->parent_resource);
2790 kfree(vm->parent_resource);
2792 vm->parent_resource = NULL;
2800 static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
2804 return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
2805 vm->addr + vm->region_size, NULL,
2811 struct virtio_mem *vm;
2817 vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
2818 if (!vm)
2821 init_waitqueue_head(&vm->host_resp);
2822 vm->vdev = vdev;
2823 INIT_WORK(&vm->wq, virtio_mem_run_wq);
2824 mutex_init(&vm->hotplug_mutex);
2825 INIT_LIST_HEAD(&vm->next);
2826 spin_lock_init(&vm->removal_lock);
2827 hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2828 vm->retry_timer.function = virtio_mem_timer_expired;
2829 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
2830 vm->in_kdump = is_kdump_kernel();
2833 rc = virtio_mem_init_vq(vm);
2838 rc = virtio_mem_init(vm);
2845 if (!vm->in_kdump) {
2846 atomic_set(&vm->config_changed, 1);
2847 queue_work(system_freezable_wq, &vm->wq);
2854 kfree(vm);
2860 static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
2869 mutex_lock(&vm->hotplug_mutex);
2870 spin_lock_irq(&vm->removal_lock);
2871 vm->removing = true;
2872 spin_unlock_irq(&vm->removal_lock);
2873 mutex_unlock(&vm->hotplug_mutex);
2876 cancel_work_sync(&vm->wq);
2877 hrtimer_cancel(&vm->retry_timer);
2879 if (vm->in_sbm) {
2884 virtio_mem_sbm_for_each_mb(vm, mb_id,
2886 rc = virtio_mem_sbm_remove_mb(vm, mb_id);
2888 virtio_mem_sbm_set_mb_state(vm, mb_id,
2899 unregister_virtio_mem_device(vm);
2900 unregister_memory_notifier(&vm->memory_notifier);
2907 if (virtio_mem_has_memory_added(vm)) {
2908 dev_warn(&vm->vdev->dev,
2911 virtio_mem_delete_resource(vm);
2912 kfree_const(vm->resource_name);
2913 memory_group_unregister(vm->mgid);
2917 if (vm->in_sbm) {
2918 vfree(vm->sbm.mb_states);
2919 vfree(vm->sbm.sb_states);
2921 vfree(vm->bbm.bb_states);
2925 static void virtio_mem_deinit_kdump(struct virtio_mem *vm)
2928 unregister_vmcore_cb(&vm->vmcore_cb);
2934 struct virtio_mem *vm = vdev->priv;
2936 if (vm->in_kdump)
2937 virtio_mem_deinit_kdump(vm);
2939 virtio_mem_deinit_hotplug(vm);
2945 kfree(vm);
2951 struct virtio_mem *vm = vdev->priv;
2953 if (unlikely(vm->in_kdump))
2956 atomic_set(&vm->config_changed, 1);
2957 virtio_mem_retry(vm);