
Lines Matching refs:b

259 void (*add_page)(struct vmballoon *b, int idx, struct page *p);
260 int (*lock)(struct vmballoon *b, unsigned int num_pages,
262 int (*unlock)(struct vmballoon *b, unsigned int num_pages,
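The three ops fragments above are truncated; pieced together with the call sites further down (b->ops->lock(b, num_pages, is_2m_pages, &b->target) and the matching unlock calls), the ops table plausibly reads as the sketch below. The bool is_2m_pages and unsigned int *target parameters are inferred from those call sites, not quoted from the listing.

#include <stdbool.h>			/* for bool when read outside kernel headers */

struct vmballoon;			/* defined in vmw_balloon.c; opaque here */
struct page;				/* kernel page descriptor; opaque here */

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
		    bool is_2m_pages, unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
		      bool is_2m_pages, unsigned int *target);
};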
323 static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) in vmballoon_send_start() argument
328 STATS_INC(b->stats.start); in vmballoon_send_start()
334 b->capabilities = capabilities; in vmballoon_send_start()
338 b->capabilities = VMW_BALLOON_BASIC_CMDS; in vmballoon_send_start()
350 if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && in vmballoon_send_start()
351 (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) in vmballoon_send_start()
352 b->supported_page_sizes = 2; in vmballoon_send_start()
354 b->supported_page_sizes = 1; in vmballoon_send_start()
358 STATS_INC(b->stats.start_fail); in vmballoon_send_start()
363 static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) in vmballoon_check_status() argument
370 b->reset_required = true; in vmballoon_check_status()
384 static bool vmballoon_send_guest_id(struct vmballoon *b) in vmballoon_send_guest_id() argument
391 STATS_INC(b->stats.guest_type); in vmballoon_send_guest_id()
393 if (vmballoon_check_status(b, status)) in vmballoon_send_guest_id()
397 STATS_INC(b->stats.guest_type_fail); in vmballoon_send_guest_id()
412 static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) in vmballoon_send_get_target() argument
425 si_meminfo(&b->sysinfo); in vmballoon_send_get_target()
426 limit = b->sysinfo.totalram; in vmballoon_send_get_target()
434 STATS_INC(b->stats.target); in vmballoon_send_get_target()
437 if (vmballoon_check_status(b, status)) { in vmballoon_send_get_target()
443 STATS_INC(b->stats.target_fail); in vmballoon_send_get_target()
452 static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, in vmballoon_send_lock_page() argument
462 STATS_INC(b->stats.lock[false]); in vmballoon_send_lock_page()
465 if (vmballoon_check_status(b, status)) in vmballoon_send_lock_page()
469 STATS_INC(b->stats.lock_fail[false]); in vmballoon_send_lock_page()
473 static int vmballoon_send_batched_lock(struct vmballoon *b, in vmballoon_send_batched_lock() argument
477 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); in vmballoon_send_batched_lock()
479 STATS_INC(b->stats.lock[is_2m_pages]); in vmballoon_send_batched_lock()
488 if (vmballoon_check_status(b, status)) in vmballoon_send_batched_lock()
492 STATS_INC(b->stats.lock_fail[is_2m_pages]); in vmballoon_send_batched_lock()
500 static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn, in vmballoon_send_unlock_page() argument
510 STATS_INC(b->stats.unlock[false]); in vmballoon_send_unlock_page()
513 if (vmballoon_check_status(b, status)) in vmballoon_send_unlock_page()
517 STATS_INC(b->stats.unlock_fail[false]); in vmballoon_send_unlock_page()
521 static bool vmballoon_send_batched_unlock(struct vmballoon *b, in vmballoon_send_batched_unlock() argument
525 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); in vmballoon_send_batched_unlock()
527 STATS_INC(b->stats.unlock[is_2m_pages]); in vmballoon_send_batched_unlock()
536 if (vmballoon_check_status(b, status)) in vmballoon_send_batched_unlock()
540 STATS_INC(b->stats.unlock_fail[is_2m_pages]); in vmballoon_send_batched_unlock()
566 static void vmballoon_pop(struct vmballoon *b) in vmballoon_pop() argument
574 &b->page_sizes[is_2m_pages]; in vmballoon_pop()
580 STATS_INC(b->stats.free[is_2m_pages]); in vmballoon_pop()
581 b->size -= size_per_page; in vmballoon_pop()
587 free_page((unsigned long)b->batch_page); in vmballoon_pop()
588 b->batch_page = NULL; in vmballoon_pop()
596 static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, in vmballoon_lock_page() argument
600 struct page *page = b->page; in vmballoon_lock_page()
601 struct vmballoon_page_size *page_size = &b->page_sizes[false]; in vmballoon_lock_page()
605 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status, in vmballoon_lock_page()
608 STATS_INC(b->stats.refused_alloc[false]); in vmballoon_lock_page()
635 b->size++; in vmballoon_lock_page()
640 static int vmballoon_lock_batched_page(struct vmballoon *b, in vmballoon_lock_batched_page() argument
646 locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages, in vmballoon_lock_batched_page()
650 u64 pa = vmballoon_batch_get_pa(b->batch_page, i); in vmballoon_lock_batched_page()
660 u64 pa = vmballoon_batch_get_pa(b->batch_page, i); in vmballoon_lock_batched_page()
663 &b->page_sizes[is_2m_pages]; in vmballoon_lock_batched_page()
665 locked = vmballoon_batch_get_status(b->batch_page, i); in vmballoon_lock_batched_page()
670 b->size += size_per_page; in vmballoon_lock_batched_page()
699 static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages, in vmballoon_unlock_page() argument
702 struct page *page = b->page; in vmballoon_unlock_page()
703 struct vmballoon_page_size *page_size = &b->page_sizes[false]; in vmballoon_unlock_page()
707 if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) { in vmballoon_unlock_page()
714 STATS_INC(b->stats.free[false]); in vmballoon_unlock_page()
717 b->size--; in vmballoon_unlock_page()
722 static int vmballoon_unlock_batched_page(struct vmballoon *b, in vmballoon_unlock_batched_page() argument
730 hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages, in vmballoon_unlock_batched_page()
736 u64 pa = vmballoon_batch_get_pa(b->batch_page, i); in vmballoon_unlock_batched_page()
739 &b->page_sizes[is_2m_pages]; in vmballoon_unlock_batched_page()
741 locked = vmballoon_batch_get_status(b->batch_page, i); in vmballoon_unlock_batched_page()
752 STATS_INC(b->stats.free[is_2m_pages]); in vmballoon_unlock_batched_page()
755 b->size -= size_per_page; in vmballoon_unlock_batched_page()
766 static void vmballoon_release_refused_pages(struct vmballoon *b, in vmballoon_release_refused_pages() argument
771 &b->page_sizes[is_2m_pages]; in vmballoon_release_refused_pages()
776 STATS_INC(b->stats.refused_free[is_2m_pages]); in vmballoon_release_refused_pages()
782 static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p) in vmballoon_add_page() argument
784 b->page = p; in vmballoon_add_page()
787 static void vmballoon_add_batched_page(struct vmballoon *b, int idx, in vmballoon_add_batched_page() argument
790 vmballoon_batch_set_pa(b->batch_page, idx, in vmballoon_add_batched_page()
799 static void vmballoon_inflate(struct vmballoon *b) in vmballoon_inflate() argument
808 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); in vmballoon_inflate()
829 if (b->slow_allocation_cycles) { in vmballoon_inflate()
830 rate = b->rate_alloc; in vmballoon_inflate()
835 b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES; in vmballoon_inflate()
839 __func__, b->target - b->size, rate, b->rate_alloc); in vmballoon_inflate()
841 while (!b->reset_required && in vmballoon_inflate()
842 b->size + num_pages * vmballoon_page_size(is_2m_pages) in vmballoon_inflate()
843 < b->target) { in vmballoon_inflate()
847 STATS_INC(b->stats.alloc[is_2m_pages]); in vmballoon_inflate()
849 STATS_INC(b->stats.sleep_alloc); in vmballoon_inflate()
853 STATS_INC(b->stats.alloc_fail[is_2m_pages]); in vmballoon_inflate()
856 b->ops->lock(b, num_pages, true, &b->target); in vmballoon_inflate()
875 b->rate_alloc = max(b->rate_alloc / 2, in vmballoon_inflate()
877 STATS_INC(b->stats.sleep_alloc_fail); in vmballoon_inflate()
889 b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; in vmballoon_inflate()
891 if (allocations >= b->rate_alloc) in vmballoon_inflate()
896 rate = b->rate_alloc; in vmballoon_inflate()
900 b->ops->add_page(b, num_pages++, page); in vmballoon_inflate()
901 if (num_pages == b->batch_max_pages) { in vmballoon_inflate()
902 error = b->ops->lock(b, num_pages, is_2m_pages, in vmballoon_inflate()
903 &b->target); in vmballoon_inflate()
918 b->ops->lock(b, num_pages, is_2m_pages, &b->target); in vmballoon_inflate()
924 if (error == 0 && allocations >= b->rate_alloc) { in vmballoon_inflate()
925 unsigned int mult = allocations / b->rate_alloc; in vmballoon_inflate()
927 b->rate_alloc = in vmballoon_inflate()
928 min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, in vmballoon_inflate()
932 vmballoon_release_refused_pages(b, true); in vmballoon_inflate()
933 vmballoon_release_refused_pages(b, false); in vmballoon_inflate()
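The inflate fragments above (lines 900-918) show a batch-and-flush pattern: pages are staged with ops->add_page() until batch_max_pages entries accumulate, then committed in one ops->lock() call, with a final partial flush after the loop. A minimal standalone sketch of that pattern, using hypothetical names (stage_page, flush_batch, BATCH_MAX) rather than the driver's own:

#include <stdio.h>

#define BATCH_MAX 4			/* stands in for b->batch_max_pages */

static unsigned int num_staged;		/* pages queued but not yet committed */

static void stage_page(int page_id)	/* analogue of b->ops->add_page() */
{
	printf("stage page %d (slot %u)\n", page_id, num_staged);
	num_staged++;
}

static void flush_batch(void)		/* analogue of b->ops->lock() */
{
	if (num_staged == 0)
		return;
	printf("commit batch of %u pages\n", num_staged);
	num_staged = 0;
}

int main(void)
{
	/* Stage ten pages; every BATCH_MAX-th one triggers a flush, and the
	 * leftover partial batch is flushed after the loop, mirroring the
	 * "if (num_pages > 0) b->ops->lock(...)" tail in the fragments. */
	for (int id = 0; id < 10; id++) {
		stage_page(id);
		if (num_staged == BATCH_MAX)
			flush_batch();
	}
	flush_batch();
	return 0;
}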
939 static void vmballoon_deflate(struct vmballoon *b) in vmballoon_deflate() argument
943 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); in vmballoon_deflate()
946 for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes; in vmballoon_deflate()
951 &b->page_sizes[is_2m_pages]; in vmballoon_deflate()
954 if (b->reset_required || in vmballoon_deflate()
955 (b->target > 0 && in vmballoon_deflate()
956 b->size - num_pages in vmballoon_deflate()
958 < b->target + vmballoon_page_size(true))) in vmballoon_deflate()
962 b->ops->add_page(b, num_pages++, page); in vmballoon_deflate()
964 if (num_pages == b->batch_max_pages) { in vmballoon_deflate()
967 error = b->ops->unlock(b, num_pages, in vmballoon_deflate()
968 is_2m_pages, &b->target); in vmballoon_deflate()
978 b->ops->unlock(b, num_pages, is_2m_pages, &b->target); in vmballoon_deflate()
994 static bool vmballoon_init_batching(struct vmballoon *b) in vmballoon_init_batching() argument
1002 b->batch_page = page_address(page); in vmballoon_init_batching()
1011 struct vmballoon *b = client_data; in vmballoon_doorbell() local
1013 STATS_INC(b->stats.doorbell); in vmballoon_doorbell()
1015 mod_delayed_work(system_freezable_wq, &b->dwork, 0); in vmballoon_doorbell()
1021 static void vmballoon_vmci_cleanup(struct vmballoon *b) in vmballoon_vmci_cleanup() argument
1027 STATS_INC(b->stats.doorbell_unset); in vmballoon_vmci_cleanup()
1029 if (!vmci_handle_is_invalid(b->vmci_doorbell)) { in vmballoon_vmci_cleanup()
1030 vmci_doorbell_destroy(b->vmci_doorbell); in vmballoon_vmci_cleanup()
1031 b->vmci_doorbell = VMCI_INVALID_HANDLE; in vmballoon_vmci_cleanup()
1038 static int vmballoon_vmci_init(struct vmballoon *b) in vmballoon_vmci_init() argument
1042 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) in vmballoon_vmci_init()
1045 error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB, in vmballoon_vmci_init()
1047 vmballoon_doorbell, b); in vmballoon_vmci_init()
1052 error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context, in vmballoon_vmci_init()
1053 b->vmci_doorbell.resource, dummy); in vmballoon_vmci_init()
1055 STATS_INC(b->stats.doorbell_set); in vmballoon_vmci_init()
1062 vmballoon_vmci_cleanup(b); in vmballoon_vmci_init()
1071 static void vmballoon_reset(struct vmballoon *b) in vmballoon_reset() argument
1075 vmballoon_vmci_cleanup(b); in vmballoon_reset()
1078 vmballoon_pop(b); in vmballoon_reset()
1080 if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES)) in vmballoon_reset()
1083 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) { in vmballoon_reset()
1084 b->ops = &vmballoon_batched_ops; in vmballoon_reset()
1085 b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES; in vmballoon_reset()
1086 if (!vmballoon_init_batching(b)) { in vmballoon_reset()
1093 vmballoon_send_start(b, 0); in vmballoon_reset()
1096 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { in vmballoon_reset()
1097 b->ops = &vmballoon_basic_ops; in vmballoon_reset()
1098 b->batch_max_pages = 1; in vmballoon_reset()
1101 b->reset_required = false; in vmballoon_reset()
1103 error = vmballoon_vmci_init(b); in vmballoon_reset()
1107 if (!vmballoon_send_guest_id(b)) in vmballoon_reset()
1118 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); in vmballoon_work() local
1121 STATS_INC(b->stats.timer); in vmballoon_work()
1123 if (b->reset_required) in vmballoon_work()
1124 vmballoon_reset(b); in vmballoon_work()
1126 if (b->slow_allocation_cycles > 0) in vmballoon_work()
1127 b->slow_allocation_cycles--; in vmballoon_work()
1129 if (!b->reset_required && vmballoon_send_get_target(b, &target)) { in vmballoon_work()
1131 b->target = target; in vmballoon_work()
1133 if (b->size < target) in vmballoon_work()
1134 vmballoon_inflate(b); in vmballoon_work()
1136 b->size > target + vmballoon_page_size(true)) in vmballoon_work()
1137 vmballoon_deflate(b); in vmballoon_work()
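The vmballoon_work() fragments above (lines 1129-1137) show the resize policy: fetch a target from the hypervisor, inflate while below it, and deflate only once the balloon exceeds the target by more than one 2 MB page, which gives the worker some hysteresis. A standalone sketch of just that decision, where HUGE_PAGE_FRAMES and the function name are illustrative, not the driver's:

#include <stdio.h>

#define HUGE_PAGE_FRAMES 512u		/* 2 MB / 4 KB, mirrors vmballoon_page_size(true) */

static const char *balloon_decision(unsigned int size, unsigned int target)
{
	if (size < target)
		return "inflate";	/* below target: allocate and lock more pages */
	if (size > target + HUGE_PAGE_FRAMES)
		return "deflate";	/* well above target: unlock and free pages */
	return "hold";			/* inside the hysteresis band: do nothing */
}

int main(void)
{
	printf("%s\n", balloon_decision(1000, 1500));	/* inflate */
	printf("%s\n", balloon_decision(1600, 1500));	/* hold */
	printf("%s\n", balloon_decision(2100, 1500));	/* deflate */
	return 0;
}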
1155 struct vmballoon *b = f->private; in vmballoon_debug_show() local
1156 struct vmballoon_stats *stats = &b->stats; in vmballoon_debug_show()
1163 VMW_BALLOON_CAPABILITIES, b->capabilities, in vmballoon_debug_show()
1164 b->reset_required ? 'y' : 'n'); in vmballoon_debug_show()
1170 b->target, b->size); in vmballoon_debug_show()
1175 b->rate_alloc); in vmballoon_debug_show()
1233 static int __init vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
1237 b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b, in vmballoon_debugfs_init()
1239 if (IS_ERR(b->dbg_entry)) { in vmballoon_debugfs_init()
1240 error = PTR_ERR(b->dbg_entry); in vmballoon_debugfs_init()
1248 static void __exit vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument
1250 debugfs_remove(b->dbg_entry); in vmballoon_debugfs_exit()
1255 static inline int vmballoon_debugfs_init(struct vmballoon *b) in vmballoon_debugfs_init() argument
1260 static inline void vmballoon_debugfs_exit(struct vmballoon *b) in vmballoon_debugfs_exit() argument