// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright 2016 Red Hat, Inc. and/or its affiliates.
 */
#include <linux/kvm_host.h>
#include <linux/debugfs.h>
#include "lapic.h"
#include "mmu.h"
#include "mmu/mmu_internal.h"

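/* debugfs read handler: report the vCPU's LAPIC timer advancement, in ns. */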
static int vcpu_get_timer_advance_ns(void *data, u64 *val)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
	*val = vcpu->arch.apic->lapic_timer.timer_advance_ns;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_timer_advance_ns_fops, vcpu_get_timer_advance_ns, NULL, "%llu\n");

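/* debugfs read handler: report the vCPU's guest_mode statistic. */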
static int vcpu_get_guest_mode(void *data, u64 *val)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
	*val = vcpu->stat.guest_mode;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_guest_mode_fops, vcpu_get_guest_mode, NULL, "%lld\n");

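/* debugfs read handler: report the vCPU's current TSC offset. */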
static int vcpu_get_tsc_offset(void *data, u64 *val)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
	*val = vcpu->arch.tsc_offset;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_offset_fops, vcpu_get_tsc_offset, NULL, "%lld\n");

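/* debugfs read handler: report the vCPU's TSC scaling ratio (fixed-point). */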
static int vcpu_get_tsc_scaling_ratio(void *data, u64 *val)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
	*val = vcpu->arch.tsc_scaling_ratio;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_fops, vcpu_get_tsc_scaling_ratio, NULL, "%llu\n");

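/*
 * debugfs read handler: report how many fractional bits the TSC scaling
 * ratio uses; this is a global property of the host, not per-vCPU state.
 */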
static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val)
{
	*val = kvm_tsc_scaling_ratio_frac_bits;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n");

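/*
 * Create the per-vCPU debugfs files.  The LAPIC and TSC-scaling entries are
 * only created when the in-kernel LAPIC and hardware TSC scaling are
 * available, respectively.
 */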
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	debugfs_create_file("guest_mode", 0444, debugfs_dentry, vcpu,
			    &vcpu_guest_mode_fops);
	debugfs_create_file("tsc-offset", 0444, debugfs_dentry, vcpu,
			    &vcpu_tsc_offset_fops);

	if (lapic_in_kernel(vcpu))
		debugfs_create_file("lapic_timer_advance_ns", 0444,
				    debugfs_dentry, vcpu,
				    &vcpu_timer_advance_ns_fops);

	if (kvm_has_tsc_control) {
		debugfs_create_file("tsc-scaling-ratio", 0444,
				    debugfs_dentry, vcpu,
				    &vcpu_tsc_scaling_fops);
		debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
				    debugfs_dentry, vcpu,
				    &vcpu_tsc_scaling_frac_fops);
	}
}

/*
 * This covers statistics <1024 (11=log(1024)+1), which should be enough to
 * cover RMAP_RECYCLE_THRESHOLD.
 */
#define RMAP_LOG_SIZE 11

static const char *kvm_lpage_str[KVM_NR_PAGE_SIZES] = { "4K", "2M", "1G" };

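/*
 * Print a histogram of rmap (reverse-map) list lengths for each page size:
 * one row per level (4K/2M/1G), one column per power-of-two bucket of
 * pte_list_count() values.
 */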
static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
{
	struct kvm_rmap_head *rmap;
	struct kvm *kvm = m->private;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	unsigned int lpage_size, index;
	/* Still small enough to be on the stack */
	unsigned int *log[KVM_NR_PAGE_SIZES], *cur;
	int i, j, k, l, ret;

	if (!kvm_memslots_have_rmaps(kvm))
		return 0;

	ret = -ENOMEM;
	memset(log, 0, sizeof(log));
	for (i = 0; i < KVM_NR_PAGE_SIZES; i++) {
		log[i] = kcalloc(RMAP_LOG_SIZE, sizeof(unsigned int), GFP_KERNEL);
		if (!log[i])
			goto out;
	}

	mutex_lock(&kvm->slots_lock);
	write_lock(&kvm->mmu_lock);

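	/*
	 * Walk every memslot in every address space and bucket each rmap
	 * list length by ffs() of its entry count.
	 */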
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		for (j = 0; j < slots->used_slots; j++) {
			slot = &slots->memslots[j];
			for (k = 0; k < KVM_NR_PAGE_SIZES; k++) {
				rmap = slot->arch.rmap[k];
				lpage_size = kvm_mmu_slot_lpages(slot, k + 1);
				cur = log[k];
				for (l = 0; l < lpage_size; l++) {
					index = ffs(pte_list_count(&rmap[l]));
					if (WARN_ON_ONCE(index >= RMAP_LOG_SIZE))
						index = RMAP_LOG_SIZE - 1;
					cur[index]++;
				}
			}
		}
	}

	write_unlock(&kvm->mmu_lock);
	mutex_unlock(&kvm->slots_lock);

	/* index=0 counts no rmap; index=1 counts 1 rmap */
	seq_printf(m, "Rmap_Count:\t0\t1\t");
	for (i = 2; i < RMAP_LOG_SIZE; i++) {
		j = 1 << (i - 1);
		k = (1 << i) - 1;
		seq_printf(m, "%d-%d\t", j, k);
	}
	seq_printf(m, "\n");

	for (i = 0; i < KVM_NR_PAGE_SIZES; i++) {
		seq_printf(m, "Level=%s:\t", kvm_lpage_str[i]);
		cur = log[i];
		for (j = 0; j < RMAP_LOG_SIZE; j++)
			seq_printf(m, "%d\t", cur[j]);
		seq_printf(m, "\n");
	}

	ret = 0;
out:
	for (i = 0; i < KVM_NR_PAGE_SIZES; i++)
		kfree(log[i]);

	return ret;
}

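/*
 * Take a reference on the VM when the file is opened so that the VM cannot
 * be destroyed while the stats are being read.
 */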
static int kvm_mmu_rmaps_stat_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;

	if (!kvm_get_kvm_safe(kvm))
		return -ENOENT;

	return single_open(file, kvm_mmu_rmaps_stat_show, kvm);
}

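/* Drop the VM reference taken at open time. */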
static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;

	kvm_put_kvm(kvm);

	return single_release(inode, file);
}

static const struct file_operations mmu_rmaps_stat_fops = {
	.open = kvm_mmu_rmaps_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = kvm_mmu_rmaps_stat_release,
};

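/* Create the per-VM debugfs files. */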
int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	debugfs_create_file("mmu_rmaps_stat", 0644, kvm->debugfs_dentry, kvm,
			    &mmu_rmaps_stat_fops);
	return 0;
}