/tools/testing/selftests/kvm/x86_64/ |
D | platform_info_test.c |
    40 struct kvm_enable_cap cap = {}; in set_msr_platform_info_enabled() local
    42 cap.cap = KVM_CAP_MSR_PLATFORM_INFO; in set_msr_platform_info_enabled()
    43 cap.flags = 0; in set_msr_platform_info_enabled()
    44 cap.args[0] = (int)enable; in set_msr_platform_info_enabled()
    45 vm_enable_cap(vm, &cap); in set_msr_platform_info_enabled()
|
D | kvm_pv_test.c |
    209 struct kvm_enable_cap cap = {0}; in main() local
    220 cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID; in main()
    221 cap.args[0] = 1; in main()
    222 vcpu_enable_cap(vm, VCPU_ID, &cap); in main()
|
D | hyperv_features.c |
    183 struct kvm_enable_cap cap = {0}; in guest_test_msrs_access() local
    317 cap.cap = KVM_CAP_HYPERV_SYNIC2; in guest_test_msrs_access()
    318 vcpu_enable_cap(vm, VCPU_ID, &cap); in guest_test_msrs_access()
    630 struct kvm_enable_cap cap = { in main() local
    631 .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, in main()
    641 vcpu_enable_cap(vm, VCPU_ID, &cap); in main()
    671 vcpu_enable_cap(vm, VCPU_ID, &cap); in main()
|
D | userspace_msr_exit_test.c |
    554 struct kvm_enable_cap cap = { in test_msr_filter_allow() local
    555 .cap = KVM_CAP_X86_USER_SPACE_MSR, in test_msr_filter_allow()
    567 vm_enable_cap(vm, &cap); in test_msr_filter_allow()
    677 struct kvm_enable_cap cap = { in test_msr_filter_deny() local
    678 .cap = KVM_CAP_X86_USER_SPACE_MSR, in test_msr_filter_deny()
    694 vm_enable_cap(vm, &cap); in test_msr_filter_deny()
    730 struct kvm_enable_cap cap = { in test_msr_permission_bitmap() local
    731 .cap = KVM_CAP_X86_USER_SPACE_MSR, in test_msr_permission_bitmap()
    743 vm_enable_cap(vm, &cap); in test_msr_permission_bitmap()
|
D | sync_regs_test.c |
    92 int rv, cap; in main() local
    97 cap = kvm_check_cap(KVM_CAP_SYNC_REGS); in main()
    98 if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) { in main()
    102 if ((cap & INVALID_SYNC_FIELD) != 0) { in main()
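All the x86_64 selftest hits above follow one pattern: fill a struct kvm_enable_cap (from the kvm UAPI header) with a KVM_CAP_* constant plus optional args, then pass it to the vm_enable_cap()/vcpu_enable_cap() library helpers, which issue the KVM_ENABLE_CAP ioctl (see the kvm_util.c entry further down). A minimal sketch of that pattern, assuming the selftest library's kvm_util.h and a VM already created by the surrounding test; KVM_CAP_MSR_PLATFORM_INFO is just the capability used by platform_info_test.c:

    #include <linux/kvm.h>
    #include "kvm_util.h"

    /* Toggle MSR_PLATFORM_INFO emulation for the guest, as the test above does. */
    static void enable_platform_info(struct kvm_vm *vm, bool enable)
    {
        struct kvm_enable_cap cap = {};

        cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
        cap.flags = 0;
        cap.args[0] = enable ? 1 : 0;
        vm_enable_cap(vm, &cap);    /* wraps ioctl(vm->fd, KVM_ENABLE_CAP, &cap) */
    }

vcpu-scoped capabilities (KVM_CAP_ENFORCE_PV_FEATURE_CPUID, the Hyper-V caps) are enabled the same way, only through vcpu_enable_cap() against the vCPU fd instead of the VM fd.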
|
/tools/power/x86/x86_energy_perf_policy/ |
D | x86_energy_perf_policy.c |
    769 void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str) in print_hwp_cap() argument
    775 cap->lowest, cap->efficient, cap->guaranteed, cap->highest); in print_hwp_cap()
    777 void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset) in read_hwp_cap() argument
    783 cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr)); in read_hwp_cap()
    784 cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr)); in read_hwp_cap()
    785 cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr)); in read_hwp_cap()
    786 cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr)); in read_hwp_cap()
    895 struct msr_hwp_cap cap; in print_cpu_msrs() local
    908 read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES); in print_cpu_msrs()
    909 print_hwp_cap(cpu, &cap, ""); in print_cpu_msrs()
    [all …]
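Here read_hwp_cap() unpacks the four 8-bit performance fields of MSR_HWP_CAPABILITIES (0x771): highest in bits 7:0, guaranteed in 15:8, most-efficient in 23:16, lowest in 31:24. A stand-alone sketch of just that unpacking, with the shifts written out instead of the tool's HWP_*_PERF() macros; the struct name, sample value, and the omission of the tool's msr_perf_2_ratio() scaling are all illustrative assumptions:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the tool's struct msr_hwp_cap. */
    struct hwp_cap {
        unsigned int highest, guaranteed, efficient, lowest;
    };

    static void decode_hwp_cap(uint64_t msr, struct hwp_cap *cap)
    {
        cap->highest    = (msr >>  0) & 0xff;   /* HWP_HIGHEST_PERF */
        cap->guaranteed = (msr >>  8) & 0xff;   /* HWP_GUARANTEED_PERF */
        cap->efficient  = (msr >> 16) & 0xff;   /* HWP_MOSTEFFICIENT_PERF */
        cap->lowest     = (msr >> 24) & 0xff;   /* HWP_LOWEST_PERF */
    }

    int main(void)
    {
        struct hwp_cap cap;

        decode_hwp_cap(0x0c10161e, &cap);   /* made-up register value */
        printf("low %u eff %u guar %u high %u\n",
               cap.lowest, cap.efficient, cap.guaranteed, cap.highest);
        return 0;
    }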
|
/tools/perf/util/ |
D | cap.h |
    13 bool perf_cap__capable(cap_value_t cap);
    20 static inline bool perf_cap__capable(int cap __maybe_unused) in perf_cap__capable()
|
D | cap.c |
    12 bool perf_cap__capable(cap_value_t cap) in perf_cap__capable() argument
    20 if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val) != 0) in perf_cap__capable()
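perf_cap__capable() is a thin libcap wrapper: fetch the process capability state, test one capability's CAP_EFFECTIVE flag, free the state (cap.h also declares a stub for builds without libcap). A sketch of the same check, assuming libcap is available and linked with -lcap; the function name here is illustrative:

    #include <stdbool.h>
    #include <sys/capability.h>    /* libcap */

    /* Is `cap` (e.g. CAP_SYS_ADMIN) in this process's effective set? */
    static bool capable(cap_value_t cap)
    {
        cap_flag_value_t val = CAP_CLEAR;
        cap_t caps = cap_get_proc();
        bool ret;

        if (!caps)
            return false;
        ret = cap_get_flag(caps, cap, CAP_EFFECTIVE, &val) == 0 && val == CAP_SET;
        cap_free(caps);
        return ret;
    }

bpftool's handle_perms() (listed below under /tools/bpf/bpftool/) runs the same cap_get_flag() loop over CAP_BPF, CAP_PERFMON and friends.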
|
D | hashmap.c |
    46 map->cap = 0; in hashmap__init()
    73 map->cap = map->cap_bits = map->sz = 0; in hashmap__clear()
    92 return map->cap; in hashmap__capacity()
    98 return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap); in hashmap_needs_to_grow()
    122 map->cap = new_cap; in hashmap_grow()
|
D | hashmap.h |
    58 size_t cap; member
    68 .cap = 0, \
    156 for (bkt = 0; bkt < map->cap; bkt++) \
    168 for (bkt = 0; bkt < map->cap; bkt++) \
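In this hashmap (the perf copy of the tools/lib/bpf implementation, which appears again below), cap is the bucket count, always 1 << cap_bits, and hashmap_needs_to_grow() triggers a resize once inserting one more entry would push the load factor past 3/4. A hedged sketch of just that sizing logic; the struct and the starting size are illustrative, and the real hashmap_grow() also reallocates the bucket array and rehashes every entry:

    #include <stdbool.h>
    #include <stddef.h>

    struct hmap_size {
        size_t cap;        /* number of buckets, 1 << cap_bits */
        size_t cap_bits;
        size_t sz;         /* number of stored entries */
    };

    /* Grow when empty or when one more entry would exceed 75% load,
     * mirroring hashmap_needs_to_grow(). */
    static bool needs_to_grow(const struct hmap_size *m)
    {
        return m->cap == 0 || (m->sz + 1) * 4 / 3 > m->cap;
    }

    static void grow(struct hmap_size *m)
    {
        m->cap_bits = m->cap ? m->cap_bits + 1 : 2;  /* start small, then double */
        m->cap = (size_t)1 << m->cap_bits;
    }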
|
D | python-ext-sources | 10 util/cap.c
|
/tools/testing/selftests/clone3/ |
D | clone3_cap_checkpoint_restore.c |
    102 struct libcap *cap; in set_capability() local
    121 cap = (struct libcap *) caps; in set_capability()
    124 cap->data[1].effective |= 1 << (40 - 32); in set_capability()
    125 cap->data[1].permitted |= 1 << (40 - 32); in set_capability()
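CAP_CHECKPOINT_RESTORE is capability number 40, so its bit lives in the second 32-bit word of the _LINUX_CAPABILITY_VERSION_3 data array; that is why the test masks data[1] with 1 << (40 - 32) after casting libcap's opaque cap_t to its internal layout (test_verifier.c below does the same for CAP_PERFMON (38) and CAP_BPF (39)). A sketch of the same bit manipulation done with the raw capget()/capset() syscalls instead of libcap internals, assuming CAP_CHECKPOINT_RESTORE's value of 40 on kernels that predate the define:

    #include <linux/capability.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef CAP_CHECKPOINT_RESTORE
    #define CAP_CHECKPOINT_RESTORE 40
    #endif

    /* Raise CAP_CHECKPOINT_RESTORE in the effective and permitted sets.
     * capset() can only raise an effective bit that is already permitted;
     * OR-ing the permitted bit is a no-op when we already hold it and makes
     * the call fail with EPERM when we do not. */
    static int raise_checkpoint_restore(void)
    {
        struct __user_cap_header_struct hdr = {
            .version = _LINUX_CAPABILITY_VERSION_3,
            .pid = 0,    /* current thread */
        };
        struct __user_cap_data_struct data[2];

        if (syscall(SYS_capget, &hdr, data))
            return -1;
        data[CAP_CHECKPOINT_RESTORE / 32].effective |= 1U << (CAP_CHECKPOINT_RESTORE % 32);
        data[CAP_CHECKPOINT_RESTORE / 32].permitted |= 1U << (CAP_CHECKPOINT_RESTORE % 32);
        return syscall(SYS_capset, &hdr, data);
    }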
|
/tools/lib/bpf/ |
D | hashmap.c |
    46 map->cap = 0; in hashmap__init()
    73 map->cap = map->cap_bits = map->sz = 0; in hashmap__clear()
    92 return map->cap; in hashmap__capacity()
    98 return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap); in hashmap_needs_to_grow()
    122 map->cap = new_cap; in hashmap_grow()
|
D | hashmap.h |
    58 size_t cap; member
    68 .cap = 0, \
    156 for (bkt = 0; bkt < map->cap; bkt++) \
    168 for (bkt = 0; bkt < map->cap; bkt++) \
|
/tools/testing/selftests/net/mptcp/ |
D | mptcp_connect.c |
    420 size_t cap = rand(); in do_rnd_read() local
    422 cap &= 0xffff; in do_rnd_read()
    424 if (cap == 0) in do_rnd_read()
    425 cap = 1; in do_rnd_read()
    426 else if (cap > len) in do_rnd_read()
    427 cap = len; in do_rnd_read()
    430 ret = recv(fd, buf, cap, MSG_PEEK); in do_rnd_read()
    433 ret = recv(fd, buf, cap, MSG_PEEK); in do_rnd_read()
    434 ret = (ret < 0) ? ret : read(fd, buf, cap); in do_rnd_read()
    436 ret = do_recvmsg_cmsg(fd, buf, cap); in do_rnd_read()
    [all …]
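Here cap is not a capability at all: do_rnd_read() draws a random read length, clamps it to [1, 0xffff] and to the remaining buffer, and uses it as the size for recv()/read() so each pass exercises a different chunk size. A stand-alone sketch of that clamping only; the MSG_PEEK double-recv and the cmsg variant from the real test are left out:

    #include <stdlib.h>
    #include <unistd.h>

    /* Read up to `len` bytes using a randomly capped chunk size. */
    static ssize_t rnd_read(int fd, char *buf, size_t len)
    {
        size_t cap = rand();

        cap &= 0xffff;      /* keep each chunk under 64 KiB */
        if (cap == 0)
            cap = 1;        /* always make forward progress */
        else if (cap > len)
            cap = len;      /* never read past the caller's buffer */

        return read(fd, buf, cap);
    }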
|
/tools/testing/selftests/kvm/ |
D | dirty_log_perf_test.c |
    185 struct kvm_enable_cap cap = {}; in run_test() local
    201 cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2; in run_test()
    202 cap.args[0] = dirty_log_manual_caps; in run_test()
    203 vm_enable_cap(vm, &cap); in run_test()
|
D | dirty_log_test.c |
    220 struct kvm_enable_cap cap = {}; in clear_log_create_vm_done() local
    227 cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2; in clear_log_create_vm_done()
    228 cap.args[0] = manual_caps; in clear_log_create_vm_done()
    229 vm_enable_cap(vm, &cap); in clear_log_create_vm_done()
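Both dirty-log tests enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 the same way: args[0] carries the bitmask of manual-protect modes, first intersected with what kvm_check_cap() reports as supported. A sketch of that, assuming the selftest library's helpers and the KVM_DIRTY_LOG_* flag names from <linux/kvm.h>; the real tests assert instead of returning on failure:

    #include <stdint.h>
    #include <linux/kvm.h>
    #include "kvm_util.h"

    /* Enable manual dirty-log protection on `vm`, keeping only the modes
     * the running kernel advertises.  Returns the modes actually enabled. */
    static uint64_t enable_manual_dirty_log(struct kvm_vm *vm)
    {
        struct kvm_enable_cap cap = {};
        uint64_t modes;

        modes = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        modes &= KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET;
        if (!modes)
            return 0;    /* kernel too old: stick with the legacy dirty log */

        cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
        cap.args[0] = modes;
        vm_enable_cap(vm, &cap);
        return modes;
    }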
|
/tools/testing/selftests/kvm/s390x/ |
D | sync_regs_test.c |
    83 int rv, cap; in main() local
    88 cap = kvm_check_cap(KVM_CAP_SYNC_REGS); in main()
    89 if (!cap) { in main()
|
/tools/testing/selftests/vm/ |
D | mlock-random-test.c |
    28 cap_t cap = cap_init(); in set_cap_limits() local
    38 if (cap_set_proc(cap)) { in set_cap_limits()
|
D | memfd_secret.c |
    214 cap_t cap = cap_init(); in set_cap_limits() local
    224 if (cap_set_proc(cap)) { in set_cap_limits()
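Both vm selftests share the same set_cap_limits() shape: cap_init() builds a fresh capability state with every flag cleared, cap_set_proc() installs it (dropping CAP_IPC_LOCK among others), and setrlimit() caps RLIMIT_MEMLOCK so the locked-memory paths really hit the limit. A sketch of that sequence, assuming the test starts with enough privilege for setrlimit() and links against -lcap; error handling is reduced to a single return code:

    #include <stdio.h>
    #include <sys/capability.h>    /* libcap */
    #include <sys/resource.h>

    static int set_cap_limits(rlim_t max)
    {
        struct rlimit new = { .rlim_cur = max, .rlim_max = max };
        cap_t cap = cap_init();    /* all-clear capability state */
        int ret = 0;

        if (!cap)
            return -1;
        if (setrlimit(RLIMIT_MEMLOCK, &new) || cap_set_proc(cap)) {
            perror("set_cap_limits");
            ret = -1;
        }
        cap_free(cap);
        return ret;
    }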
|
/tools/testing/selftests/bpf/ |
D | test_verifier.c |
    947 struct libcap *cap; in set_admin() local
    955 cap = (struct libcap *)caps; in set_admin()
    969 cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32); in set_admin()
    970 cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32); in set_admin()
    972 cap->data[1].effective &= ~(1 << (38 - 32)); in set_admin()
    973 cap->data[1].effective &= ~(1 << (39 - 32)); in set_admin()
    1252 struct libcap *cap; in is_admin() local
    1266 cap = (struct libcap *)caps; in is_admin()
    1267 bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32)); in is_admin()
    1268 perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32)); in is_admin()
|
/tools/bpf/bpftool/ |
D | feature.c |
    782 cap_value_t cap; in handle_perms() member
    814 cap_value_t cap = bpf_caps[i].cap; in handle_perms() local
    816 if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val)) { in handle_perms()
    824 cap_list[nb_bpf_caps++] = cap; in handle_perms()
|
/tools/testing/selftests/tpm2/ |
D | tpm2.py |
    688 def __get_cap_cnt(self, cap, pt, cnt): argument
    696 cap, pt, cnt)
    699 more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
    709 def get_cap(self, cap, pt): argument
    714 next_handles, more_data = self.__get_cap_cnt(cap, pt, 1)
|
/tools/testing/selftests/kvm/lib/ |
D | kvm_util.c |
    78 int kvm_check_cap(long cap) in kvm_check_cap() argument
    84 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); in kvm_check_cap()
    105 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) in vm_enable_cap() argument
    109 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
    130 struct kvm_enable_cap *cap) in vcpu_enable_cap() argument
    137 r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap); in vcpu_enable_cap()
    146 struct kvm_enable_cap cap = { 0 }; in vm_enable_dirty_ring() local
    148 cap.cap = KVM_CAP_DIRTY_LOG_RING; in vm_enable_dirty_ring()
    149 cap.args[0] = ring_size; in vm_enable_dirty_ring()
    150 vm_enable_cap(vm, &cap); in vm_enable_dirty_ring()
|
/tools/testing/selftests/kvm/include/ |
D | kvm_util.h |
    84 int kvm_check_cap(long cap);
    85 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
    87 struct kvm_enable_cap *cap);
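These are the library entry points every enable-cap call above funnels into: kvm_check_cap() issues KVM_CHECK_EXTENSION on the /dev/kvm fd, while vm_enable_cap()/vcpu_enable_cap() issue KVM_ENABLE_CAP against the VM or vCPU fd (the library versions also assert on failure). A minimal sketch of the same two ioctls against raw file descriptors, with no selftest library and error handling reduced to the ioctl return values; the function names are illustrative:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* KVM_CHECK_EXTENSION: a non-zero return means the capability is available
     * (for some capabilities the value itself carries extra information). */
    static int check_cap(int kvm_fd, long cap)
    {
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
    }

    /* KVM_ENABLE_CAP on a VM fd, mirroring vm_enable_cap(). */
    static int enable_vm_cap(int vm_fd, unsigned int capability, __u64 arg0)
    {
        struct kvm_enable_cap cap = {
            .cap = capability,
            .args = { arg0 },
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

A caller would obtain kvm_fd by opening /dev/kvm with O_RDWR and vm_fd from the KVM_CREATE_VM ioctl before enabling anything; vcpu-scoped capabilities use the same struct against a vCPU fd from KVM_CREATE_VCPU.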
|