Searched refs:cap (Results 1 – 12 of 12) sorted by relevance

/kernel/
capability.c
171 kdata[i].effective = pE.cap[i]; in SYSCALL_DEFINE2()
172 kdata[i].permitted = pP.cap[i]; in SYSCALL_DEFINE2()
173 kdata[i].inheritable = pI.cap[i]; in SYSCALL_DEFINE2()
250 effective.cap[i] = kdata[i].effective; in SYSCALL_DEFINE2()
251 permitted.cap[i] = kdata[i].permitted; in SYSCALL_DEFINE2()
252 inheritable.cap[i] = kdata[i].inheritable; in SYSCALL_DEFINE2()
255 effective.cap[i] = 0; in SYSCALL_DEFINE2()
256 permitted.cap[i] = 0; in SYSCALL_DEFINE2()
257 inheritable.cap[i] = 0; in SYSCALL_DEFINE2()
261 effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK; in SYSCALL_DEFINE2()
[all …]
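
The capability.c hits are the word-by-word copy between the kernel's internal kernel_cap_t (an array of __u32 words addressed as cap[i]) and the per-word effective/permitted/inheritable layout that capget()/capset() exchange with userspace, with the top word masked by CAP_LAST_U32_VALID_MASK. A minimal sketch of that copy pattern, assuming the usual _KERNEL_CAPABILITY_U32S word count; the struct name below is illustrative, not the real UAPI type:

    /* Illustrative only: mirrors the per-word copy in the capget() path above. */
    struct cap_user_words { __u32 effective, permitted, inheritable; };

    static void caps_to_user_words(const kernel_cap_t *pE, const kernel_cap_t *pP,
                                   const kernel_cap_t *pI,
                                   struct cap_user_words *kdata)
    {
            int i;

            for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
                    kdata[i].effective   = pE->cap[i];
                    kdata[i].permitted   = pP->cap[i];
                    kdata[i].inheritable = pI->cap[i];
            }
    }
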
umh.c
519 cap_array[i] = usermodehelper_bset.cap[i]; in proc_cap_handler()
521 cap_array[i] = usermodehelper_inheritable.cap[i]; in proc_cap_handler()
543 new_cap.cap[i] = cap_array[i]; in proc_cap_handler()
auditsc.c
1292 kernel_cap_t *cap) in audit_log_cap() argument
1296 if (cap_isclear(*cap)) { in audit_log_cap()
1302 audit_log_format(ab, "%08x", cap->cap[CAP_LAST_U32 - i]); in audit_log_cap()
1453 audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable); in show_special()
1454 audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); in show_special()
1455 audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); in show_special()
1456 audit_log_cap(ab, "cap_pa", &context->capset.cap.ambient); in show_special()
2843 context->capset.cap.effective = new->cap_effective; in __audit_log_capset()
2844 context->capset.cap.inheritable = new->cap_effective; in __audit_log_capset()
2845 context->capset.cap.permitted = new->cap_permitted; in __audit_log_capset()
[all …]
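
The auditsc.c hits are audit_log_cap(), which prints a kernel_cap_t as fixed-width hex, most significant word first (index CAP_LAST_U32 - i); for example, a set containing only CAP_NET_ADMIN (bit 12) would come out as 0000000000001000. A hedged sketch of the same formatting loop, with pr_cont() standing in for the audit buffer:

    /* Illustrative: emit the capability words in the order audit_log_cap() uses. */
    static void print_cap_hex(const kernel_cap_t *cap)
    {
            int i;

            for (i = 0; i <= CAP_LAST_U32; i++)
                    pr_cont("%08x", cap->cap[CAP_LAST_U32 - i]);
            pr_cont("\n");
    }
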
audit.h
191 struct audit_cap_data cap; member
/kernel/sched/
cpudeadline.c
123 unsigned long cap, max_cap = 0; in cpudl_find() local
134 cap = capacity_orig_of(cpu); in cpudl_find()
136 if (cap > max_cap || in cpudl_find()
137 (cpu == task_cpu(p) && cap == max_cap)) { in cpudl_find()
138 max_cap = cap; in cpudl_find()
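
cpudeadline.c uses cap in cpudl_find() to prefer, among the candidate CPUs, the one with the largest capacity_orig_of(), breaking ties in favour of the task's current CPU. A stand-alone sketch of that selection, with a plain capacity array standing in for the scheduler's cpumask walk:

    /* Illustrative: largest capacity wins, the task's own CPU breaks ties. */
    static int pick_biggest_cpu(const unsigned long *capacity, int nr_cpus,
                                int task_cpu)
    {
            unsigned long cap, max_cap = 0;
            int cpu, best_cpu = -1;

            for (cpu = 0; cpu < nr_cpus; cpu++) {
                    cap = capacity[cpu];
                    if (cap > max_cap ||
                        (cpu == task_cpu && cap == max_cap)) {
                            max_cap = cap;
                            best_cpu = cpu;
                    }
            }
            return best_cpu;
    }
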
deadline.c
131 unsigned long cap = 0; in __dl_bw_capacity() local
135 cap += capacity_orig_of(i); in __dl_bw_capacity()
137 return cap; in __dl_bw_capacity()
227 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw) in __dl_overflow() argument
230 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; in __dl_overflow()
2848 unsigned long cap; in sched_dl_overflow() local
2864 cap = dl_bw_capacity(cpu); in sched_dl_overflow()
2867 !__dl_overflow(dl_b, cap, 0, new_bw)) { in sched_dl_overflow()
2873 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
3026 unsigned long flags, cap; in dl_cpuset_cpumask_can_shrink() local
[all …]
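
In deadline.c, cap is the summed CPU capacity that feeds the admission test: __dl_overflow() rejects a change when total_bw - old_bw + new_bw would exceed the per-unit limit scaled by capacity, where cap_scale(w, cap) is (w * cap) >> SCHED_CAPACITY_SHIFT. A hedged arithmetic sketch with made-up numbers (the field names are real, the values are not):

    /* Illustrative numbers only; SCHED_CAPACITY_SHIFT is 10, so full capacity is 1024. */
    static bool dl_overflow_example(void)
    {
            unsigned long cap = 1536;       /* one big (1024) plus one little (512) CPU       */
            u64 bw_limit = 996147;          /* ~95% of BW_UNIT, roughly the default dl_b->bw  */
            u64 total_bw = 1200000, old_bw = 0, new_bw = 400000;

            u64 scaled_limit = (bw_limit * cap) >> 10;          /* cap_scale(): 1494220 */

            return scaled_limit < total_bw - old_bw + new_bw;   /* 1600000 > limit: reject */
    }
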
sched.h
2960 unsigned long cap = arch_scale_cpu_capacity(cpu); in dl_task_fits_capacity() local
2962 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); in dl_task_fits_capacity()
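
dl_task_fits_capacity() in sched.h compares a CPU's capacity against the task's density shifted from BW_SHIFT (20) fixed point down to the SCHED_CAPACITY_SHIFT (10) scale. A worked example under those shift values, with made-up task parameters:

    /* Illustrative parameters: 5 ms of runtime per 10 ms deadline. */
    static bool dl_fits_example(unsigned long cpu_capacity)
    {
            u64 runtime_ns  = 5000000;
            u64 deadline_ns = 10000000;
            u64 density     = div64_u64(runtime_ns << 20, deadline_ns); /* 524288, i.e. 0.5 in BW units */

            /* >> (BW_SHIFT - SCHED_CAPACITY_SHIFT) maps that onto the 0..1024 capacity scale. */
            return cpu_capacity >= (density >> (20 - 10));              /* needs capacity >= 512 */
    }
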
fair.c
163 #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024) argument
844 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg() local
861 if (cap > 0) { in post_init_entity_util_avg()
866 if (sa->util_avg > cap) in post_init_entity_util_avg()
867 sa->util_avg = cap; in post_init_entity_util_avg()
869 sa->util_avg = cap; in post_init_entity_util_avg()
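
fair.c's fits_capacity() encodes a fixed headroom: (cap) * 1280 < (max) * 1024 is the integer form of cap < 0.8 * max, and post_init_entity_util_avg() clamps a new task's util_avg to half of the CPU's spare capacity. A tiny example of the margin:

    /* Same 1280/1024 (i.e. 1.25x) margin as the macro above. */
    static bool fits_capacity_example(unsigned long util, unsigned long max)
    {
            return util * 1280 < max * 1024;
    }
    /* fits_capacity_example(819, 1024) -> true; fits_capacity_example(820, 1024) -> false. */
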
/kernel/cgroup/
misc.c
325 unsigned long cap; in misc_cg_capacity_show() local
328 cap = READ_ONCE(misc_res_capacity[i]); in misc_cg_capacity_show()
329 if (cap) in misc_cg_capacity_show()
330 seq_printf(sf, "%s %lu\n", misc_res_name[i], cap); in misc_cg_capacity_show()
/kernel/power/
energy_model.c
341 unsigned long cap, prev_cap = 0; in em_dev_register_perf_domain() local
377 cap = arch_scale_cpu_capacity(cpu); in em_dev_register_perf_domain()
378 if (prev_cap && prev_cap != cap) { in em_dev_register_perf_domain()
385 prev_cap = cap; in em_dev_register_perf_domain()
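
energy_model.c checks that every CPU in a prospective performance domain reports the same arch_scale_cpu_capacity(); a domain mixing capacities is rejected during registration. A hedged sketch of that uniformity check over a plain array:

    /* Illustrative: return false if the CPUs do not all share one capacity value. */
    static bool caps_are_uniform(const unsigned long *capacity, int nr_cpus)
    {
            unsigned long cap, prev_cap = 0;
            int cpu;

            for (cpu = 0; cpu < nr_cpus; cpu++) {
                    cap = capacity[cpu];
                    if (prev_cap && prev_cap != cap)
                            return false;
                    prev_cap = cap;
            }
            return true;
    }
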
/kernel/rcu/
tree.c
4821 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; in rcu_init_geometry() local
4822 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); in rcu_init_geometry()
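
In rcu_init_geometry(), cap is the per-node CPU capacity of each rcu_node level, and the node count at that level is a rounded-up division of the possible CPU count. A small worked example, assuming the default leaf capacity of 16 (CONFIG_RCU_FANOUT_LEAF):

    /* Illustrative: 48 possible CPUs with a leaf capacity of 16 need 3 leaf rcu_node structures. */
    static int nodes_needed(int nr_cpu_ids, int cap)
    {
            return (nr_cpu_ids + cap - 1) / cap;    /* DIV_ROUND_UP(48, 16) == 3 */
    }
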
/kernel/bpf/
verifier.c
3778 u32 cap = bpf_map_flags_to_cap(map); in check_map_access_type() local
3780 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { in check_map_access_type()
3786 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { in check_map_access_type()
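
verifier.c turns the map's access flags into read/write capabilities before checking the program's access type. A hedged sketch of that mapping; the flag and capability names are the real BPF ones, but the helper below paraphrases bpf_map_flags_to_cap() rather than quoting it:

    /* Sketch: BPF_F_RDONLY_PROG / BPF_F_WRONLY_PROG restrict what programs may do;
     * with neither flag set, both accesses are allowed. */
    static u32 map_flags_to_cap_sketch(u32 map_flags)
    {
            if (map_flags & BPF_F_RDONLY_PROG)
                    return BPF_MAP_CAN_READ;
            if (map_flags & BPF_F_WRONLY_PROG)
                    return BPF_MAP_CAN_WRITE;
            return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
    }
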