
Searched refs:cap (Results 1 – 12 of 12) sorted by relevance

/kernel/
capability.c
171 kdata[i].effective = pE.cap[i]; in SYSCALL_DEFINE2()
172 kdata[i].permitted = pP.cap[i]; in SYSCALL_DEFINE2()
173 kdata[i].inheritable = pI.cap[i]; in SYSCALL_DEFINE2()
250 effective.cap[i] = kdata[i].effective; in SYSCALL_DEFINE2()
251 permitted.cap[i] = kdata[i].permitted; in SYSCALL_DEFINE2()
252 inheritable.cap[i] = kdata[i].inheritable; in SYSCALL_DEFINE2()
255 effective.cap[i] = 0; in SYSCALL_DEFINE2()
256 permitted.cap[i] = 0; in SYSCALL_DEFINE2()
257 inheritable.cap[i] = 0; in SYSCALL_DEFINE2()
261 effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK; in SYSCALL_DEFINE2()
[all …]
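
For reference, a minimal userspace sketch of the copy pattern these capget()/capset() hits show: a capability set held as an array of 32-bit words is unpacked into per-word effective/permitted/inheritable records and re-packed with the top word masked down to the defined capability bits. The constants here (two words, CAP_LAST_CAP = 40) are illustrative stand-ins, not the kernel's own definitions.

/* Standalone sketch, not kernel code: mirrors the copy loops shown above. */
#include <stdint.h>
#include <stdio.h>

#define CAP_U32S            2                        /* assumed word count */
#define CAP_LAST_U32        (CAP_U32S - 1)
#define CAP_LAST_CAP        40                       /* illustrative value */
#define CAP_LAST_U32_VALID_MASK \
        ((uint32_t)((1ULL << (CAP_LAST_CAP + 1 - 32)) - 1))

struct cap_set  { uint32_t cap[CAP_U32S]; };
struct cap_data { uint32_t effective, permitted, inheritable; };

/* capget() direction: capability words -> per-word user records. */
static void unpack_caps(const struct cap_set *pE, const struct cap_set *pP,
                        const struct cap_set *pI, struct cap_data kdata[CAP_U32S])
{
        for (int i = 0; i < CAP_U32S; i++) {
                kdata[i].effective   = pE->cap[i];
                kdata[i].permitted   = pP->cap[i];
                kdata[i].inheritable = pI->cap[i];
        }
}

/* capset() direction: user records -> capability words, top word masked. */
static void pack_caps(const struct cap_data kdata[CAP_U32S],
                      struct cap_set *effective, struct cap_set *permitted,
                      struct cap_set *inheritable)
{
        for (int i = 0; i < CAP_U32S; i++) {
                effective->cap[i]   = kdata[i].effective;
                permitted->cap[i]   = kdata[i].permitted;
                inheritable->cap[i] = kdata[i].inheritable;
        }
        effective->cap[CAP_LAST_U32]   &= CAP_LAST_U32_VALID_MASK;
        permitted->cap[CAP_LAST_U32]   &= CAP_LAST_U32_VALID_MASK;
        inheritable->cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
}

int main(void)
{
        struct cap_set pE = { { 0xffffffffu, 0xffffffffu } };
        struct cap_set pP = pE, pI = pE;
        struct cap_data kdata[CAP_U32S];
        struct cap_set e, p, inh;

        unpack_caps(&pE, &pP, &pI, kdata);
        pack_caps(kdata, &e, &p, &inh);
        printf("top word after masking: %08x\n", (unsigned)e.cap[CAP_LAST_U32]);
        return 0;
}
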
umh.c
507 cap_array[i] = usermodehelper_bset.cap[i]; in proc_cap_handler()
509 cap_array[i] = usermodehelper_inheritable.cap[i]; in proc_cap_handler()
531 new_cap.cap[i] = cap_array[i]; in proc_cap_handler()
auditsc.c
1196 kernel_cap_t *cap) in audit_log_cap() argument
1200 if (cap_isclear(*cap)) { in audit_log_cap()
1206 audit_log_format(ab, "%08x", cap->cap[CAP_LAST_U32 - i]); in audit_log_cap()
1357 audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable); in show_special()
1358 audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); in show_special()
1359 audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); in show_special()
1360 audit_log_cap(ab, "cap_pa", &context->capset.cap.ambient); in show_special()
2587 context->capset.cap.effective = new->cap_effective; in __audit_log_capset()
2588 context->capset.cap.inheritable = new->cap_effective; in __audit_log_capset()
2589 context->capset.cap.permitted = new->cap_permitted; in __audit_log_capset()
[all …]
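
A standalone sketch of the formatting these audit hits show: audit_log_cap() walks the capability words from the highest index down and prints each as "%08x", so the logged hex string reads most-significant word first. The word count is an assumption for the example, and the cap_isclear() early return at line 1200 is approximated here by printing a bare 0.

/* Sketch only: format a two-word capability set highest word first. */
#include <stdint.h>
#include <stdio.h>

#define CAP_U32S     2
#define CAP_LAST_U32 (CAP_U32S - 1)

struct cap_set { uint32_t cap[CAP_U32S]; };

static int cap_isclear(const struct cap_set *c)
{
        for (int i = 0; i < CAP_U32S; i++)
                if (c->cap[i])
                        return 0;
        return 1;
}

static void log_cap(const char *prefix, const struct cap_set *c)
{
        if (cap_isclear(c)) {
                printf("%s=0\n", prefix);      /* approximates the early return */
                return;
        }
        printf("%s=", prefix);
        for (int i = 0; i < CAP_U32S; i++)     /* highest word first */
                printf("%08x", (unsigned)c->cap[CAP_LAST_U32 - i]);
        printf("\n");
}

int main(void)
{
        /* CAP_NET_ADMIN (bit 12) set in the low word, bit 8 in the high word. */
        struct cap_set eff = { { 1u << 12, 1u << 8 } };

        log_cap("cap_pe", &eff);               /* prints cap_pe=0000010000001000 */
        return 0;
}
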
audit.h
185 struct audit_cap_data cap; member
/kernel/sched/
cpudeadline.c
124 unsigned long cap, max_cap = 0; in cpudl_find() local
135 cap = capacity_orig_of(cpu); in cpudl_find()
137 if (cap > max_cap || in cpudl_find()
138 (cpu == task_cpu(p) && cap == max_cap)) { in cpudl_find()
139 max_cap = cap; in cpudl_find()
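
A standalone sketch of the selection in this cpudl_find() hit, under the assumption that the surrounding loop walks the candidate CPUs: keep the CPU with the largest original capacity, and on a capacity tie prefer the CPU the task already runs on so it is not migrated for nothing.

/* Sketch only: pick the highest-capacity candidate, preferring task_cpu on ties. */
#include <stddef.h>
#include <stdio.h>

static int pick_best_cpu(const unsigned long *cpu_capacity, size_t nr_cpus,
                         int task_cpu)
{
        unsigned long cap, max_cap = 0;
        int best = -1;

        for (size_t cpu = 0; cpu < nr_cpus; cpu++) {
                cap = cpu_capacity[cpu];
                if (cap > max_cap ||
                    ((int)cpu == task_cpu && cap == max_cap)) {
                        max_cap = cap;
                        best = (int)cpu;
                }
        }
        return best;
}

int main(void)
{
        unsigned long caps[] = { 446, 1024, 1024, 446 };

        /* CPU 1 wins on capacity; CPU 2 wins only when the task already runs there. */
        printf("%d %d\n", pick_best_cpu(caps, 4, 0), pick_best_cpu(caps, 4, 2));
        return 0;
}
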
deadline.c
100 unsigned long cap = 0; in __dl_bw_capacity() local
106 cap += capacity_orig_of(i); in __dl_bw_capacity()
108 return cap; in __dl_bw_capacity()
2706 unsigned long cap; in sched_dl_overflow() local
2722 cap = dl_bw_capacity(cpu); in sched_dl_overflow()
2725 !__dl_overflow(dl_b, cap, 0, new_bw)) { in sched_dl_overflow()
2731 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2929 unsigned long cap = dl_bw_capacity(cpu); in dl_bw_manage() local
2931 overflow = __dl_overflow(dl_b, cap, 0, dl_bw); in dl_bw_manage()
sched.h
323 static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap, in __dl_overflow() argument
327 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; in __dl_overflow()
340 unsigned long cap = arch_scale_cpu_capacity(cpu); in dl_task_fits_capacity() local
342 return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime; in dl_task_fits_capacity()
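
The sched.h hits are the capacity-aware deadline formulas. Below is a self-contained sketch of the arithmetic, where cap_scale() is assumed to be (v * cap) >> SCHED_CAPACITY_SHIFT with SCHED_CAPACITY_SHIFT = 10, i.e. scaling by cap/1024; the struct plumbing of the real __dl_overflow() is omitted and the runtimes/deadlines are illustrative.

/* Sketch of the capacity-scaled deadline checks, not the kernel functions. */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define cap_scale(v, cap)    (((unsigned long long)(v) * (cap)) >> SCHED_CAPACITY_SHIFT)

/* Mirrors line 342: the deadline, scaled to this CPU's capacity, must cover the runtime. */
static bool dl_task_fits_capacity(unsigned long long dl_deadline,
                                  unsigned long long dl_runtime,
                                  unsigned long cap)
{
        return cap_scale(dl_deadline, cap) >= dl_runtime;
}

/* Mirrors line 327: reject if the capacity-scaled budget is below the new total. */
static bool dl_overflow(unsigned long long bw_limit, unsigned long cap,
                        unsigned long long total_bw,
                        unsigned long long old_bw, unsigned long long new_bw)
{
        return cap_scale(bw_limit, cap) < total_bw - old_bw + new_bw;
}

int main(void)
{
        /* 10 ms runtime per 30 ms deadline on a capacity-512 CPU: the deadline
         * scales to 15 ms, so the task fits. */
        printf("fits: %d\n", dl_task_fits_capacity(30000000ULL, 10000000ULL, 512));
        /* On a capacity-256 CPU the deadline scales to 7.5 ms: it no longer fits. */
        printf("fits: %d\n", dl_task_fits_capacity(30000000ULL, 10000000ULL, 256));
        printf("overflow: %d\n", dl_overflow(1000000ULL, 512, 400000ULL, 0, 200000ULL));
        return 0;
}
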
fair.c
120 #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024) argument
784 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg() local
786 if (cap > 0) { in post_init_entity_util_avg()
791 if (sa->util_avg > cap) in post_init_entity_util_avg()
792 sa->util_avg = cap; in post_init_entity_util_avg()
794 sa->util_avg = cap; in post_init_entity_util_avg()
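
The fair.c hit at line 120 is the ~80% headroom rule: cap * 1280 < max * 1024 is the integer form of cap < 0.8 * max, so utilization "fits" a CPU only while it stays below roughly 80% of that CPU's capacity. A tiny standalone check of that margin:

/* Worked example of the fits_capacity() margin. */
#include <stdio.h>

#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)

int main(void)
{
        /* 800 * 1280 = 1024000 < 1024 * 1024 = 1048576, i.e. 800 < 819.2 */
        printf("%d\n", fits_capacity(800UL, 1024UL));  /* 1: fits */
        /* 900 * 1280 = 1152000 >= 1048576: above the 80% margin */
        printf("%d\n", fits_capacity(900UL, 1024UL));  /* 0: does not fit */
        return 0;
}
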
/kernel/cgroup/
misc.c
327 unsigned long cap; in misc_cg_capacity_show() local
330 cap = READ_ONCE(misc_res_capacity[i]); in misc_cg_capacity_show()
331 if (cap) in misc_cg_capacity_show()
332 seq_printf(sf, "%s %lu\n", misc_res_name[i], cap); in misc_cg_capacity_show()
/kernel/power/
energy_model.c
278 unsigned long cap, prev_cap = 0; in em_dev_register_perf_domain() local
318 cap = arch_scale_cpu_capacity(cpu); in em_dev_register_perf_domain()
319 if (prev_cap && prev_cap != cap) { in em_dev_register_perf_domain()
326 prev_cap = cap; in em_dev_register_perf_domain()
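
A standalone sketch of the check this energy_model.c hit performs: every CPU of a performance domain must report the same capacity, otherwise registration is rejected. The array-based form below is illustrative only.

/* Sketch only: reject a performance domain whose CPUs have mixed capacities. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool caps_are_uniform(const unsigned long *cpu_caps, size_t nr_cpus)
{
        unsigned long cap, prev_cap = 0;

        for (size_t i = 0; i < nr_cpus; i++) {
                cap = cpu_caps[i];
                if (prev_cap && prev_cap != cap)
                        return false;   /* mixed capacities: reject the domain */
                prev_cap = cap;
        }
        return true;
}

int main(void)
{
        unsigned long little[] = { 446, 446, 446, 446 };
        unsigned long mixed[]  = { 446, 446, 1024, 1024 };

        printf("%d %d\n", caps_are_uniform(little, 4), caps_are_uniform(mixed, 4));
        return 0;
}
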
/kernel/rcu/
tree.c
4782 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; in rcu_init_geometry() local
4783 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); in rcu_init_geometry()
/kernel/bpf/
verifier.c
3353 u32 cap = bpf_map_flags_to_cap(map); in check_map_access_type() local
3355 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { in check_map_access_type()
3361 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { in check_map_access_type()
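
A standalone sketch of the verifier check in this hit: the map's flags are collapsed into a read/write capability mask and the requested access type is tested against it, so a write into a read-only map (or a read from a write-only one) is rejected. The flag names and bit values below are illustrative, not the actual BPF definitions.

/* Sketch only: flags -> capability mask -> access check. */
#include <stdbool.h>
#include <stdio.h>

#define MAP_F_RDONLY_PROG  (1U << 0)   /* illustrative bit positions */
#define MAP_F_WRONLY_PROG  (1U << 1)

#define MAP_CAN_READ   (1U << 0)
#define MAP_CAN_WRITE  (1U << 1)

enum access_type { ACC_READ, ACC_WRITE };

static unsigned int map_flags_to_cap(unsigned int map_flags)
{
        if (map_flags & MAP_F_RDONLY_PROG)
                return MAP_CAN_READ;
        if (map_flags & MAP_F_WRONLY_PROG)
                return MAP_CAN_WRITE;
        return MAP_CAN_READ | MAP_CAN_WRITE;   /* no restriction: both allowed */
}

static bool access_allowed(unsigned int map_flags, enum access_type type)
{
        unsigned int cap = map_flags_to_cap(map_flags);

        if (type == ACC_WRITE && !(cap & MAP_CAN_WRITE))
                return false;          /* write into a read-only map */
        if (type == ACC_READ && !(cap & MAP_CAN_READ))
                return false;          /* read from a write-only map */
        return true;
}

int main(void)
{
        printf("%d\n", access_allowed(MAP_F_RDONLY_PROG, ACC_WRITE));  /* 0 */
        printf("%d\n", access_allowed(0, ACC_WRITE));                  /* 1 */
        return 0;
}
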