/kernel/cgroup/
pids.c
     57  atomic64_t limit;  (member)
     87  atomic64_set(&pids->limit, PIDS_MAX);  in pids_css_alloc()
    172  int64_t limit = atomic64_read(&p->limit);  in pids_try_charge()  (local)
    179  if (new > limit) {  in pids_try_charge()
    306  int64_t limit;  in pids_max_write()  (local)
    311  limit = PIDS_MAX;  in pids_max_write()
    315  err = kstrtoll(buf, 0, &limit);  in pids_max_write()
    319  if (limit < 0 || limit >= PIDS_MAX)  in pids_max_write()
    327  atomic64_set(&pids->limit, limit);  in pids_max_write()
    335  int64_t limit = atomic64_read(&pids->limit);  in pids_max_show()  (local)
    [all …]
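
A minimal user-space sketch of the check-then-charge pattern visible in the pids.c hits above: the limit lives in an atomic, pids_max_write()-style code validates and publishes it, and pids_try_charge()-style code compares the would-be count against a snapshot of it. All *_sketch names and PIDS_MAX_SKETCH are ours; C11 atomics stand in for the kernel's atomic64_t, and the real controller's walk up the cgroup hierarchy is omitted.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PIDS_MAX_SKETCH INT64_MAX   /* stands in for the kernel's PIDS_MAX ("max") */

struct pids_sketch {
	atomic_int_fast64_t counter;    /* current number of charged pids */
	atomic_int_fast64_t limit;      /* pids.max; PIDS_MAX_SKETCH means unlimited */
};

/* Mirrors the shape of pids_max_write(): reject negative or too-large values,
 * then publish the new limit atomically. */
static bool set_limit_sketch(struct pids_sketch *p, int64_t limit)
{
	if (limit < 0 || limit >= PIDS_MAX_SKETCH)
		return false;
	atomic_store(&p->limit, limit);
	return true;
}

/* Mirrors the shape of pids_try_charge(): read a snapshot of the limit and
 * refuse the charge if the new count would exceed it. */
static bool try_charge_sketch(struct pids_sketch *p, int64_t num)
{
	int64_t new = atomic_fetch_add(&p->counter, num) + num;
	int64_t limit = atomic_load(&p->limit);

	if (new > limit) {
		atomic_fetch_sub(&p->counter, num);   /* undo the charge */
		return false;
	}
	return true;
}

int main(void)
{
	struct pids_sketch p = { .counter = 0, .limit = PIDS_MAX_SKETCH };

	set_limit_sketch(&p, 2);              /* roughly: echo 2 > pids.max */
	bool a = try_charge_sketch(&p, 1);    /* ok: 1 <= 2 */
	bool b = try_charge_sketch(&p, 1);    /* ok: 2 <= 2 */
	bool c = try_charge_sketch(&p, 1);    /* refused: 3 > 2 */
	printf("%d %d %d\n", a, b, c);        /* 1 1 0 */
	return 0;
}
```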
|
/kernel/
kexec_core.c
    873  int limit;  (member)
    878  .limit = -1,
    883  .limit = -1,
    894  struct kexec_load_limit *limit = table->data;  in kexec_limit_handler()  (local)
    911  mutex_lock(&limit->mutex);  in kexec_limit_handler()
    912  if (limit->limit != -1 && val >= limit->limit)  in kexec_limit_handler()
    915  limit->limit = val;  in kexec_limit_handler()
    916  mutex_unlock(&limit->mutex);  in kexec_limit_handler()
    921  mutex_lock(&limit->mutex);  in kexec_limit_handler()
    922  val = limit->limit;  in kexec_limit_handler()
    [all …]
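
The kexec_core.c hits suggest a write-once-tightening limit: -1 means no limit has been configured yet, and a configured limit may only be lowered, all under a mutex. A hedged user-space sketch of that shape, with pthread mutexes in place of the kernel mutex and our own *_sketch names; the sysctl plumbing around kexec_limit_handler() is not modelled.

```c
/* build with: cc -pthread limit_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct load_limit_sketch {
	pthread_mutex_t mutex;
	int limit;                  /* -1: no limit configured yet */
};

static bool limit_set_sketch(struct load_limit_sketch *l, int val)
{
	bool ok = true;

	pthread_mutex_lock(&l->mutex);
	if (l->limit != -1 && val >= l->limit)
		ok = false;         /* an existing limit may only be tightened */
	else
		l->limit = val;
	pthread_mutex_unlock(&l->mutex);
	return ok;
}

static int limit_get_sketch(struct load_limit_sketch *l)
{
	pthread_mutex_lock(&l->mutex);
	int val = l->limit;
	pthread_mutex_unlock(&l->mutex);
	return val;
}

int main(void)
{
	struct load_limit_sketch l = {
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.limit = -1,
	};

	bool first = limit_set_sketch(&l, 5);   /* accepted: nothing was set */
	bool second = limit_set_sketch(&l, 7);  /* rejected: may not raise it */
	printf("%d %d %d\n", first, second, limit_get_sketch(&l));  /* 1 0 5 */
	return 0;
}
```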
|
panic.c
    240  unsigned int limit;  in check_panic_on_warn()  (local)
    245  limit = READ_ONCE(warn_limit);  in check_panic_on_warn()
    246  if (atomic_inc_return(&warn_count) >= limit && limit)  in check_panic_on_warn()
    248  origin, limit);  in check_panic_on_warn()
|
exit.c
   1011  unsigned int limit;  in make_task_dead()  (local)
   1040  limit = READ_ONCE(oops_limit);  in make_task_dead()
   1041  if (atomic_inc_return(&oops_count) >= limit && limit)  in make_task_dead()
   1042  panic("Oopsed too often (kernel.oops_limit is %d)", limit);  in make_task_dead()
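
The panic.c and exit.c hits above share one idiom: bump a global counter and act only when a configurable limit has been reached, with the trailing "&& limit" making a limit of 0 mean "no limit". A small stand-alone sketch of that check; warn_count_sketch and warn_limit_sketch are our names, and atomic_fetch_add() plus 1 plays the role of atomic_inc_return().

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint warn_count_sketch;
static unsigned int warn_limit_sketch = 3;   /* 0 would disable the check */

static bool hit_warn_limit_sketch(void)
{
	unsigned int limit = warn_limit_sketch;  /* single snapshot, like READ_ONCE() */

	/* atomic_fetch_add() returns the old value, so +1 mirrors atomic_inc_return() */
	return (atomic_fetch_add(&warn_count_sketch, 1) + 1) >= limit && limit;
}

int main(void)
{
	for (int i = 1; i <= 4; i++)
		printf("warn %d -> %s\n", i,
		       hit_warn_limit_sketch() ? "panic" : "continue");
	/* prints: continue, continue, panic, panic */
	return 0;
}
```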
|
user_namespace.c
     64  unsigned long limit = RLIM_INFINITY;  in enforced_nproc_rlimit()  (local)
     69  limit = rlimit(RLIMIT_NPROC);  in enforced_nproc_rlimit()
     71  return limit;  in enforced_nproc_rlimit()
|
audit.c
    428  static int audit_set_rate_limit(u32 limit)  in audit_set_rate_limit()  (argument)
    430  return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);  in audit_set_rate_limit()
    433  static int audit_set_backlog_limit(u32 limit)  in audit_set_backlog_limit()  (argument)
    435  return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);  in audit_set_backlog_limit()
|
signal.c
   3320  unsigned char limit, layout;  (member)
   3341  if (si_code <= sig_sicodes[sig].limit)  in known_siginfo_layout()
   3359  (si_code <= sig_sicodes[sig].limit)) {  in siginfo_layout()
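
The signal.c hits read from a per-signal table whose limit member is the highest si_code that still fits that signal's special siginfo layout. A rough sketch of that lookup shape; the table contents and layout names below are illustrative stand-ins, not the kernel's real sig_sicodes[] values.

```c
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

enum layout_sketch { LAYOUT_KILL_SKETCH, LAYOUT_FAULT_SKETCH, LAYOUT_CHLD_SKETCH };

static const struct {
	unsigned char limit, layout;    /* same member shape as the hit on the struct above */
} sicodes_sketch[] = {
	[SIGSEGV] = { 2, LAYOUT_FAULT_SKETCH },  /* e.g. SEGV_MAPERR, SEGV_ACCERR */
	[SIGCHLD] = { 6, LAYOUT_CHLD_SKETCH },   /* e.g. CLD_EXITED .. CLD_CONTINUED */
};

/* Mirrors the shape of known_siginfo_layout(): a (sig, si_code) pair is "known"
 * when the signal has a table entry and the code falls within its limit. */
static bool known_layout_sketch(int sig, int si_code)
{
	if (sig <= 0 || sig >= (int)(sizeof(sicodes_sketch) / sizeof(sicodes_sketch[0])))
		return false;
	if (!sicodes_sketch[sig].limit)
		return false;
	return si_code > 0 && si_code <= sicodes_sketch[sig].limit;
}

int main(void)
{
	printf("%d %d\n", known_layout_sketch(SIGSEGV, 1),    /* 1: within the limit */
	       known_layout_sketch(SIGSEGV, 9));              /* 0: beyond the limit */
	return 0;
}
```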
|
/kernel/dma/
contiguous.c
    212  void __init dma_contiguous_reserve(phys_addr_t limit)  in dma_contiguous_reserve()  (argument)
    216  phys_addr_t selected_limit = limit;  in dma_contiguous_reserve()
    221  pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);  in dma_contiguous_reserve()
    226  selected_limit = min_not_zero(limit_cmdline, limit);  in dma_contiguous_reserve()
    275  phys_addr_t limit, struct cma **res_cma,  in dma_contiguous_reserve_area()  (argument)
    281  ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,  in dma_contiguous_reserve_area()
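
The interesting line in contiguous.c is the min_not_zero() selection: the effective CMA limit is the smaller of the command-line limit and the caller's limit, with 0 treated as "not set" rather than as a real bound. A stand-alone sketch of that helper's behaviour; the kernel's min_not_zero() is a type-generic macro, while this fixed-type version and its names are ours.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_sketch_t;   /* stand-in for phys_addr_t */

/* Smaller of a and b, ignoring zero values (0 means "no limit set"). */
static phys_addr_sketch_t min_not_zero_sketch(phys_addr_sketch_t a,
					      phys_addr_sketch_t b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	/* cmdline limit unset (0): the caller's limit wins */
	printf("%#" PRIx64 "\n", min_not_zero_sketch(0, 0x40000000));
	/* both set: the tighter (smaller) limit wins */
	printf("%#" PRIx64 "\n", min_not_zero_sketch(0x20000000, 0x40000000));
	return 0;
}
```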
|
debug.c
    356  int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);  in bucket_find_contain()  (local)
    358  for (int i = 0; i < limit; i++) {  in bucket_find_contain()
|
/kernel/bpf/
lpm_trie.c
    170  u32 limit = min(node->prefixlen, key->prefixlen);  in __longest_prefix_match()  (local)
    186  if (prefixlen >= limit)  in __longest_prefix_match()
    187  return limit;  in __longest_prefix_match()
    199  if (prefixlen >= limit)  in __longest_prefix_match()
    200  return limit;  in __longest_prefix_match()
    211  if (prefixlen >= limit)  in __longest_prefix_match()
    212  return limit;  in __longest_prefix_match()
    221  if (prefixlen >= limit)  in __longest_prefix_match()
    222  return limit;  in __longest_prefix_match()
    [all …]
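
In lpm_trie.c the match length is capped at min(node->prefixlen, key->prefixlen), and every comparison step bails out with that cap as soon as it is reached. A byte-at-a-time sketch of the same idea; the kernel routine works a word at a time and deals with the trie's data layout, and all names here are ours.

```c
#include <stdint.h>
#include <stdio.h>

/* Number of leading bits on which a and b agree, at most 8. */
static uint32_t matching_bits_sketch(uint8_t a, uint8_t b)
{
	uint32_t n = 0;

	for (uint8_t diff = a ^ b; n < 8 && !(diff & 0x80); diff <<= 1)
		n++;
	return n;
}

static uint32_t longest_prefix_match_sketch(const uint8_t *node_data,
					    uint32_t node_prefixlen,
					    const uint8_t *key_data,
					    uint32_t key_prefixlen,
					    uint32_t databytes)
{
	uint32_t limit = node_prefixlen < key_prefixlen ? node_prefixlen
							: key_prefixlen;
	uint32_t prefixlen = 0;

	for (uint32_t i = 0; i < databytes; i++) {
		prefixlen += matching_bits_sketch(node_data[i], key_data[i]);
		if (prefixlen >= limit)
			return limit;       /* never report more than the cap */
		if (prefixlen < (i + 1) * 8)
			return prefixlen;   /* first mismatching bit found */
	}
	return prefixlen;
}

int main(void)
{
	/* 10.0.0.0/8 node vs. 10.1.0.0/16 key: they agree on the first 8 bits */
	const uint8_t node[4] = { 10, 0, 0, 0 };
	const uint8_t key[4]  = { 10, 1, 0, 0 };

	printf("%u\n", longest_prefix_match_sketch(node, 8, key, 16, 4));  /* 8 */
	return 0;
}
```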
|
/kernel/futex/
core.c
    808  unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;  in exit_robust_list()  (local)
    854  if (!--limit)  in exit_robust_list()
    902  unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;  in compat_exit_robust_list()  (local)
    954  if (!--limit)  in compat_exit_robust_list()
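
The futex hits show a defensive loop bound: the robust-list walk decrements a counter initialised to ROBUST_LIST_LIMIT so that a corrupted or circular user-space list cannot keep the kernel spinning. A sketch of that bounded walk over an ordinary linked list; node_sketch and LIST_LIMIT_SKETCH are our names, with 2048 matching ROBUST_LIST_LIMIT as defined in the futex code.

```c
#include <stddef.h>
#include <stdio.h>

#define LIST_LIMIT_SKETCH 2048

struct node_sketch {
	struct node_sketch *next;
	int value;
};

/* Visit at most LIST_LIMIT_SKETCH entries, then give up.
 * Returns the number of entries actually processed. */
static unsigned int walk_bounded_sketch(struct node_sketch *head)
{
	unsigned int limit = LIST_LIMIT_SKETCH;
	unsigned int seen = 0;

	while (head) {
		seen++;
		head = head->next;
		if (!--limit)
			break;      /* list too long (or circular): stop */
	}
	return seen;
}

int main(void)
{
	/* A deliberately circular two-node list: the walk still terminates. */
	struct node_sketch b = { .next = NULL, .value = 2 };
	struct node_sketch a = { .next = &b, .value = 1 };

	b.next = &a;
	printf("%u\n", walk_bounded_sketch(&a));   /* 2048, not an endless loop */
	return 0;
}
```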
|
/kernel/debug/
gdbstub.c
    407  unsigned char *limit;  in pack_threadid()  (local)
    410  limit = id + (BUF_THREAD_ID_SIZE / 2);  in pack_threadid()
    411  while (id < limit) {  in pack_threadid()
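
pack_threadid() sizes its loop with a pointer limit: the end of the thread-id buffer is computed once and the packing cursor runs up to it. A small sketch of pointer-bounded hex packing; only that loop shape is modelled here, not the rest of the kernel routine, and BUF_ID_SIZE_SKETCH plus the helper name are ours.

```c
#include <stdio.h>

#define BUF_ID_SIZE_SKETCH 8    /* bytes of hex output -> 4 input bytes */

static char *pack_id_sketch(char *pkt, const unsigned char *id)
{
	static const char hex[] = "0123456789abcdef";
	const unsigned char *limit = id + (BUF_ID_SIZE_SKETCH / 2);

	while (id < limit) {        /* two hex digits per input byte */
		*pkt++ = hex[*id >> 4];
		*pkt++ = hex[*id & 0x0f];
		id++;
	}
	*pkt = '\0';
	return pkt;
}

int main(void)
{
	const unsigned char tid[4] = { 0x00, 0x00, 0x12, 0xab };
	char out[BUF_ID_SIZE_SKETCH + 1];

	pack_id_sketch(out, tid);
	printf("%s\n", out);        /* prints 000012ab */
	return 0;
}
```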
|
/kernel/livepatch/
core.c
   1200  struct klp_patch *limit)  in klp_cleanup_module_patches_limited()  (argument)
   1206  if (patch == limit)  in klp_cleanup_module_patches_limited()
|
/kernel/time/
posix-cpu-timers.c
    841  static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)  in check_rlimit()  (argument)
    843  if (time < limit)  in check_rlimit()
|
/kernel/rcu/
Kconfig
    346  jiffy, and overrides the 32-callback batching if this limit
    349  Say Y here if you need tighter callback-limit enforcement.
|
/kernel/irq/
msi.c
    111  struct xa_limit limit = { .min = 0, .max = hwsize - 1 };  in msi_insert_desc()  (local)
    115  ret = xa_alloc(xa, &index, desc, limit, GFP_KERNEL);  in msi_insert_desc()
|
/kernel/power/
Kconfig
    195  int "Maximum number of user space wakeup sources (0 = no limit)"
|
/kernel/trace/
Kconfig
    971  This defines the limit of number of functions that can be
    974  This file can be reset, but the limit can not change in
|
/kernel/sched/
fair.c
    728  s64 vlag, limit;  in entity_lag()  (local)
    731  limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);  in entity_lag()
    733  return clamp(vlag, -limit, limit);  in entity_lag()
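
entity_lag() bounds an entity's virtual lag symmetrically: the limit is derived from the larger of twice the entity's slice and one tick, and the lag is clamped into [-limit, limit]. A stand-alone sketch of that clamp; the weight scaling that calc_delta_fair() applies is omitted, the tick value is purely illustrative, and all names are ours.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC_SKETCH 1000000LL     /* 1 ms tick, purely illustrative */

static int64_t clamp_sketch(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int64_t entity_lag_sketch(int64_t vlag, uint64_t slice_ns)
{
	/* limit = max(2 * slice, one tick); fair.c additionally scales this
	 * by the entity's weight via calc_delta_fair(). */
	int64_t limit = (int64_t)(2 * slice_ns > TICK_NSEC_SKETCH
				  ? 2 * slice_ns : TICK_NSEC_SKETCH);

	return clamp_sketch(vlag, -limit, limit);
}

int main(void)
{
	uint64_t slice = 3000000;     /* 3 ms slice -> limit of 6 ms */

	printf("%" PRId64 "\n", entity_lag_sketch(10000000, slice));    /*  6000000 */
	printf("%" PRId64 "\n", entity_lag_sketch(-10000000, slice));   /* -6000000 */
	printf("%" PRId64 "\n", entity_lag_sketch(123456, slice));      /*   123456 */
	return 0;
}
```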
|