/kernel/sched/ |
D | cpufreq_schedutil.c |
    57  unsigned long max;  member
    170  unsigned long util, unsigned long max)  in get_next_freq() argument
    176  freq = map_util_freq(util, freq, max);  in get_next_freq()
    207  unsigned long max, enum schedutil_type type,  in schedutil_cpu_util() argument
    215  return max;  in schedutil_cpu_util()
    224  if (unlikely(irq >= max))  in schedutil_cpu_util()
    225  return max;  in schedutil_cpu_util()
    254  if (util + dl_util >= max)  in schedutil_cpu_util()
    255  return max;  in schedutil_cpu_util()
    273  util = scale_irq_capacity(util, irq, max);  in schedutil_cpu_util()
    [all …]
|
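The get_next_freq() lines above turn a utilization value into a target frequency by calling map_util_freq(util, freq, max). The body of map_util_freq() is not part of this listing; below is a minimal userspace sketch of the usual linear util-to-frequency mapping with 25% headroom, purely as an illustration of the idea.

    /* Sketch only: not the kernel's map_util_freq(), whose exact definition
     * is not shown in this listing. Maps util in [0, max_cap] to a frequency,
     * with a 25% headroom factor and a cap at max_freq (both assumptions of
     * this sketch).
     */
    static unsigned long map_util_freq_sketch(unsigned long util,
                                              unsigned long max_freq,
                                              unsigned long max_cap)
    {
        unsigned long freq = (max_freq + (max_freq >> 2)) * util / max_cap;

        return freq < max_freq ? freq : max_freq;
    }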
D | cputime.c |
    262  static inline u64 account_other_time(u64 max)  in account_other_time() argument
    268  accounted = steal_account_process_time(max);  in account_other_time()
    270  if (accounted < max)  in account_other_time()
    271  accounted += irqtime_tick_accounted(max - accounted);  in account_other_time()
|
D | sched.h |
    2341  min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));  in uclamp_rq_util_with()
    2342  max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));  in uclamp_rq_util_with()
    2425  unsigned long max, enum schedutil_type type,
    2456  unsigned long max, enum schedutil_type type,  in schedutil_cpu_util() argument
    2470  unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)  in scale_irq_capacity() argument
    2472  util *= (max - irq);  in scale_irq_capacity()
    2473  util /= max;  in scale_irq_capacity()
    2485  unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)  in scale_irq_capacity() argument
|
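The scale_irq_capacity() fragment at lines 2470–2473 shows the arithmetic in full: the remaining utilization is scaled by the share of CPU capacity not consumed by IRQ time. Restated as a standalone helper with the same arithmetic:

    /* util * (max - irq) / max: scale utilization by the fraction of CPU
     * capacity left over after interrupt time, exactly as in the
     * scale_irq_capacity() lines quoted above.
     */
    static unsigned long scale_irq_capacity(unsigned long util,
                                            unsigned long irq,
                                            unsigned long max)
    {
        util *= (max - irq);
        util /= max;

        return util;
    }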
D | fair.c |
    103  #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)  argument
    849  max(delta_exec, curr->statistics.exec_max));  in update_curr()
    917  max(schedstat_val(se->statistics.wait_max), delta));  in update_stats_wait_end()
    1161  return max(smin, period);  in task_scan_start()
    1184  smax = max(smax, period);  in task_scan_max()
    1187  return max(smin, smax);  in task_scan_max()
    1994  int ratio = max(lr_ratio, ps_ratio);  in update_task_scan_period()
    2576  start = max(start, vma->vm_start);  in task_numa_work()
    3001  load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);  in calc_group_shares()
    3059  load_avg = max(cfs_rq->avg.load_avg,  in calc_group_runnable()
    [all …]
|
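The fits_capacity() macro at line 103 is the scheduler's headroom rule written in integer arithmetic: 1280/1024 = 1.25, so cap fits in max only when cap * 1.25 < max, i.e. cap stays below roughly 80% of max. A small standalone check (the sample values are illustrative):

    #include <stdio.h>

    /* Same comparison as the fair.c macro above: cap fits only if it leaves
     * ~20% headroom, i.e. cap * 1280 < max * 1024.
     */
    #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)

    int main(void)
    {
        printf("%d\n", fits_capacity(700, 1024)); /* 1: well under ~80% of max */
        printf("%d\n", fits_capacity(820, 1024)); /* 0: just over the ~80% boundary */
        return 0;
    }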
D | stop_task.c |
    76  max(curr->se.statistics.exec_max, delta_exec));  in put_prev_task_stop()
|
/kernel/ |
D | latencytop.c |
    120  if (lat->time > latency_record[i].max)  in account_global_scheduler_latency()
    121  latency_record[i].max = lat->time;  in account_global_scheduler_latency()
    169  lat.max = usecs;  in __account_scheduler_latency()
    197  if (lat.time > mylat->max)  in __account_scheduler_latency()
    198  mylat->max = lat.time;  in __account_scheduler_latency()
    229  lr->count, lr->time, lr->max);  in lstats_show()
|
D | ucount.c |
    196  int max;  in inc_ucount() local
    198  max = READ_ONCE(tns->ucount_max[type]);  in inc_ucount()
    199  if (!atomic_inc_below(&iter->ucount[type], max))  in inc_ucount()
|
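inc_ucount() reads the namespace limit once with READ_ONCE() and then bumps the counter only if it stays below that limit, via atomic_inc_below(). That helper's body is not in the listing; here is a hedged userspace sketch of an "increment while below a limit" primitive built on compare-and-swap:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Sketch in the spirit of the atomic_inc_below() call above; the kernel's
     * own helper is not shown here and may differ in detail.
     */
    static bool inc_below(atomic_int *v, int limit)
    {
        int cur = atomic_load(v);

        while (cur < limit) {
            /* On CAS failure, cur is reloaded with the current value and we retry. */
            if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                return true;
        }
        return false; /* limit reached; counter left unchanged */
    }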
D | resource.c |
    48  resource_size_t min, max, align;  member
    385  res->start = max(start, p->start);  in find_next_iomem_res()
    580  resource_size_t max)  in resource_clip() argument
    584  if (res->end > max)  in resource_clip()
    585  res->end = max;  in resource_clip()
    618  resource_clip(&tmp, constraint->min, constraint->max);  in __find_resource()
    721  resource_size_t max, resource_size_t align,  in allocate_resource() argument
    735  constraint.max = max;  in allocate_resource()
|
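resource_clip() (lines 580–585) trims a candidate window so it cannot extend past the caller's constraint; the listing shows the max-side clamp, and the min side is the obvious counterpart. A minimal restatement, with a stand-in struct since struct resource itself is not quoted here:

    #include <stdint.h>

    typedef uint64_t resource_size_t;

    /* Stand-in for the start/end pair of struct resource (illustrative). */
    struct window {
        resource_size_t start, end;
    };

    /* Clamp the window into [min, max]; the "end > max" branch mirrors the
     * resource_clip() lines quoted above.
     */
    static void clip(struct window *res, resource_size_t min, resource_size_t max)
    {
        if (res->start < min)
            res->start = min;
        if (res->end > max)
            res->end = max;
    }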
D | params.c |
    397  unsigned int min, unsigned int max,  in param_array() argument
    417  if (*num == max) {  in param_array()
    418  pr_err("%s: can only take %i arguments\n", name, max);  in param_array()
    448  return param_array(kp->mod, kp->name, val, 1, arr->max, arr->elem,  in param_array_set()
    459  for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {  in param_array_get()
    480  for (i = 0; i < (arr->num ? *arr->num : arr->max); i++)  in param_array_free()
|
D | range.c |
    43  common_start = max(range[i].start, start);  in add_range_with_merge()
    50  end = max(range[i].end, end);  in add_range_with_merge()
|
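add_range_with_merge() grows an existing range instead of appending a duplicate: max() of the two starts locates where a common region would begin, and max() of the two ends becomes the merged end. A compact illustration of the overlap test and merge (the real function's adjacency handling is not reproduced here):

    #include <stdbool.h>

    struct range { unsigned long long start, end; };

    /* The shared region of two ranges starts at max(start_a, start_b) and
     * ends at min(end_a, end_b); they overlap when that region is non-empty.
     */
    static bool ranges_overlap(struct range a, struct range b)
    {
        unsigned long long common_start = a.start > b.start ? a.start : b.start;
        unsigned long long common_end   = a.end   < b.end   ? a.end   : b.end;

        return common_start <= common_end;
    }

    /* Merging keeps the smaller start and the larger end, as in the
     * "end = max(range[i].end, end)" line above.
     */
    static struct range range_merge(struct range a, struct range b)
    {
        return (struct range){
            .start = a.start < b.start ? a.start : b.start,
            .end   = a.end   > b.end   ? a.end   : b.end,
        };
    }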
D | sysctl.c |
    2670  int *max;  member
    2691  (param->max && *param->max < tmp))  in do_proc_dointvec_minmax_conv()
    2720  .max = (int *) table->extra2,  in proc_dointvec_minmax()
    2737  unsigned int *max;  member
    2756  (param->max && *param->max < tmp))  in do_proc_douintvec_minmax_conv()
    2789  .max = (unsigned int *) table->extra2,  in proc_douintvec_minmax()
    2862  unsigned long *i, *min, *max;  in __do_proc_doulongvec_minmax() local
    2874  max = (unsigned long *) table->extra2;  in __do_proc_doulongvec_minmax()
    2907  if ((min && val < *min) || (max && val > *max)) {  in __do_proc_doulongvec_minmax()
|
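The minmax conversion helpers above reject writes that fall outside the bounds a sysctl table entry supplies through extra1 (min) and extra2 (max), as the ".max = (int *) table->extra2" line shows. A hedged example of a table entry wired up that way (the knob name and bounds are illustrative):

    #include <linux/sysctl.h>

    /* Illustrative only: an integer knob clamped to [0, 100] by
     * proc_dointvec_minmax() via extra1/extra2.
     */
    static int my_knob;
    static int my_knob_min = 0;
    static int my_knob_max = 100;

    static struct ctl_table my_table[] = {
        {
            .procname     = "my_knob",
            .data         = &my_knob,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec_minmax,
            .extra1       = &my_knob_min, /* lower bound */
            .extra2       = &my_knob_max, /* upper bound */
        },
        { }
    };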
D | kexec_file.c |
    504  temp_start = max(start, kbuf->buf_min);  in locate_mem_hole_bottom_up()
    688  kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);  in kexec_add_buffer()
|
/kernel/time/ |
D | test_udelay.c |
    30  int min = 0, max = 0, fail_count = 0;  in udelay_test_single() local
    48  if (i == 0 || time_passed > max)  in udelay_test_single()
    49  max = time_passed;  in udelay_test_single()
    60  (usecs * 1000) - allowed_error_ns, min, avg, max);  in udelay_test_single()
|
D | ntp.c |
    343  time_freq = max(freq_adj, -MAXFREQ_SCALED);  in ntp_update_offset()
    676  time_freq = max(time_freq, -MAXFREQ_SCALED);  in process_adjtimex_modes()
    692  time_constant = max(time_constant, 0l);  in process_adjtimex_modes()
|
D | clocksource.c |
    1151  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),  in available_clocksource_show()
    1157  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");  in available_clocksource_show()
|
D | timekeeping.c |
    221  u64 now, last, mask, max, delta;  in timekeeping_get_delta() local
    236  max = tkr->clock->max_cycles;  in timekeeping_get_delta()
    251  if (unlikely(delta > max)) {  in timekeeping_get_delta()
    2089  shift = max(0, shift);  in timekeeping_advance()
|
/kernel/cgroup/ |
D | rdma.c |
    44  int max;  member
    86  if (rpool->resources[index].max != S32_MAX)  in set_resource_limit()
    89  if (rpool->resources[index].max == S32_MAX)  in set_resource_limit()
    92  rpool->resources[index].max = new_max;  in set_resource_limit()
    285  if (new > rpool->resources[index].max) {  in rdmacg_try_charge()
    506  value = rpool->resources[i].max;  in print_rpool_values()
|
/kernel/locking/ |
D | locktorture.c |
    692  long max = 0, min = statp ? statp[0].n_lock_acquired : 0;  in __torture_print_stats() local
    700  if (max < statp[i].n_lock_fail)  in __torture_print_stats()
    701  max = statp[i].n_lock_fail;  in __torture_print_stats()
    708  sum, max, min, max / 2 > min ? "???" : "",  in __torture_print_stats()
|
D | lockdep.c |
    204  if (time > lt->max)  in lock_time_inc()
    205  lt->max = time;  in lock_time_inc()
    219  if (src->max > dst->max)  in lock_time_add()
    220  dst->max = src->max;  in lock_time_add()
    784  count = max(count, class->name_version);  in count_matching_names()
|
/kernel/dma/ |
D | contiguous.c |
    130  selected_size = max(size_bytes, cma_early_percent_memory());  in dma_contiguous_reserve()
    301  phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);  in rmem_cma_setup()
|
/kernel/bpf/ |
D | tnum.c |
    21  struct tnum tnum_range(u64 min, u64 max)  in tnum_range() argument
    23  u64 chi = min ^ max, delta;  in tnum_range()
|
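tnum_range(min, max) builds a tristate number that over-approximates every value in [min, max]: chi = min ^ max marks the bit positions that can differ across the interval, and everything at or below the highest differing bit is treated as unknown. A userspace sketch of that idea (the kernel's struct tnum and exact implementation are not reproduced here):

    #include <stdint.h>

    /* Minimal tnum: "value" holds the known bits, "mask" marks unknown bits. */
    struct tnum { uint64_t value; uint64_t mask; };

    static struct tnum tnum_range_sketch(uint64_t min, uint64_t max)
    {
        uint64_t chi = min ^ max, delta;
        int bits;

        if (chi == 0) /* single value: every bit is known */
            return (struct tnum){ .value = min, .mask = 0 };

        bits = 64 - __builtin_clzll(chi); /* 1-based index of the highest differing bit */
        if (bits > 63) /* bit 63 differs: 1ULL << 64 would be undefined */
            return (struct tnum){ .value = 0, .mask = ~0ULL };

        delta = (1ULL << bits) - 1; /* bits at or below the highest differing bit */
        return (struct tnum){ .value = min & ~delta, .mask = delta };
    }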
D | verifier.c |
    934  reg->umin_value = max(reg->umin_value, reg->var_off.value);  in __update_reg_bounds()
    4741  dst_reg->umin_value = max(dst_reg->umin_value, umin_val);  in adjust_scalar_min_max_vals()
    5111  reg->range = max(reg->range, new_range);  in __find_good_pkt_pointers()
    5118  reg->range = max(reg->range, new_range);  in __find_good_pkt_pointers()
    5402  true_reg->umin_value = max(true_reg->umin_value, true_umin);  in reg_set_min_max()
    5417  true_reg->smin_value = max(true_reg->smin_value, true_smin);  in reg_set_min_max()
    5430  false_reg->umin_value = max(false_reg->umin_value, false_umin);  in reg_set_min_max()
    5442  false_reg->smin_value = max(false_reg->smin_value, false_smin);  in reg_set_min_max()
    5517  false_reg->umin_value = max(false_reg->umin_value, false_umin);  in reg_set_min_max_inv()
    5529  false_reg->smin_value = max(false_reg->smin_value, false_smin);  in reg_set_min_max_inv()
    [all …]
|
/kernel/events/ |
D | uprobes.c |
    1267  find_node_in_range(struct inode *inode, loff_t min, loff_t max)  in find_node_in_range() argument
    1279  if (max < u->offset)  in find_node_in_range()
    1299  loff_t min, max;  in build_probe_list() local
    1305  max = min + (end - start) - 1;  in build_probe_list()
    1308  n = find_node_in_range(inode, min, max);  in build_probe_list()
    1319  if (u->inode != inode || u->offset > max)  in build_probe_list()
    1407  loff_t min, max;  in vma_has_uprobes() local
    1414  max = min + (end - start) - 1;  in vma_has_uprobes()
    1417  n = find_node_in_range(inode, min, max);  in vma_has_uprobes()
|
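build_probe_list() and vma_has_uprobes() both convert a VMA's address range into the inclusive file-offset window it maps, max = min + (end - start) - 1, and then walk only the uprobes whose offsets land inside that window. The offset arithmetic in isolation (the vma_offset parameter name is illustrative, not the kernel's):

    #include <stdint.h>

    typedef int64_t loff_t;

    /* A mapping of length (end - start) that starts at file offset vma_offset
     * covers offsets [vma_offset, vma_offset + (end - start) - 1] inclusive,
     * matching the "max = min + (end - start) - 1" lines above.
     */
    static void offset_window(unsigned long start, unsigned long end,
                              loff_t vma_offset, loff_t *min, loff_t *max)
    {
        *min = vma_offset;
        *max = *min + (loff_t)(end - start) - 1;
    }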
/kernel/trace/ |
D | trace_kdb.c |
    136  skip_entries = max(cnt + skip_entries, 0);  in kdb_ftdump()
|
D | Kconfig |
    411  bool "Trace max stack"
    698  last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
    699  last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
    700  last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
    701  last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
    702  last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
    703  last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
|