Searched refs:cost (Results 1 – 12 of 12) sorted by relevance
/kernel/power/
energy_model.c
     44  debugfs_create_ulong("cost", 0444, d, &ps->cost);  in em_debug_create_ps()
    159  table[i].cost = div64_u64(fmax * power_res,  in em_create_perf_table()
    161  if (table[i].cost >= prev_cost) {  in em_create_perf_table()
    165  prev_cost = table[i].cost;  in em_create_perf_table()
Kconfig
     28  Some systems prefer not to pay this cost on every invocation
    306  lower power usage at the cost of small performance overhead.
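The energy_model.c hits above all revolve around the per-state cost coefficient the Energy Model uses for energy estimates. A minimal sketch of the logic these fragments suggest, assuming fmax is the highest frequency in the perf-state table and power_res the state's (possibly scaled) power; this reconstructs only what is visible in the hits, not the full em_create_perf_table():

        /* Hedged sketch: cost is power scaled by fmax/freq, i.e. energy per
         * unit of work. Walking from the fastest state down, a state whose
         * cost does not drop below a faster state's is a net loss there.
         */
        u64 fmax = (u64) table[nr_states - 1].frequency;
        u64 prev_cost = U64_MAX;
        int i;

        for (i = nr_states - 1; i >= 0; i--) {
                u64 power_res = table[i].power;   /* assumption: per-state power */

                table[i].cost = div64_u64(fmax * power_res,
                                          table[i].frequency);
                if (table[i].cost >= prev_cost) {
                        /* Costs at least as much per unit of work as a
                         * faster state: treat this OPP as inefficient. */
                } else {
                        prev_cost = table[i].cost;
                }
        }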
/kernel/bpf/
queue_stack_maps.c
     72  u64 size, queue_size, cost;  in queue_stack_map_alloc() local
     75  cost = queue_size = sizeof(*qs) + size * attr->value_size;  in queue_stack_map_alloc()
     77  ret = bpf_map_charge_init(&mem, cost);  in queue_stack_map_alloc()
ringbuf.c
    155  u64 cost;  in ringbuf_map_alloc() local
    178  cost = sizeof(struct bpf_ringbuf_map) +  in ringbuf_map_alloc()
    181  err = bpf_map_charge_init(&rb_map->map.memory, cost);  in ringbuf_map_alloc()
bpf_local_storage.c
    556  u64 cost;  in bpf_local_storage_map_alloc() local
    568  cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);  in bpf_local_storage_map_alloc()
    570  ret = bpf_map_charge_init(&smap->map.memory, cost);  in bpf_local_storage_map_alloc()
devmap.c
    112  u64 cost = 0;  in dev_map_init_map() local
    138  cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;  in dev_map_init_map()
    140  cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);  in dev_map_init_map()
    144  err = bpf_map_charge_init(&dtab->map.memory, cost);  in dev_map_init_map()
lpm_trie.c
    546  u64 cost = sizeof(*trie), cost_per_node;  in trie_alloc() local
    575  cost += (u64) attr->max_entries * cost_per_node;  in trie_alloc()
    577  ret = bpf_map_charge_init(&trie->map.memory, cost);  in trie_alloc()
cpumap.c
     87  u64 cost;  in cpu_map_alloc() local
    113  cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);  in cpu_map_alloc()
    116  ret = bpf_map_charge_init(&cmap->map.memory, cost);  in cpu_map_alloc()
stackmap.c
     95  u64 cost, n_buckets;  in stack_map_alloc() local
    123  cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);  in stack_map_alloc()
    124  err = bpf_map_charge_init(&mem, cost + attr->max_entries *  in stack_map_alloc()
    129  smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));  in stack_map_alloc()
arraymap.c
     87  u64 cost, array_size, mask64;  in array_map_alloc() local
    130  cost = array_size;  in array_map_alloc()
    132  cost += (u64)attr->max_entries * elem_size * num_possible_cpus();  in array_map_alloc()
    134  ret = bpf_map_charge_init(&mem, cost);  in array_map_alloc()
hashtab.c
    425  u64 cost;  in htab_map_alloc() local
    462  cost = (u64) htab->n_buckets * sizeof(struct bucket) +  in htab_map_alloc()
    466  cost += (u64) round_up(htab->map.value_size, 8) *  in htab_map_alloc()
    469  cost += (u64) htab->elem_size * num_possible_cpus();  in htab_map_alloc()
    472  err = bpf_map_charge_init(&htab->map.memory, cost);  in htab_map_alloc()
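Every /kernel/bpf/ hit above follows the same allocation-time pattern: the map's worst-case memory footprint is accumulated into a u64 cost, widening before the multiply so that max_entries times an element size cannot wrap a 32-bit product, and the total is charged against the memlock rlimit via bpf_map_charge_init() before any memory is allocated. A minimal sketch of that shared shape, using a hypothetical struct example_map and pointer-table layout (illustrative only, not one of the maps listed):

        static int example_map_alloc(union bpf_attr *attr)
        {
                struct bpf_map_memory mem;   /* charge bookkeeping, as in the hits */
                u64 cost;
                int err;

                /* Accumulate the worst-case footprint in a u64, widening
                 * before the multiply exactly as the call sites above do. */
                cost = sizeof(struct example_map);
                cost += (u64) attr->max_entries * sizeof(void *);

                /* Charge the memlock rlimit up front; bail out before any
                 * allocation if the charge does not fit. */
                err = bpf_map_charge_init(&mem, cost);
                if (err)
                        return err;

                /* ... the actual allocation (e.g. bpf_map_area_alloc(), as in
                 * stackmap.c above) would follow, undoing the charge on failure. */
                return 0;
        }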
/kernel/
Kconfig.preempt
     28  at the cost of slightly lower throughput.
     51  system is under load, at the cost of slightly lower throughput