Lines matching refs:size
188 u32 size; in bpf_percpu_cgroup_storage_copy() local
201 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_copy()
204 per_cpu_ptr(storage->percpu_buf, cpu), size); in bpf_percpu_cgroup_storage_copy()
205 off += size; in bpf_percpu_cgroup_storage_copy()
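The three hits above are the per-cpu read-out path: value_size is rounded up to 8 so whole 8-byte words can be moved, and each possible CPU's slot is copied into the flat output buffer at a running offset. A minimal sketch of the surrounding loop, assuming the for_each_possible_cpu iteration and zero-filled per-cpu area that mainline kernel/bpf/local_storage.c uses here:

	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		/* per-cpu areas are zero-filled and programs can only
		 * access value_size bytes of them, so copying the
		 * rounded-up size cannot leak kernel data
		 */
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}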
217 u32 size; in bpf_percpu_cgroup_storage_update() local
235 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_update()
238 value + off, size); in bpf_percpu_cgroup_storage_update()
239 off += size; in bpf_percpu_cgroup_storage_update()
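The update path is the mirror image: the same rounded size, with the copy direction reversed so the flat input buffer is scattered back into each CPU's slot. A sketch under the same assumptions:

	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		/* write direction: user-supplied buffer -> per-cpu slot */
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}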
364 u32 offset, size; in cgroup_storage_check_btf() local
385 size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id); in cgroup_storage_check_btf()
386 if (!btf_member_is_reg_int(btf, key_type, m, 0, size)) in cgroup_storage_check_btf()
394 size = sizeof_field(struct bpf_cgroup_storage_key, attach_type); in cgroup_storage_check_btf()
395 if (!btf_member_is_reg_int(btf, key_type, m, offset, size)) in cgroup_storage_check_btf()
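In cgroup_storage_check_btf(), size is reused to validate the BTF description of the map key: each member must be a plain integer of exactly the field's size at the field's offset. For reference, the key layout these two btf_member_is_reg_int() calls check, as defined in the UAPI header (include/uapi/linux/bpf.h):

	struct bpf_cgroup_storage_key {
		__u64	cgroup_inode_id;	/* cgroup inode id */
		__u32	attach_type;		/* program attach type */
	};

So the first call expects an 8-byte integer at offset 0, and the second a 4-byte integer at offset, which the source derives as offsetof(struct bpf_cgroup_storage_key, attach_type), i.e. 8.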
478 size_t size; in bpf_cgroup_storage_calculate_size() local
481 size = sizeof(struct bpf_storage_buffer) + map->value_size; in bpf_cgroup_storage_calculate_size()
482 *pages = round_up(sizeof(struct bpf_cgroup_storage) + size, in bpf_cgroup_storage_calculate_size()
485 size = map->value_size; in bpf_cgroup_storage_calculate_size()
486 *pages = round_up(round_up(size, 8) * num_possible_cpus(), in bpf_cgroup_storage_calculate_size()
490 return size; in bpf_cgroup_storage_calculate_size()
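bpf_cgroup_storage_calculate_size() is where the two storage flavors diverge: shared storage needs one buffer (header plus the raw value), while per-cpu storage needs the value rounded to 8 bytes and replicated across all possible CPUs; both branches also report a page count for memory accounting. A sketch of the whole helper, with the page rounding filled in as mainline does it (treat anything outside the matched lines as reconstruction):

static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		/* shared: one storage buffer header plus the raw value */
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		/* per-cpu: 8-byte-aligned value, one copy per possible CPU */
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}

Note that the per-cpu branch returns the unrounded value_size; the 8-byte rounding shows up again at the copy sites above and as the alignment argument to the per-cpu allocator below.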
499 size_t size; in bpf_cgroup_storage_alloc() local
506 size = bpf_cgroup_storage_calculate_size(map, &pages); in bpf_cgroup_storage_alloc()
514 storage->buf = bpf_map_kmalloc_node(map, size, gfp, in bpf_cgroup_storage_alloc()
520 storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp); in bpf_cgroup_storage_alloc()
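The allocation site then consumes that size: shared storage gets a single kmalloc charged to the map with no NUMA node preference, per-cpu storage an 8-byte-aligned per-cpu allocation. A sketch of the branch, assuming the __GFP_ZERO | GFP_USER gfp mask and goto-based unwinding that mainline uses around these lines:

	const gfp_t gfp = __GFP_ZERO | GFP_USER;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		/* one zeroed buffer, accounted against the map */
		storage->buf = bpf_map_kmalloc_node(map, size, gfp,
						    NUMA_NO_NODE);
		if (!storage->buf)
			goto enomem;
	} else {
		/* one zeroed, 8-byte-aligned slot per possible CPU */
		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
		if (!storage->percpu_buf)
			goto enomem;
	}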