/kernel/irq/

  generic-chip.c
    247  gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);  in irq_alloc_generic_chip()
    308  gc_sz = struct_size(gc, chip_types, num_ct);  in __irq_alloc_domain_generic_chips()
    309  dgc_sz = struct_size(dgc, gc, numchips);  in __irq_alloc_domain_generic_chips()

  devres.c
    224  gc = devm_kzalloc(dev, struct_size(gc, chip_types, num_ct), GFP_KERNEL);  in devm_irq_alloc_generic_chip()

  irqdomain.c
    146  domain = kzalloc_node(struct_size(domain, revmap, size),  in __irq_domain_create()
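All of these irq sites show the basic struct_size() pattern: a header struct ending in a flexible array member, sized and allocated in one step. The macro, from include/linux/overflow.h, evaluates to sizeof(*p) + count * sizeof(*p->member) and saturates to SIZE_MAX on arithmetic overflow, so an oversized count yields a failed allocation instead of an undersized buffer. A minimal userspace sketch of the same idea, using hypothetical names (struct chip, struct_size_of, alloc_chip), not kernel code:

    #include <stdint.h>
    #include <stdlib.h>

    struct chip {
        int irq_base;
        unsigned int num_ct;
        struct { int type; } chip_types[];   /* flexible array member */
    };

    /* Userspace stand-in for the kernel's struct_size(): saturate to
     * SIZE_MAX on overflow so the allocation fails rather than truncates. */
    static size_t struct_size_of(size_t base, size_t elem, size_t n)
    {
        if (n && elem > (SIZE_MAX - base) / n)
            return SIZE_MAX;
        return base + n * elem;
    }

    struct chip *alloc_chip(unsigned int num_ct)
    {
        struct chip *gc;
        size_t sz = struct_size_of(sizeof(*gc), sizeof(gc->chip_types[0]), num_ct);

        gc = calloc(1, sz);     /* the kernel sites use kzalloc(sz, GFP_KERNEL) */
        if (!gc)
            return NULL;
        gc->num_ct = num_ct;
        return gc;
    }

The 308/309 pair reuses the same arithmetic to size two nested variable-length structures before a single allocation that covers both.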
/kernel/module/

  sysfs.c
     83  size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),  in add_sect_attrs()
    192  notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),  in add_notes_attrs()
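The add_sect_attrs() hit wraps struct_size() in ALIGN(): when a second variable-length array is packed behind the flexible array in the same allocation, the header portion is rounded up so the trailing array starts at an aligned offset. A hedged sketch of that layout, with illustrative names (ALIGN_UP, struct attr, alloc_sect_attrs) and no overflow checking for brevity:

    #include <stdlib.h>

    /* Round x up to a multiple of a (a power of two), like the kernel's ALIGN(). */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct attr {
        const char *name;
    };

    struct sect_attrs {
        unsigned int nsections;
        struct attr attrs[];            /* flexible array member */
    };

    /* One allocation: header + attrs[], then a trailing pointer array;
     * ALIGN_UP keeps the trailing array suitably aligned. */
    struct sect_attrs *alloc_sect_attrs(unsigned int nloaded, char ***names)
    {
        size_t hdr = ALIGN_UP(sizeof(struct sect_attrs) +
                              nloaded * sizeof(struct attr), sizeof(char *));
        struct sect_attrs *sa = calloc(1, hdr + nloaded * sizeof(char *));

        if (!sa)
            return NULL;
        sa->nsections = nloaded;
        *names = (char **)((char *)sa + hdr);
        return sa;
    }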
/kernel/bpf/

  btf.c  (these hits match a local u32 variable named struct_size, not the struct_size() macro; the "local" tag marks its declarations)
    2044  u32 struct_size = struct_type->size;  in btf_int_check_member() local
    2065  if (struct_size < bytes_offset ||  in btf_int_check_member()
    2066      struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {  in btf_int_check_member()
    2082  u32 struct_size = struct_type->size;  in btf_int_check_kflag_member() local
    2121  if (struct_size < bytes_offset ||  in btf_int_check_kflag_member()
    2122      struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {  in btf_int_check_kflag_member()
    2451  u32 struct_size, struct_bits_off, bytes_offset;  in btf_ptr_check_member() local
    2453  struct_size = struct_type->size;  in btf_ptr_check_member()
    2463  if (struct_size - bytes_offset < sizeof(void *)) {  in btf_ptr_check_member()
    2776  u32 struct_size, bytes_offset;  in btf_array_check_member() local
    [all …]

  reuseport_array.c
    158  array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);  in reuseport_array_alloc()

  local_storage.c
    167  new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),  in cgroup_storage_update_elem()

  core.c
    858  pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),  in alloc_new_pack()
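alloc_new_pack() is the bitmap variant of the pattern: the flexible array stores bits, so the element count handed to struct_size() is BITS_TO_LONGS(nbits), the number of longs needed to hold the bitmap. A sketch under the same convention (struct pack and alloc_pack are illustrative):

    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG     (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n)  (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct pack {
        void *base;
        unsigned long bitmap[];     /* one bit per chunk */
    };

    /* The element count is the number of longs covering nbits bits,
     * mirroring BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT) above. */
    struct pack *alloc_pack(unsigned int nbits)
    {
        return calloc(1, sizeof(struct pack) +
                         BITS_TO_LONGS(nbits) * sizeof(unsigned long));
    }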
/kernel/

  groups.c
     18  gi = kvmalloc(struct_size(gi, gid, gidsetsize), GFP_KERNEL_ACCOUNT);  in groups_alloc()

  audit_tree.c
     97  tree = kmalloc(struct_size(tree, pathname, strlen(s) + 1), GFP_KERNEL);  in alloc_tree()
    193  chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);  in alloc_chunk()

  auditfilter.c
     640  data = kmalloc(struct_size(data, buf, krule->buflen), GFP_KERNEL);  in audit_krule_to_data()
    1095  struct_size(data, buf, data->buflen));  in audit_list_rules()

  tracepoint.c
    109  struct tp_probes *p = kmalloc(struct_size(p, probes, count),  in allocate_probes()

  watch_queue.c
    354  wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);  in watch_queue_set_filter()

  audit.c
    1482  sig_data = kmalloc(struct_size(sig_data, ctx, len), GFP_KERNEL);  in audit_receive_msg()
    1495  sig_data, struct_size(sig_data, ctx, len));  in audit_receive_msg()

  kcov.c
    723  remote_arg_size = struct_size(remote_arg, handles,  in kcov_ioctl()

  workqueue.c
    3974  ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);  in apply_wqattrs_prepare()
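Two shapes recur in this group. audit_tree.c sizes a flexible char array with strlen(s) + 1 so a NUL-terminated path is stored inline, and audit.c evaluates the same struct_size() expression twice, once for the allocation at 1482 and once at 1495 where it apparently travels with the buffer as its length, keeping the two values from drifting apart. A sketch of the inline-string shape (this struct tree and alloc_tree are simplified stand-ins for the listed code):

    #include <stdlib.h>
    #include <string.h>

    struct tree {
        int count;
        char pathname[];            /* NUL-terminated path stored inline */
    };

    /* Element count strlen(s) + 1: the flexible array must also hold
     * the terminating NUL, as in alloc_tree() above. */
    struct tree *alloc_tree(const char *s)
    {
        size_t n = strlen(s) + 1;
        struct tree *t = malloc(sizeof(*t) + n);

        if (!t)
            return NULL;
        t->count = 0;
        memcpy(t->pathname, s, n);  /* copies the trailing NUL too */
        return t;
    }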
/kernel/gcov/

  fs.c
    119  iter = kvmalloc(struct_size(iter, buffer, size), GFP_KERNEL);  in gcov_iter_new()
/kernel/trace/

  trace_eprobe.c
    206  ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);  in alloc_event_probe()

  trace_events_user.c
    1529  size = struct_size(refs, events, count + 1);  in user_events_ref_add()

  trace_uprobe.c
    344  tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);  in alloc_trace_uprobe()

  trace_kprobe.c
    267  tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);  in alloc_trace_kprobe()

  trace_events_synth.c
    500  data_offset = struct_size(entry, fields, event->n_u64);  in trace_stack()

  trace.c
    2810  int max_len = PAGE_SIZE - struct_size(entry, array, 1);  in trace_event_buffer_lock_reserve()
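Not every hit sizes an allocation. trace_events_synth.c uses the struct_size() result as a byte offset to data placed after the fields array, trace_events_user.c sizes the new table at count + 1, apparently to append one slot to the existing references, and trace.c subtracts struct_size(entry, array, 1) from PAGE_SIZE to bound the payload that still fits in a page. A sketch of that subtraction (struct print_entry is a stand-in, not the real trace entry layout):

    #include <stddef.h>

    #define PAGE_SIZE 4096

    struct print_entry {
        unsigned long ip;
        unsigned long array[];      /* payload follows the header */
    };

    /* struct_size(entry, array, 1) is the header plus one array slot;
     * subtracting it from PAGE_SIZE bounds the usable payload, as in
     * trace_event_buffer_lock_reserve(). */
    static inline size_t max_payload(void)
    {
        return PAGE_SIZE - (sizeof(struct print_entry) + sizeof(unsigned long));
    }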
/kernel/cgroup/

  cgroup.c
    5617  cgrp = kzalloc(struct_size(cgrp, ancestors, (level + 1)), GFP_KERNEL);  in cgroup_create()
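cgroup_create() sizes ancestors[] at level + 1 because the array records the cgroup itself in its final slot, not just its proper ancestors. A sketch of that invariant, assuming an illustrative new_cgroup() rather than the kernel's actual construction:

    #include <stdlib.h>

    struct cgroup {
        int level;                      /* root sits at level 0 */
        struct cgroup *ancestors[];     /* slots 0..level, self included */
    };

    /* level + 1 slots: ancestors[level] is the cgroup itself, matching
     * the (level + 1) count passed to struct_size() above. */
    struct cgroup *new_cgroup(struct cgroup *parent)
    {
        int level = parent ? parent->level + 1 : 0;
        struct cgroup *cgrp = calloc(1, sizeof(*cgrp) +
                                        (level + 1) * sizeof(struct cgroup *));
        if (!cgrp)
            return NULL;
        cgrp->level = level;
        for (int i = 0; i < level; i++)
            cgrp->ancestors[i] = parent->ancestors[i];
        cgrp->ancestors[level] = cgrp;
        return cgrp;
    }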