/kernel/linux/linux-5.10/arch/mips/ar7/ |
D | prom.c |
  149  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;  in parse_psp_env() local
  151  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);  in parse_psp_env()
  156  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))  in parse_psp_env()
  158  value = chunks[i].data;  in parse_psp_env()
  159  if (chunks[i].num) {  in parse_psp_env()
  160  name = lookup_psp_var_map(chunks[i].num);  in parse_psp_env()
  167  i += chunks[i].len;  in parse_psp_env()
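These hits walk a packed environment area: each record carries an id, a length, and inline data; an id of 0xff ends the list, and the cursor advances by each record's own length (line 167). A minimal userspace sketch of that walk; the record layout and field meanings are assumptions, not the exact ar7 format:

  #include <stdint.h>
  #include <stdio.h>

  /* Assumed record layout; the real psp_env_chunk may differ. */
  struct env_chunk {
      uint8_t num;   /* variable id, 0xff terminates the list */
      uint8_t len;   /* record length including this header */
      char data[];   /* value bytes */
  };

  static void parse_env(const unsigned char *area, size_t size)
  {
      size_t i = 0;

      while (i + sizeof(struct env_chunk) <= size) {
          const struct env_chunk *c = (const void *)(area + i);

          /* Stop on the end marker or on a length that would run
           * past the buffer, mirroring the check on line 156. */
          if (c->num == 0xff || c->len < sizeof(*c) || i + c->len > size)
              break;
          printf("var %u = %.*s\n", c->num,
                 (int)(c->len - sizeof(*c)), c->data);
          i += c->len;
      }
  }

  int main(void)
  {
      const unsigned char area[] = {
          1, 7, 'h', 'e', 'l', 'l', 'o',
          2, 5, 'h', 'i', '!',
          0xff, 0,
      };

      parse_env(area, sizeof(area));
      return 0;
  }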
|
/kernel/linux/linux-5.10/drivers/gpu/drm/radeon/ |
D | radeon_cs.c |
  296  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);  in radeon_cs_parser_init()
  303  p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);  in radeon_cs_parser_init()
  304  if (p->chunks == NULL) {  in radeon_cs_parser_init()
  317  p->chunks[i].length_dw = user_chunk.length_dw;  in radeon_cs_parser_init()
  319  p->chunk_relocs = &p->chunks[i];  in radeon_cs_parser_init()
  322  p->chunk_ib = &p->chunks[i];  in radeon_cs_parser_init()
  324  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
  328  p->chunk_const_ib = &p->chunks[i];  in radeon_cs_parser_init()
  330  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
  334  p->chunk_flags = &p->chunks[i];  in radeon_cs_parser_init()
  [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/nfpcore/ |
D | nfp_nsp.c |
  504  } *chunks;  in nfp_nsp_command_buf_dma_sg() local
  516  chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL);  in nfp_nsp_command_buf_dma_sg()
  517  if (!chunks)  in nfp_nsp_command_buf_dma_sg()
  525  chunks[i].chunk = kmalloc(chunk_size,  in nfp_nsp_command_buf_dma_sg()
  527  if (!chunks[i].chunk)  in nfp_nsp_command_buf_dma_sg()
  530  chunks[i].len = min_t(u64, chunk_size, max_size - off);  in nfp_nsp_command_buf_dma_sg()
  535  memcpy(chunks[i].chunk, arg->in_buf + off, coff);  in nfp_nsp_command_buf_dma_sg()
  537  memset(chunks[i].chunk + coff, 0, chunk_size - coff);  in nfp_nsp_command_buf_dma_sg()
  539  off += chunks[i].len;  in nfp_nsp_command_buf_dma_sg()
  547  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len,  in nfp_nsp_command_buf_dma_sg()
  [all …]
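The nfp_nsp hits split one command buffer into fixed-size DMA chunks, copy in what the caller supplied, and zero-pad the tail (lines 525-537). A hedged userspace sketch of that split; the names, the error-unwind shape, and the assumption that nseg * chunk_size covers max_size are all ours:

  #include <stdlib.h>
  #include <string.h>

  struct buf_chunk {
      void *chunk;
      size_t len;
  };

  /*
   * Split the first in_len bytes of in_buf across nseg chunks of at
   * most chunk_size bytes, zero-padding whatever the input does not
   * cover; assumes nseg * chunk_size >= max_size. Frees everything
   * and returns NULL on allocation failure.
   */
  static struct buf_chunk *split_buf(const void *in_buf, size_t in_len,
                                     size_t max_size, size_t chunk_size,
                                     size_t nseg)
  {
      struct buf_chunk *chunks = calloc(nseg, sizeof(*chunks));
      size_t off = 0, i;

      if (!chunks)
          return NULL;

      for (i = 0; i < nseg; i++) {
          size_t coff = 0;

          chunks[i].chunk = malloc(chunk_size);
          if (!chunks[i].chunk)
              goto err;
          chunks[i].len = chunk_size < max_size - off ?
                          chunk_size : max_size - off;

          if (off < in_len) {     /* copy what the caller provided */
              coff = in_len - off < chunk_size ?
                     in_len - off : chunk_size;
              memcpy(chunks[i].chunk, (const char *)in_buf + off, coff);
          }
          /* zero the rest, like the memset() in the hit on line 537 */
          memset((char *)chunks[i].chunk + coff, 0, chunk_size - coff);
          off += chunks[i].len;
      }
      return chunks;

  err:
      while (i--)
          free(chunks[i].chunk);
      free(chunks);
      return NULL;
  }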
|
/kernel/linux/linux-5.10/drivers/staging/comedi/drivers/ni_routing/tools/ |
D | convert_csv_to_c.py |
  232  chunks = [ self.output_file_top,
  248  chunks.append('\t&{},'.format(dev_table_name))
  277  chunks.append('\tNULL,') # terminate list
  278  chunks.append('};')
  279  return '\n'.join(chunks)
  423  chunks = [ self.output_file_top,
  439  chunks.append('\t&{},'.format(fam_table_name))
  469  chunks.append('\tNULL,') # terminate list
  470  chunks.append('};')
  471  return '\n'.join(chunks)
|
/kernel/linux/linux-5.10/arch/x86/kernel/cpu/resctrl/ |
D | monitor.c |
  219  u64 shift = 64 - width, chunks;  in mbm_overflow_count() local
  221  chunks = (cur_msr << shift) - (prev_msr << shift);  in mbm_overflow_count()
  222  return chunks >>= shift;  in mbm_overflow_count()
  228  u64 chunks, tval;  in __mon_event_count() local
  258  chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);  in __mon_event_count()
  259  m->chunks += chunks;  in __mon_event_count()
  262  rr->val += m->chunks;  in __mon_event_count()
  274  u64 tval, cur_bw, chunks;  in mbm_bw_count() local
  280  chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);  in mbm_bw_count()
  281  cur_bw = (chunks * r->mon_scale) >> 20;  in mbm_bw_count()
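mbm_overflow_count (lines 219-222) takes the delta of an MSR counter that is only `width` bits wide: shifting both samples so the counter's top bit lands at bit 63 makes the unsigned subtraction wrap exactly as the hardware does, and shifting back down recovers the true delta even across an overflow. A standalone check of the trick:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Delta of a counter that is only `width` bits wide. */
  static uint64_t overflow_count(uint64_t prev, uint64_t cur, unsigned width)
  {
      unsigned shift = 64 - width;

      /* Shifted up, both samples wrap like the hardware counter,
       * so the subtraction is overflow-safe; shift back for the delta. */
      return ((cur << shift) - (prev << shift)) >> shift;
  }

  int main(void)
  {
      /* A 24-bit counter wrapped from 0xfffff0 to 0x000010: delta 0x20. */
      assert(overflow_count(0xfffff0, 0x000010, 24) == 0x20);
      puts("ok");
      return 0;
  }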
|
/kernel/linux/linux-5.10/net/xdp/ |
D | xdp_umem.c |
  160  u64 chunks, npgs;  in xdp_umem_reg() local
  195  chunks = div_u64_rem(size, chunk_size, &chunks_rem);  in xdp_umem_reg()
  196  if (!chunks || chunks > U32_MAX)  in xdp_umem_reg()
  208  umem->chunks = chunks;  in xdp_umem_reg()
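xdp_umem_reg (line 195) divides the registered area by the chunk size and rejects a count of zero or one too large for u32. The same validation in isolation; the error handling is ours, and the real code also supports an unaligned-chunk mode where a remainder is tolerated:

  #include <errno.h>
  #include <stdint.h>

  static int validate_umem(uint64_t size, uint32_t chunk_size)
  {
      uint64_t chunks = size / chunk_size;

      if (!chunks || chunks > UINT32_MAX)
          return -EINVAL;   /* no chunks, or count does not fit u32 */
      if (size % chunk_size)
          return -EINVAL;   /* trailing partial chunk (aligned mode) */
      return 0;
  }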
|
/kernel/linux/linux-5.10/scripts/gdb/linux/ |
D | timerlist.py |
  164  chunks = []
  170  chunks.append(buf[start:end])
  172  chunks.append(',')
  176  chunks[0] = chunks[0][0] # Cut off the first 0
  178  return "".join(chunks)
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/usnic/ |
D | usnic_vnic.c |
  45   struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];  member
  118  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {  in usnic_vnic_dump()
  119  chunk = &vnic->chunks[i];  in usnic_vnic_dump()
  223  return vnic->chunks[type].cnt;  in usnic_vnic_res_cnt()
  229  return vnic->chunks[type].free_cnt;  in usnic_vnic_res_free_cnt()
  255  src = &vnic->chunks[type];  in usnic_vnic_get_resources()
  287  vnic->chunks[res->type].free_cnt++;  in usnic_vnic_put_resources()
  383  &vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
  392  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
  428  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_release_resources()
|
/kernel/linux/linux-5.10/lib/ |
D | genalloc.c |
  159  INIT_LIST_HEAD(&pool->chunks);  in gen_pool_create()
  202  list_add_rcu(&chunk->next_chunk, &pool->chunks);  in gen_pool_add_owner()
  222  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_virt_to_phys()
  248  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {  in gen_pool_destroy()
  296  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_alloc_algo_owner()
  502  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_free_owner()
  537  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)  in gen_pool_for_each_chunk()
  560  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {  in gen_pool_has_addr()
  585  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_avail()
  604  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_size()
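genalloc keeps each pool as an RCU-protected list of chunks, which the hits above traverse for allocation, lookup, and accounting. Typical use of the public API looks roughly like this kernel-context sketch; the granule order and region size are arbitrary:

  #include <linux/genalloc.h>

  /* Carve a 64 KiB region at vaddr into a pool of 32-byte granules. */
  static int pool_demo(void *vaddr)
  {
      struct gen_pool *pool;
      unsigned long addr;

      pool = gen_pool_create(5, -1);          /* min alloc order 5 = 32 B */
      if (!pool)
          return -ENOMEM;
      if (gen_pool_add(pool, (unsigned long)vaddr, 64 * 1024, -1)) {
          gen_pool_destroy(pool);
          return -ENOMEM;
      }

      addr = gen_pool_alloc(pool, 256);       /* grab 256 bytes */
      if (addr)
          gen_pool_free(pool, addr, 256);

      gen_pool_destroy(pool);                 /* pool must be empty here */
      return 0;
  }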
|
/kernel/linux/linux-5.10/tools/testing/selftests/drivers/net/mlxsw/spectrum/ |
D | devlink_lib_spectrum.sh |
  92   devlink_resource_size_set 32000 kvd linear chunks
  101  devlink_resource_size_set 32000 kvd linear chunks
  110  devlink_resource_size_set 49152 kvd linear chunks
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_cs.c |
  140  chunk_array_user = u64_to_user_ptr(cs->in.chunks);  in amdgpu_cs_parser_init()
  148  p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),  in amdgpu_cs_parser_init()
  150  if (!p->chunks) {  in amdgpu_cs_parser_init()
  167  p->chunks[i].chunk_id = user_chunk.chunk_id;  in amdgpu_cs_parser_init()
  168  p->chunks[i].length_dw = user_chunk.length_dw;  in amdgpu_cs_parser_init()
  170  size = p->chunks[i].length_dw;  in amdgpu_cs_parser_init()
  173  p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);  in amdgpu_cs_parser_init()
  174  if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_parser_init()
  180  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in amdgpu_cs_parser_init()
  185  switch (p->chunks[i].chunk_id) {  in amdgpu_cs_parser_init()
  [all …]
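Both command-stream parsers (radeon above, amdgpu here) do a two-level copy: the ioctl carries a u64 that is really a user pointer to an array of chunk descriptors, and each descriptor points at length_dw 32-bit words of payload. A userspace sketch of the shape of that copy, with plain memcpy standing in for copy_from_user and all names assumed:

  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>

  struct chunk_desc {             /* what userspace hands the ioctl */
      uint32_t chunk_id;
      uint32_t length_dw;         /* payload length in 32-bit words */
      uint64_t chunk_data;        /* user pointer to the payload */
  };

  struct chunk {                  /* the kernel-side copy */
      uint32_t chunk_id;
      uint32_t length_dw;
      uint32_t *kdata;
  };

  static struct chunk *copy_chunks(uint64_t chunks_ptr, unsigned int nchunks)
  {
      /* u64_to_user_ptr() in the kernel; a plain cast here */
      const struct chunk_desc *descs =
          (const struct chunk_desc *)(uintptr_t)chunks_ptr;
      struct chunk *out = calloc(nchunks, sizeof(*out));
      unsigned int i;

      if (!out)
          return NULL;

      for (i = 0; i < nchunks; i++) {
          size_t size = (size_t)descs[i].length_dw * sizeof(uint32_t);

          out[i].chunk_id = descs[i].chunk_id;
          out[i].length_dw = descs[i].length_dw;
          if (!size)
              continue;           /* assume callers reject empty chunks */
          out[i].kdata = malloc(size);
          if (!out[i].kdata)
              goto err;
          /* copy_from_user() in the kernel; memcpy here */
          memcpy(out[i].kdata,
                 (const void *)(uintptr_t)descs[i].chunk_data, size);
      }
      return out;

  err:
      while (i--)
          free(out[i].kdata);     /* free(NULL) is fine for skipped ones */
      free(out);
      return NULL;
  }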
|
/kernel/linux/linux-5.10/net/sctp/ |
D | chunk.c |
  43   INIT_LIST_HEAD(&msg->chunks);  in sctp_datamsg_init()
  65   list_for_each_entry(chunk, &msg->chunks, frag_list)  in sctp_datamsg_free()
  81   list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_destroy()
  280  list_add_tail(&chunk->frag_list, &msg->chunks);  in sctp_datamsg_from_user()
  289  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_from_user()
|
D | auth.c |
  186  struct sctp_chunks_param *chunks,  in sctp_auth_make_key_vector() argument
  197  if (chunks)  in sctp_auth_make_key_vector()
  198  chunks_len = ntohs(chunks->param_hdr.length);  in sctp_auth_make_key_vector()
  209  if (chunks) {  in sctp_auth_make_key_vector()
  210  memcpy(new->data + offset, chunks, chunks_len);  in sctp_auth_make_key_vector()
  656  switch (param->chunks[i]) {  in __sctp_auth_cid()
  664  if (param->chunks[i] == chunk)  in __sctp_auth_cid()
  772  p->chunks[nchunks] = chunk_id;  in sctp_auth_ep_add_chunkid()
|
/kernel/linux/linux-5.10/kernel/ |
D | audit_tree.c |
  17   struct list_head chunks;  member
  101  INIT_LIST_HEAD(&tree->chunks);  in alloc_tree()
  435  list_add(&chunk->owners[0].list, &tree->chunks);  in create_chunk()
  507  list_add(&p->list, &tree->chunks);  in tag_chunk()
  572  while (!list_empty(&victim->chunks)) {  in prune_tree_chunks()
  577  p = list_first_entry(&victim->chunks, struct node, list);  in prune_tree_chunks()
  618  for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
  623  list_add(p, &tree->chunks);  in trim_marked()
  706  list_for_each_entry(node, &tree->chunks, list) {  in audit_trim_trees()
  845  list_for_each_entry(node, &tree->chunks, list)  in audit_add_tree_rule()
  [all …]
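prune_tree_chunks (lines 572-577) drains the tree's chunk list by repeatedly detaching the first entry until the list is empty, which stays safe even though each pass frees a node. The same idiom, shown on a plain singly linked list for brevity:

  #include <stdlib.h>

  struct node {
      struct node *next;
  };

  /*
   * Drain a list head-first: each pass detaches and frees the current
   * first element, so no saved iterator can go stale. Same shape as
   * the while (!list_empty()) / list_first_entry() loop above.
   */
  static void drain(struct node **head)
  {
      while (*head) {
          struct node *p = *head;

          *head = p->next;
          free(p);
      }
  }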
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/efa/ |
D | efa_verbs.c |
  92    struct pbl_chunk *chunks;  member
  1206  chunk_list->chunks = kcalloc(chunk_list_size,  in pbl_chunk_list_create()
  1207  sizeof(*chunk_list->chunks),  in pbl_chunk_list_create()
  1209  if (!chunk_list->chunks)  in pbl_chunk_list_create()
  1218  chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);  in pbl_chunk_list_create()
  1219  if (!chunk_list->chunks[i].buf)  in pbl_chunk_list_create()
  1222  chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;  in pbl_chunk_list_create()
  1224  chunk_list->chunks[chunk_list_size - 1].length =  in pbl_chunk_list_create()
  1231  cur_chunk_buf = chunk_list->chunks[0].buf;  in pbl_chunk_list_create()
  1239  cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;  in pbl_chunk_list_create()
  [all …]
|
/kernel/linux/linux-5.10/mm/ |
D | zbud.c |
  358  int chunks, i, freechunks;  in zbud_alloc() local
  367  chunks = size_to_chunks(size);  in zbud_alloc()
  371  for_each_unbuddied_list(i, chunks) {  in zbud_alloc()
  396  zhdr->first_chunks = chunks;  in zbud_alloc()
  398  zhdr->last_chunks = chunks;  in zbud_alloc()
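zbud rounds every allocation up to whole chunks and bins pages into "unbuddied" free lists indexed by how many chunks remain free, so zbud_alloc (line 371) scans bins from the requested size upward. The rounding step in isolation; the chunk size here is assumed, zbud derives its own from the page size:

  #include <assert.h>
  #include <stddef.h>

  #define CHUNK_SHIFT 6                     /* 64-byte chunks, assumed */
  #define CHUNK_SIZE  (1UL << CHUNK_SHIFT)

  /* Round size up to a whole number of chunks. */
  static int size_to_chunks(size_t size)
  {
      return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
  }

  int main(void)
  {
      assert(size_to_chunks(1) == 1);
      assert(size_to_chunks(CHUNK_SIZE) == 1);
      assert(size_to_chunks(CHUNK_SIZE + 1) == 2);
      return 0;
  }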
|
D | z3fold.c |
  637  static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)  in get_free_buddy() argument
  643  chunks <= zhdr->start_middle - ZHDR_CHUNKS)  in get_free_buddy()
  710  short chunks = size_to_chunks(sz);  in compact_single_buddy() local
  720  new_bud = get_free_buddy(new_zhdr, chunks);  in compact_single_buddy()
  724  new_zhdr->first_chunks = chunks;  in compact_single_buddy()
  728  new_zhdr->middle_chunks = chunks;  in compact_single_buddy()
  734  new_zhdr->last_chunks = chunks;  in compact_single_buddy()
  880  int chunks = size_to_chunks(size), i;  in __z3fold_alloc() local
  885  for_each_unbuddied_list(i, chunks) {  in __z3fold_alloc()
  942  l = &unbuddied[chunks];  in __z3fold_alloc()
  [all …]
|
/kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/ |
D | README |
  28   session. A session is associated with a set of memory chunks reserved on the
  36   chunks reserved for him on the server side. Their number, size and addresses
  45   which of the memory chunks has been accessed and at which offset the message
  80   the server (number of memory chunks which are going to be allocated for that
  122  1. When processing a write request client selects one of the memory chunks
  139  1. When processing a write request client selects one of the memory chunks
  144  using the IMM field, Server invalidate rkey associated to the memory chunks
  162  1. When processing a read request client selects one of the memory chunks
  181  1. When processing a read request client selects one of the memory chunks
  186  Server invalidate rkey associated to the memory chunks first, when it finishes,
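Lines 45 and 144 describe telling the peer which chunk was touched and at what offset via the 32-bit IMM field. A sketch of one such packing; the field widths are invented for illustration and are not rtrs's actual layout:

  #include <assert.h>
  #include <stdint.h>

  /* High 16 bits = chunk id, low 16 bits = offset (illustrative only). */
  static uint32_t imm_pack(uint16_t chunk_id, uint16_t off)
  {
      return ((uint32_t)chunk_id << 16) | off;
  }

  static void imm_unpack(uint32_t imm, uint16_t *chunk_id, uint16_t *off)
  {
      *chunk_id = imm >> 16;
      *off = imm & 0xffff;
  }

  int main(void)
  {
      uint16_t id, off;

      imm_unpack(imm_pack(42, 4096), &id, &off);
      assert(id == 42 && off == 4096);
      return 0;
  }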
|
D | rtrs-srv.c |
  632   int nr, chunks;  in map_cont_bufs() local
  634   chunks = chunks_per_mr * mri;  in map_cont_bufs()
  637   srv->queue_depth - chunks);  in map_cont_bufs()
  644   sg_set_page(s, srv->chunks[chunks + i],  in map_cont_bufs()
  679   sess->dma_addr[chunks + i] = sg_dma_address(s);  in map_cont_bufs()
  1013  data = page_address(srv->chunks[buf_id]);  in process_read()
  1066  data = page_address(srv->chunks[buf_id]);  in process_write()
  1140  data = page_address(srv->chunks[msg_id]) + off;  in rtrs_srv_inv_rkey_done()
  1243  data = page_address(srv->chunks[msg_id]) + off;  in rtrs_srv_rdma_done()
  1340  mempool_free(srv->chunks[i], chunk_pool);  in free_srv()
  [all …]
|
/kernel/linux/linux-5.10/drivers/md/ |
D | md-bitmap.c |
  778   unsigned long chunks, int with_super,  in md_bitmap_storage_alloc() argument
  785   bytes = DIV_ROUND_UP(chunks, 8);  in md_bitmap_storage_alloc()
  1058  unsigned long i, chunks, index, oldindex, bit, node_offset = 0;  in md_bitmap_init_from_disk() local
  1068  chunks = bitmap->counts.chunks;  in md_bitmap_init_from_disk()
  1075  for (i = 0; i < chunks ; i++) {  in md_bitmap_init_from_disk()
  1106  for (i = 0; i < chunks; i++) {  in md_bitmap_init_from_disk()
  1170  bit_cnt, chunks);  in md_bitmap_init_from_disk()
  1290  for (j = 0; j < counts->chunks; j++) {  in md_bitmap_daemon_work()
  1997  for (j = 0; j < counts->chunks; j++) {  in md_bitmap_copy_from_slot()
  2070  unsigned long chunks;  in md_bitmap_resize() local
  [all …]
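md_bitmap_storage_alloc (lines 778-785) sizes the on-disk bitmap at one bit per chunk, hence DIV_ROUND_UP(chunks, 8) bytes. The arithmetic spelled out:

  #include <assert.h>

  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

  int main(void)
  {
      /* One bit per chunk: 1..8 chunks fit one byte, 9 need two. */
      assert(DIV_ROUND_UP(8UL, 8) == 1);
      assert(DIV_ROUND_UP(9UL, 8) == 2);
      return 0;
  }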
|
/kernel/linux/linux-5.10/drivers/net/wireless/ti/wlcore/ |
D | boot.c |
  240  u32 chunks, addr, len;  in wlcore_boot_upload_firmware() local
  245  chunks = be32_to_cpup((__be32 *) fw);  in wlcore_boot_upload_firmware()
  248  wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);  in wlcore_boot_upload_firmware()
  250  while (chunks--) {  in wlcore_boot_upload_firmware()
  261  chunks, addr, len);  in wlcore_boot_upload_firmware()
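wlcore_boot_upload_firmware (lines 240-250) reads a big-endian chunk count from the image head and then walks per-chunk records. A sketch of parsing such an image; the exact record layout (be32 address, be32 length, then payload) is an assumption:

  #include <stdint.h>
  #include <stdio.h>

  static uint32_t be32(const uint8_t *p)
  {
      return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] << 8) | p[3];
  }

  /* Walk an assumed image layout: be32 chunk count, then per chunk a
   * be32 target address, be32 length, and length bytes of payload. */
  static int walk_fw(const uint8_t *fw, size_t fw_len)
  {
      size_t off = 4;
      uint32_t chunks;

      if (fw_len < 4)
          return -1;
      chunks = be32(fw);
      printf("firmware chunks to be uploaded: %u\n", chunks);

      while (chunks--) {
          uint32_t addr, len;

          if (off + 8 > fw_len)
              return -1;            /* truncated header */
          addr = be32(fw + off);
          len = be32(fw + off + 4);
          off += 8;
          if (off + len > fw_len)
              return -1;            /* truncated payload */
          printf("chunk %u: addr 0x%x, len %u\n", chunks, addr, len);
          off += len;               /* payload would be uploaded here */
      }
      return 0;
  }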
|
/kernel/linux/linux-5.10/Documentation/admin-guide/device-mapper/ |
D | striped.rst |
  6  device across one or more underlying devices. Data is written in "chunks",
  7  with consecutive chunks rotating among the underlying devices. This can
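The rotation described here is plain modular arithmetic: a linear chunk index picks the device by modulo and the position on that device by division. A sketch of the mapping, not dm-stripe's actual code:

  #include <stdint.h>
  #include <stdio.h>

  /* Map a linear sector to (device, sector on that device) for a
   * striped target with num_devs devices and chunk_size sectors per
   * chunk -- illustrative only. */
  static void map_stripe(uint64_t sector, uint32_t chunk_size,
                         uint32_t num_devs,
                         uint32_t *dev, uint64_t *dev_sector)
  {
      uint64_t chunk = sector / chunk_size;   /* which chunk overall */
      uint32_t in_chunk = sector % chunk_size;

      *dev = chunk % num_devs;                /* chunks rotate across devices */
      *dev_sector = (chunk / num_devs) * chunk_size + in_chunk;
  }

  int main(void)
  {
      uint32_t dev;
      uint64_t ds;

      map_stripe(300, 128, 3, &dev, &ds);     /* chunk 2 -> device 2 */
      printf("sector 300 -> dev %u, sector %llu\n",
             dev, (unsigned long long)ds);
      return 0;
  }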
|
/kernel/linux/linux-5.10/drivers/dma/sh/ |
D | rcar-dmac.c |
  79   struct list_head chunks;  member
  107  struct rcar_dmac_xfer_chunk chunks[0];  member
  115  ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
  358  list_first_entry(&desc->chunks,  in rcar_dmac_chan_start_xfer()
  482  desc->running = list_first_entry(&desc->chunks,  in rcar_dmac_tx_submit()
  515  INIT_LIST_HEAD(&desc->chunks);  in rcar_dmac_desc_alloc()
  546  list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);  in rcar_dmac_desc_put()
  642  struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];  in rcar_dmac_xfer_chunk_alloc()
  740  list_for_each_entry(chunk, &desc->chunks, node) {  in rcar_dmac_fill_hwdesc()
  998  list_add_tail(&chunk->node, &desc->chunks);  in rcar_dmac_chan_prep_sg()
  [all …]
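rcar-dmac (hits at lines 107 and 115) carves each descriptor page into a small header plus as many transfer chunks as fit in the remainder, computing the capacity with offsetof. The sizing pattern in isolation, with the struct contents assumed:

  #include <stddef.h>
  #include <stdio.h>

  #define PAGE_SIZE 4096

  struct xfer_chunk {
      void *src, *dst;
      size_t size;
  };

  /* Page header plus a flexible array of chunks filling the rest. */
  struct desc_page {
      struct desc_page *next;         /* stand-in for the list linkage */
      struct xfer_chunk chunks[];
  };

  #define CHUNKS_PER_PAGE \
      ((PAGE_SIZE - offsetof(struct desc_page, chunks)) / \
       sizeof(struct xfer_chunk))

  int main(void)
  {
      printf("chunks per %d-byte page: %zu\n", PAGE_SIZE, CHUNKS_PER_PAGE);
      return 0;
  }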
|
D | shdma-base.c |
  97   if (chunk->chunks == 1) {  in shdma_tx_submit()
  356  if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {  in __ld_cleanup()
  372  BUG_ON(desc->chunks != 1);  in __ld_cleanup()
  567  int chunks = 0;  in shdma_prep_sg() local
  572  chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);  in shdma_prep_sg()
  612  new->chunks = 1;  in shdma_prep_sg()
  614  new->chunks = chunks--;  in shdma_prep_sg()
|
/kernel/linux/linux-5.10/drivers/virt/vboxguest/ |
D | vboxguest_core.c |
  356   u32 i, chunks;  in vbg_balloon_work() local
  384   chunks = req->balloon_chunks;  in vbg_balloon_work()
  385   if (chunks > gdev->mem_balloon.max_chunks) {  in vbg_balloon_work()
  387   __func__, chunks, gdev->mem_balloon.max_chunks);  in vbg_balloon_work()
  391   if (chunks > gdev->mem_balloon.chunks) {  in vbg_balloon_work()
  393   for (i = gdev->mem_balloon.chunks; i < chunks; i++) {  in vbg_balloon_work()
  398   gdev->mem_balloon.chunks++;  in vbg_balloon_work()
  402   for (i = gdev->mem_balloon.chunks; i-- > chunks;) {  in vbg_balloon_work()
  407   gdev->mem_balloon.chunks--;  in vbg_balloon_work()
  1641  balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;  in vbg_ioctl_check_balloon()
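vbg_balloon_work (lines 391-407) reconciles the balloon's current chunk count with the host's requested target: inflate while below, deflate while above, stop on the first failure. The control shape stripped to its core; inflate()/deflate() are hypothetical stand-ins:

  /*
   * Reconcile-to-target loop, as in vbg_balloon_work: the direction
   * of the adjustment falls out of comparing current with requested.
   */
  static void balloon_reconcile(unsigned int *cur, unsigned int target,
                                int (*inflate)(unsigned int),
                                int (*deflate)(unsigned int))
  {
      while (*cur < target) {         /* grow: add chunks below target */
          if (inflate(*cur))
              break;                  /* stop on the first failure */
          (*cur)++;
      }
      while (*cur > target) {         /* shrink: release chunks above it */
          if (deflate(*cur - 1))
              break;
          (*cur)--;
      }
  }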
|