| /kernel/linux/linux-6.6/mm/ |
| D | zbud.c |
      31   * zbud pages are divided into "chunks". The size of the chunks is fixed at
      33   * into chunks allows organizing unbuddied zbud pages into a manageable number
      34   * of unbuddied lists according to the number of free chunks available in the
      63   * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
      65   * 63 which shows the max number of free chunks in zbud page, also there will be
      108  * @first_chunks: the size of the first buddy in chunks, 0 if free
      109  * @last_chunks: the size of the last buddy in chunks, 0 if free
      126  /* Converts an allocation size in bytes to size in zbud chunks */
      180  /* Returns the number of free chunks in a zbud page */
      249  int chunks, i, freechunks; in zbud_alloc() local
      [all …]
|
| D | z3fold.c |
      18   * As in zbud, pages are divided into "chunks". The size of the chunks is
      48   * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
      51   * which shows the max number of free chunks in z3fold page, also there will
      92   * struct z3fold_header - z3fold page metadata occupying first chunks of each
      102  * @first_chunks: the size of the first buddy in chunks, 0 if free
      103  * @middle_chunks: the size of the middle buddy in chunks, 0 if free
      104  * @last_chunks: the size of the last buddy in chunks, 0 if free
      185  /* Converts an allocation size in bytes to size in z3fold chunks */
      514  * Returns the number of free chunks in a z3fold page.
      523  * of chunks occupied by the first and the last objects. in num_free_chunks()
      [all …]
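The zbud and z3fold comments above describe splitting each page into 64 fixed-size chunks so that allocations can be rounded up to chunk granularity and pages sorted by free-chunk count. Below is a minimal user-space sketch of that size-to-chunks arithmetic, assuming a 4096-byte page and the 64-chunk layout the comments mention; the macro and function names are illustrative, not the kernel's.

```c
#include <stdio.h>

#define PAGE_SIZE_BYTES 4096UL
#define NCHUNKS_ORDER   6                      /* 2^6 = 64 chunks per page */
#define CHUNK_SHIFT     (12 - NCHUNKS_ORDER)   /* log2(4096) - order = 6 */
#define CHUNK_SIZE      (1UL << CHUNK_SHIFT)   /* 4096 / 64 = 64 bytes per chunk */

/* Round an allocation size in bytes up to whole chunks. */
static unsigned long size_to_chunks(unsigned long size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

int main(void)
{
	unsigned long first = size_to_chunks(100);   /* first buddy -> 2 chunks */
	unsigned long last  = size_to_chunks(700);   /* last buddy -> 11 chunks */
	unsigned long total = PAGE_SIZE_BYTES >> CHUNK_SHIFT;

	printf("chunks per page: %lu\n", total);
	printf("free chunks left: %lu\n", total - first - last);
	return 0;
}
```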
|
| /kernel/linux/linux-5.10/net/sctp/ |
| D | chunk.c |
      43   INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
      65   list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
      81   list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
      140  * down any such message into smaller chunks. Opportunistically, fragment
      141  * the chunks down to the current MTU constraints. We may get refragmented
      182  /* If the peer requested that we authenticate DATA chunks in sctp_datamsg_from_user()
      183  * we need to account for bundling of the AUTH chunks along with in sctp_datamsg_from_user()
      235  /* Create chunks for all DATA chunks. */ in sctp_datamsg_from_user()
      280  list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
      289  list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
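sctp_datamsg_from_user() above breaks a user message into DATA chunks that fit the current MTU and links them onto msg->chunks. Here is a standalone sketch of that fragmentation loop, under the simplifying assumption that only the payload is split; the real code also reserves room for chunk headers, padding and bundled AUTH chunks.

```c
#include <stdio.h>

/* Split msg_len bytes into fragments of at most max_data bytes each,
 * the way a datamsg is broken into DATA chunks before queueing. */
static void fragment_message(size_t msg_len, size_t max_data)
{
	size_t offset = 0, frag = 0;

	while (offset < msg_len) {
		size_t len = msg_len - offset;

		if (len > max_data)
			len = max_data;
		printf("chunk %zu: offset %zu, %zu bytes\n", frag++, offset, len);
		offset += len;
	}
}

int main(void)
{
	fragment_message(4000, 1452);   /* e.g. 1500-byte MTU minus headers */
	return 0;
}
```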
|
| D | outqueue.c |
      11   * bundling and queueing of outgoing SCTP chunks.
      204  /* Free the outqueue structure and any related pending chunks.
      212  /* Throw away unacknowledged chunks. */ in __sctp_outq_teardown()
      224  /* Throw away chunks that have been gap ACKed. */ in __sctp_outq_teardown()
      233  /* Throw away any chunks in the retransmit queue. */ in __sctp_outq_teardown()
      242  /* Throw away any chunks that are in the abandoned queue. */ in __sctp_outq_teardown()
      251  /* Throw away any leftover data chunks. */ in __sctp_outq_teardown()
      260  /* Throw away any leftover control chunks. */ in __sctp_outq_teardown()
      273  /* Free the outqueue structure and any related pending chunks. */
      276  /* Throw away leftover chunks. */ in sctp_outq_free()
      [all …]
|
| /kernel/linux/linux-6.6/net/sctp/ |
| D | chunk.c |
      43   INIT_LIST_HEAD(&msg->chunks); in sctp_datamsg_init()
      65   list_for_each_entry(chunk, &msg->chunks, frag_list) in sctp_datamsg_free()
      81   list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_destroy()
      140  * down any such message into smaller chunks. Opportunistically, fragment
      141  * the chunks down to the current MTU constraints. We may get refragmented
      182  /* If the peer requested that we authenticate DATA chunks in sctp_datamsg_from_user()
      183  * we need to account for bundling of the AUTH chunks along with in sctp_datamsg_from_user()
      235  /* Create chunks for all DATA chunks. */ in sctp_datamsg_from_user()
      280  list_add_tail(&chunk->frag_list, &msg->chunks); in sctp_datamsg_from_user()
      289  list_for_each_safe(pos, temp, &msg->chunks) { in sctp_datamsg_from_user()
|
| D | outqueue.c |
      11   * bundling and queueing of outgoing SCTP chunks.
      204  /* Free the outqueue structure and any related pending chunks.
      212  /* Throw away unacknowledged chunks. */ in __sctp_outq_teardown()
      224  /* Throw away chunks that have been gap ACKed. */ in __sctp_outq_teardown()
      233  /* Throw away any chunks in the retransmit queue. */ in __sctp_outq_teardown()
      242  /* Throw away any chunks that are in the abandoned queue. */ in __sctp_outq_teardown()
      251  /* Throw away any leftover data chunks. */ in __sctp_outq_teardown()
      260  /* Throw away any leftover control chunks. */ in __sctp_outq_teardown()
      273  /* Free the outqueue structure and any related pending chunks. */
      276  /* Throw away leftover chunks. */ in sctp_outq_free()
      [all …]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/radeon/ |
| D | radeon_cs.c |
      284  /* get chunks */ in radeon_cs_parser_init()
      296  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
      303  p->chunks = kvcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
      304  if (p->chunks == NULL) { in radeon_cs_parser_init()
      317  p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
      319  p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
      322  p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
      324  if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
      328  p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
      330  if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
      [all …]
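radeon_cs_parser_init() above copies a user-supplied array of command-stream chunk descriptors and remembers which entries hold the relocations, the IB and the const IB, rejecting zero-length IB chunks. The following is a simplified user-space sketch of that classify-by-chunk-id loop; the struct layout and ID values are placeholders, not the real radeon UAPI.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real chunk IDs. */
enum { CHUNK_ID_RELOCS = 1, CHUNK_ID_IB = 2, CHUNK_ID_CONST_IB = 3 };

struct cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;   /* chunk length in dwords */
};

int main(void)
{
	struct cs_chunk chunks[] = {
		{ CHUNK_ID_RELOCS, 16 },
		{ CHUNK_ID_IB, 256 },
	};
	const struct cs_chunk *chunk_relocs = NULL, *chunk_ib = NULL;

	for (size_t i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		switch (chunks[i].chunk_id) {
		case CHUNK_ID_RELOCS:
			chunk_relocs = &chunks[i];
			break;
		case CHUNK_ID_IB:
			if (chunks[i].length_dw == 0)
				return 1;            /* an empty IB chunk is invalid */
			chunk_ib = &chunks[i];
			break;
		}
	}
	printf("relocs: %p, ib: %p\n", (void *)chunk_relocs, (void *)chunk_ib);
	return 0;
}
```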
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/radeon/ |
| D | radeon_cs.c |
      285  /* get chunks */ in radeon_cs_parser_init()
      297  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); in radeon_cs_parser_init()
      304  p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); in radeon_cs_parser_init()
      305  if (p->chunks == NULL) { in radeon_cs_parser_init()
      318  p->chunks[i].length_dw = user_chunk.length_dw; in radeon_cs_parser_init()
      320  p->chunk_relocs = &p->chunks[i]; in radeon_cs_parser_init()
      323  p->chunk_ib = &p->chunks[i]; in radeon_cs_parser_init()
      325  if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
      329  p->chunk_const_ib = &p->chunks[i]; in radeon_cs_parser_init()
      331  if (p->chunks[i].length_dw == 0) in radeon_cs_parser_init()
      [all …]
|
| /kernel/linux/linux-6.6/arch/mips/ar7/ |
| D | prom.c |
      102  Name=Value pair in 2 chunks (len is the number of chunks)
      149  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
      151  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
      156  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
      158  value = chunks[i].data; in parse_psp_env()
      159  if (chunks[i].num) { in parse_psp_env()
      160  name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
      167  i += chunks[i].len; in parse_psp_env()
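parse_psp_env() above walks a packed array of variable-length environment chunks, each carrying a variable number, a length counted in chunks and its data, and stops at the 0xff terminator or at a length that would run past the end. Below is a self-contained sketch of that walk; the chunk layout and sample values are illustrative only.

```c
#include <stdio.h>

/* Illustrative layout: a variable number, a length counted in chunks,
 * and the start of this entry's data (the real layout lives in the ar7 code). */
struct psp_env_chunk {
	unsigned char num;
	unsigned char len;
	char data[14];
};

int main(void)
{
	struct psp_env_chunk chunks[8] = {
		{ .num = 1, .len = 1, .data = "one" },
		{ .num = 2, .len = 1, .data = "two" },
		{ .num = 0xff },                      /* end-of-environment marker */
	};
	unsigned int i = 0, n = 8;

	while (i < n) {
		/* Stop on the 0xff marker or on a length that runs past the array. */
		if (chunks[i].num == 0xff || i + chunks[i].len > n)
			break;
		printf("var %u = %s\n", (unsigned)chunks[i].num, chunks[i].data);
		i += chunks[i].len;                   /* advance by this entry's length */
	}
	return 0;
}
```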
|
| /kernel/linux/linux-5.10/arch/mips/ar7/ |
| D | prom.c |
      102  Name=Value pair in 2 chunks (len is the number of chunks)
      149  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data; in parse_psp_env() local
      151  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE); in parse_psp_env()
      156  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) in parse_psp_env()
      158  value = chunks[i].data; in parse_psp_env()
      159  if (chunks[i].num) { in parse_psp_env()
      160  name = lookup_psp_var_map(chunks[i].num); in parse_psp_env()
      167  i += chunks[i].len; in parse_psp_env()
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/netronome/nfp/nfpcore/ |
| D | nfp_nsp.c |
      505  } *chunks; in nfp_nsp_command_buf_dma_sg() local
      517  chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL); in nfp_nsp_command_buf_dma_sg()
      518  if (!chunks) in nfp_nsp_command_buf_dma_sg()
      526  chunks[i].chunk = kmalloc(chunk_size, in nfp_nsp_command_buf_dma_sg()
      528  if (!chunks[i].chunk) in nfp_nsp_command_buf_dma_sg()
      531  chunks[i].len = min_t(u64, chunk_size, max_size - off); in nfp_nsp_command_buf_dma_sg()
      536  memcpy(chunks[i].chunk, arg->in_buf + off, coff); in nfp_nsp_command_buf_dma_sg()
      538  memset(chunks[i].chunk + coff, 0, chunk_size - coff); in nfp_nsp_command_buf_dma_sg()
      540  off += chunks[i].len; in nfp_nsp_command_buf_dma_sg()
      548  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, in nfp_nsp_command_buf_dma_sg()
      [all …]
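nfp_nsp_command_buf_dma_sg() above stages the command buffer as a series of fixed-size chunk allocations, copying whatever input falls into each chunk and zero-padding the remainder before DMA-mapping. Here is a user-space sketch of that staging step, with malloc standing in for the DMA-able allocations and made-up buffer sizes.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char in_buf[] = "example command payload";          /* caller's input */
	size_t in_size = sizeof(in_buf);
	size_t chunk_size = 16, max_size = 48;                    /* illustrative limits */
	size_t nseg = (max_size + chunk_size - 1) / chunk_size;
	char **chunks = calloc(nseg, sizeof(*chunks));
	size_t off = 0;

	if (!chunks)
		return 1;
	for (size_t i = 0; i < nseg; i++) {
		size_t len = max_size - off < chunk_size ? max_size - off : chunk_size;
		size_t coff = 0;

		chunks[i] = malloc(chunk_size);
		if (!chunks[i])
			return 1;
		if (off < in_size) {              /* copy whatever input falls into this chunk */
			coff = in_size - off < len ? in_size - off : len;
			memcpy(chunks[i], in_buf + off, coff);
		}
		memset(chunks[i] + coff, 0, chunk_size - coff);   /* zero-pad the rest */
		printf("chunk %zu: %zu data bytes, %zu pad\n", i, coff, chunk_size - coff);
		off += len;
	}
	for (size_t i = 0; i < nseg; i++)
		free(chunks[i]);
	free(chunks);
	return 0;
}
```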
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/nfpcore/ |
| D | nfp_nsp.c |
      504  } *chunks; in nfp_nsp_command_buf_dma_sg() local
      516  chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL); in nfp_nsp_command_buf_dma_sg()
      517  if (!chunks) in nfp_nsp_command_buf_dma_sg()
      525  chunks[i].chunk = kmalloc(chunk_size, in nfp_nsp_command_buf_dma_sg()
      527  if (!chunks[i].chunk) in nfp_nsp_command_buf_dma_sg()
      530  chunks[i].len = min_t(u64, chunk_size, max_size - off); in nfp_nsp_command_buf_dma_sg()
      535  memcpy(chunks[i].chunk, arg->in_buf + off, coff); in nfp_nsp_command_buf_dma_sg()
      537  memset(chunks[i].chunk + coff, 0, chunk_size - coff); in nfp_nsp_command_buf_dma_sg()
      539  off += chunks[i].len; in nfp_nsp_command_buf_dma_sg()
      547  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, in nfp_nsp_command_buf_dma_sg()
      [all …]
|
| /kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/ |
| D | README |
      28   session. A session is associated with a set of memory chunks reserved on the
      36   chunks reserved for him on the server side. Their number, size and addresses
      45   which of the memory chunks has been accessed and at which offset the message
      80   the server (number of memory chunks which are going to be allocated for that
      122  1. When processing a write request client selects one of the memory chunks
      139  1. When processing a write request client selects one of the memory chunks
      144  using the IMM field, Server invalidate rkey associated to the memory chunks
      162  1. When processing a read request client selects one of the memory chunks
      181  1. When processing a read request client selects one of the memory chunks
      186  Server invalidate rkey associated to the memory chunks first, when it finishes,
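The README above repeatedly notes that the client "selects one of the memory chunks" reserved for it on the server before issuing a read or write request. The sketch below shows one simple way such bookkeeping could look, using a plain busy flag per chunk; it is purely illustrative and not how the RTRS driver actually tracks outstanding chunks.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_CHUNKS 128          /* chunks the server reserved for this session */

static bool chunk_busy[NUM_CHUNKS];

/* Return the index of a chunk not currently used by an in-flight request,
 * or -1 if all of them are busy.  Illustrative bookkeeping only. */
static int select_chunk(void)
{
	for (int i = 0; i < NUM_CHUNKS; i++) {
		if (!chunk_busy[i]) {
			chunk_busy[i] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	int id = select_chunk();

	if (id >= 0) {
		printf("sending request via chunk %d\n", id);
		chunk_busy[id] = false;   /* release once the server confirms completion */
	}
	return 0;
}
```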
|
| /kernel/linux/linux-6.6/drivers/infiniband/ulp/rtrs/ |
| D | README |
      28   session. A session is associated with a set of memory chunks reserved on the
      36   chunks reserved for him on the server side. Their number, size and addresses
      45   which of the memory chunks has been accessed and at which offset the message
      80   the server (number of memory chunks which are going to be allocated for that
      122  1. When processing a write request client selects one of the memory chunks
      139  1. When processing a write request client selects one of the memory chunks
      144  using the IMM field, Server invalidate rkey associated to the memory chunks
      162  1. When processing a read request client selects one of the memory chunks
      181  1. When processing a read request client selects one of the memory chunks
      186  Server invalidate rkey associated to the memory chunks first, when it finishes,
|
| /kernel/linux/linux-5.10/mm/ |
| D | zbud.c |
      31   * zbud pages are divided into "chunks". The size of the chunks is fixed at
      33   * into chunks allows organizing unbuddied zbud pages into a manageable number
      34   * of unbuddied lists according to the number of free chunks available in the
      64   * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
      66   * 63 which shows the max number of free chunks in zbud page, also there will be
      112  * @first_chunks: the size of the first buddy in chunks, 0 if free
      113  * @last_chunks: the size of the last buddy in chunks, 0 if free
      230  /* Converts an allocation size in bytes to size in zbud chunks */
      286  /* Returns the number of free chunks in a zbud page */
      358  int chunks, i, freechunks; in zbud_alloc() local
      [all …]
|
| /kernel/linux/linux-5.10/tools/testing/selftests/drivers/net/mlxsw/spectrum/ |
| D | devlink_lib_spectrum.sh |
      15   KVDL_CHILDREN="singles chunks large_chunks"
      92   devlink_resource_size_set 32000 kvd linear chunks
      101  devlink_resource_size_set 32000 kvd linear chunks
      110  devlink_resource_size_set 49152 kvd linear chunks
|
| /kernel/linux/linux-6.6/tools/testing/selftests/drivers/net/mlxsw/spectrum/ |
| D | devlink_lib_spectrum.sh |
      13   KVDL_CHILDREN="singles chunks large_chunks"
      90   devlink_resource_size_set 32000 kvd linear chunks
      99   devlink_resource_size_set 32000 kvd linear chunks
      108  devlink_resource_size_set 49152 kvd linear chunks
|
| /kernel/linux/linux-6.6/include/net/sctp/ |
| D | structs.h |
      372  struct sctp_chunks_param *chunks; member
      521  /* Chunks waiting to be submitted to lower layer. */
      522  struct list_head chunks; member
      565  /* This field is used by chunks that hold fragmented data.
      642  * spec violates the principle premis that all chunks are processed
      703  /* This structure holds lists of chunks as we are assembling for
      712  /* This contains the payload chunks. */
      717  /* This is the total size of all chunks INCLUDING padding. */
      783  * chunks sent to this address is currently being
      956  /* This is the list of transports that have chunks to send. */
      [all …]
|
| /kernel/linux/linux-5.10/include/net/sctp/ |
| D | structs.h |
      368  struct sctp_chunks_param *chunks; member
      515  /* Chunks waiting to be submitted to lower layer. */
      516  struct list_head chunks; member
      559  /* This field is used by chunks that hold fragmented data.
      636  * spec violates the principle premis that all chunks are processed
      696  /* This structure holds lists of chunks as we are assembling for
      705  /* This contains the payload chunks. */
      710  /* This is the total size of all chunks INCLUDING padding. */
      775  * chunks sent to this address is currently being
      942  /* This is the list of transports that have chunks to send. */
      [all …]
|
| /kernel/linux/linux-5.10/fs/ocfs2/ |
| D | quota.h |
      40   struct list_head rc_list; /* List of chunks */
      46   struct list_head r_list[OCFS2_MAXQUOTAS]; /* List of chunks to recover */
      53   unsigned int dqi_chunks; /* Number of chunks in local quota file */
      56   struct list_head dqi_chunk; /* List of chunks */
      78   struct list_head qc_chunk; /* List of quotafile chunks */
|
| /kernel/linux/linux-6.6/fs/ocfs2/ |
| D | quota.h |
      40   struct list_head rc_list; /* List of chunks */
      46   struct list_head r_list[OCFS2_MAXQUOTAS]; /* List of chunks to recover */
      53   unsigned int dqi_chunks; /* Number of chunks in local quota file */
      56   struct list_head dqi_chunk; /* List of chunks */
      78   struct list_head qc_chunk; /* List of quotafile chunks */
|
| /kernel/linux/linux-5.10/net/xdp/ |
| D | xdp_umem.c |
      160  u64 chunks, npgs; in xdp_umem_reg() local
      195  chunks = div_u64_rem(size, chunk_size, &chunks_rem); in xdp_umem_reg()
      196  if (!chunks || chunks > U32_MAX) in xdp_umem_reg()
      208  umem->chunks = chunks; in xdp_umem_reg()
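xdp_umem_reg() above derives the chunk count by dividing the UMEM size by the chunk size and rejects a zero count or one that does not fit in u32 (aligned mode additionally rejects a non-zero remainder). A compact sketch of that validation follows, assuming aligned chunk mode; the helper name is made up.

```c
#include <stdint.h>
#include <stdio.h>

/* Validate a UMEM-style registration: the size must split into a
 * non-zero number of equal chunks whose count fits in a u32. */
static int umem_chunks(uint64_t size, uint32_t chunk_size, uint32_t *out)
{
	uint64_t chunks, rem;

	if (!chunk_size)
		return -1;
	chunks = size / chunk_size;
	rem = size % chunk_size;
	if (!chunks || chunks > UINT32_MAX || rem)
		return -1;
	*out = (uint32_t)chunks;
	return 0;
}

int main(void)
{
	uint32_t n;

	if (!umem_chunks(4 * 1024 * 1024, 2048, &n))
		printf("umem holds %u chunks\n", n);
	return 0;
}
```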
|
| /kernel/linux/linux-6.6/net/xdp/ |
| D | xdp_umem.c |
      157  u64 chunks, npgs; in xdp_umem_reg() local
      192  chunks = div_u64_rem(size, chunk_size, &chunks_rem); in xdp_umem_reg()
      193  if (!chunks || chunks > U32_MAX) in xdp_umem_reg()
      205  umem->chunks = chunks; in xdp_umem_reg()
|
| /kernel/linux/linux-5.10/drivers/infiniband/hw/usnic/ |
| D | usnic_vnic.c |
      45   struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
      118  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
      119  chunk = &vnic->chunks[i]; in usnic_vnic_dump()
      223  return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
      229  return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
      255  src = &vnic->chunks[type]; in usnic_vnic_get_resources()
      287  vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
      383  &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
      392  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
      428  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
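usnic_vnic.c above keeps one resource chunk per resource type, each with a total count and a free count that shrink as resources are handed out and grow as they are returned. A small sketch of that per-type accounting; the type names and field layout are illustrative, not the driver's.

```c
#include <stdio.h>

enum res_type { RES_TYPE_EQ, RES_TYPE_CQ, RES_TYPE_WQ, RES_TYPE_MAX };

struct res_chunk {
	int cnt;        /* total resources of this type */
	int free_cnt;   /* resources not currently handed out */
};

static struct res_chunk chunks[RES_TYPE_MAX] = {
	[RES_TYPE_EQ] = { 4, 4 },
	[RES_TYPE_CQ] = { 8, 8 },
	[RES_TYPE_WQ] = { 8, 8 },
};

/* Reserve `want` resources of the given type, or fail if too few are free. */
static int get_resources(enum res_type type, int want)
{
	if (chunks[type].free_cnt < want)
		return -1;
	chunks[type].free_cnt -= want;
	return 0;
}

/* Return previously reserved resources to the chunk. */
static void put_resources(enum res_type type, int count)
{
	chunks[type].free_cnt += count;
}

int main(void)
{
	if (!get_resources(RES_TYPE_CQ, 2))
		printf("CQ free: %d/%d\n", chunks[RES_TYPE_CQ].free_cnt,
		       chunks[RES_TYPE_CQ].cnt);
	put_resources(RES_TYPE_CQ, 2);
	return 0;
}
```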
|
| /kernel/linux/linux-6.6/drivers/infiniband/hw/usnic/ |
| D | usnic_vnic.c |
      44   struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
      117  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
      118  chunk = &vnic->chunks[i]; in usnic_vnic_dump()
      222  return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
      228  return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
      254  src = &vnic->chunks[type]; in usnic_vnic_get_resources()
      286  vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
      382  &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
      391  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
      427  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
|