/drivers/staging/most/hdm-dim2/ |
D | dim2_hdm.c |
    525  u16 new_size; in configure_channel() local
    538  new_size = dim_norm_ctrl_async_buffer_size(buf_size); in configure_channel()
    539  if (new_size == 0) { in configure_channel()
    543  ccfg->buffer_size = new_size; in configure_channel()
    544  if (new_size != buf_size) in configure_channel()
    546  hdm_ch->name, buf_size, new_size); in configure_channel()
    549  is_tx ? new_size * 2 : new_size); in configure_channel()
    552  new_size = dim_norm_ctrl_async_buffer_size(buf_size); in configure_channel()
    553  if (new_size == 0) { in configure_channel()
    557  ccfg->buffer_size = new_size; in configure_channel()
    [all …]
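The configure_channel() hits above follow a validate-and-normalize pattern: the requested buffer size is rounded to a value the hardware can use, a result of zero is treated as an error, and the caller is warned when the size had to be adjusted. A minimal userspace sketch of that idea; norm_buffer_size() is a hypothetical stand-in for dim_norm_ctrl_async_buffer_size(), not the driver's actual rounding rule:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's normalization helper: round the
 * request down to the largest power of two, 0 meaning "unusable request". */
static uint16_t norm_buffer_size(uint16_t requested)
{
	uint16_t size = 0;

	for (uint16_t p = 1; p != 0 && p <= requested; p <<= 1)
		size = p;
	return size;
}

static int configure_buffer(const char *name, uint16_t buf_size,
			    uint16_t *configured)
{
	uint16_t new_size = norm_buffer_size(buf_size);

	if (new_size == 0)
		return -1;              /* reject sizes we cannot normalize */
	if (new_size != buf_size)
		fprintf(stderr, "%s: buffer size adjusted %u -> %u\n",
			name, (unsigned)buf_size, (unsigned)new_size);
	*configured = new_size;
	return 0;
}
```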
|
/drivers/hid/ |
D | hid-gembird.c |
    70  size_t new_size = *rsize + delta_size; in gembird_report_fixup() local
    75  new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL); in gembird_report_fixup()
    93  *rsize = new_size; in gembird_report_fixup()
|
/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_cotable.c |
    388  static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) in vmw_cotable_resize() argument
    417  ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement, in vmw_cotable_resize()
    465  res->backup_size = new_size; in vmw_cotable_resize()
    513  size_t new_size = res->backup_size; in vmw_cotable_create() local
    519  while (needed_size > new_size) in vmw_cotable_create()
    520  new_size *= 2; in vmw_cotable_create()
    522  if (likely(new_size <= res->backup_size)) { in vmw_cotable_create()
    532  return vmw_cotable_resize(res, new_size); in vmw_cotable_create()
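vmw_cotable_create() grows the backup size by repeated doubling until it covers needed_size, and skips the resize entirely when the current size is already sufficient. A plain-C sketch of that grow-by-doubling decision, with invented parameter names standing in for the driver's backup sizes:

```c
#include <stddef.h>

/* Double current_size until it covers needed_size.  Returns the size the
 * backing store should be grown to, or 0 when no resize is necessary. */
static size_t grow_to_fit(size_t current_size, size_t needed_size)
{
	size_t new_size = current_size;

	if (new_size == 0)
		new_size = 1;           /* avoid stalling when starting empty */
	while (needed_size > new_size)
		new_size *= 2;

	if (new_size <= current_size)   /* already large enough */
		return 0;
	return new_size;
}
```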
|
/drivers/staging/comedi/ |
D | comedi_buf.c |
    219  unsigned long new_size) in comedi_buf_alloc() argument
    224  new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK; in comedi_buf_alloc()
    227  if (async->prealloc_buf && async->prealloc_bufsz == new_size) in comedi_buf_alloc()
    234  if (new_size) { in comedi_buf_alloc()
    235  unsigned int n_pages = new_size >> PAGE_SHIFT; in comedi_buf_alloc()
    245  async->prealloc_bufsz = new_size; in comedi_buf_alloc()
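comedi_buf_alloc() rounds the requested byte count up to a whole number of pages with `(new_size + PAGE_SIZE - 1) & PAGE_MASK` and then derives the page count with a shift. The same arithmetic in a standalone sketch; PAGE_SHIFT/PAGE_SIZE are hard-coded to the common 4 KiB values purely for illustration:

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL                 /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long new_size = 10000;

	/* round up to a whole number of pages, then count the pages */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
	printf("rounded: %lu bytes, %lu pages\n",
	       new_size, new_size >> PAGE_SHIFT);
	return 0;                       /* prints: 12288 bytes, 3 pages */
}
```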
|
D | comedi_internal.h |
    28  unsigned long new_size);
|
/drivers/scsi/cxlflash/ |
D | vlun.c |
    509  u64 *new_size) in grow_lxt() argument
    520  u64 delta = *new_size - rhte->lxt_cnt; in grow_lxt()
    605  *new_size = my_new_size; in grow_lxt()
    627  u64 *new_size) in shrink_lxt() argument
    640  u64 delta = rhte->lxt_cnt - *new_size; in shrink_lxt()
    713  *new_size = my_new_size; in shrink_lxt()
    745  u64 new_size; in _cxlflash_vlun_resize() local
    759  new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE); in _cxlflash_vlun_resize()
    763  new_size); in _cxlflash_vlun_resize()
    793  if (new_size > rhte->lxt_cnt) in _cxlflash_vlun_resize()
    [all …]
|
/drivers/staging/lustre/lnet/libcfs/ |
D | hash.c |
    881  unsigned int old_size, unsigned int new_size) in cfs_hash_buckets_realloc() argument
    888  if (old_bkts && old_size == new_size) in cfs_hash_buckets_realloc()
    891  LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size); in cfs_hash_buckets_realloc()
    897  min(old_size, new_size) * sizeof(*old_bkts)); in cfs_hash_buckets_realloc()
    900  for (i = old_size; i < new_size; i++) { in cfs_hash_buckets_realloc()
    907  old_size, new_size); in cfs_hash_buckets_realloc()
    1878  unsigned int new_size; in cfs_hash_rehash_worker() local
    1890  new_size = CFS_HASH_RH_NBKT(hs); in cfs_hash_rehash_worker()
    1899  old_size, new_size); in cfs_hash_rehash_worker()
    1914  old_size = new_size; in cfs_hash_rehash_worker()
    [all …]
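cfs_hash_buckets_realloc() allocates a new bucket array, copies min(old_size, new_size) existing bucket pointers across, and initializes only the newly added slots. A generic userspace version of that copy-and-extend step, with bucket initialization reduced to NULL-ing the pointers and all names invented for the sketch:

```c
#include <stdlib.h>
#include <string.h>

/* Resize an array of bucket pointers, preserving existing entries and
 * zero-initializing any newly added slots.  Returns NULL on allocation
 * failure and leaves the old array untouched in that case. */
static void **buckets_realloc(void **old_bkts, unsigned int old_size,
			      unsigned int new_size)
{
	unsigned int keep = old_size < new_size ? old_size : new_size;
	void **new_bkts;

	if (old_bkts && old_size == new_size)
		return old_bkts;                /* nothing to do */

	new_bkts = malloc(sizeof(new_bkts[0]) * new_size);
	if (!new_bkts)
		return NULL;

	if (old_bkts)
		memcpy(new_bkts, old_bkts, keep * sizeof(*old_bkts));
	for (unsigned int i = old_bkts ? keep : 0; i < new_size; i++)
		new_bkts[i] = NULL;             /* initialize the new tail */
	return new_bkts;
}
```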
|
/drivers/md/ |
D | dm-era-target.c |
    820  dm_block_t *new_size = arg; in metadata_resize() local
    823  if (!valid_nr_blocks(*new_size)) { in metadata_resize()
    825  (unsigned long long) *new_size); in metadata_resize()
    832  r = writeset_alloc(&md->writesets[0], *new_size); in metadata_resize()
    838  r = writeset_alloc(&md->writesets[1], *new_size); in metadata_resize()
    847  md->nr_blocks, *new_size, in metadata_resize()
    854  md->nr_blocks = *new_size; in metadata_resize()
    1571  dm_block_t new_size = calc_nr_blocks(era); in era_preresume() local
    1573  if (era->nr_blocks != new_size) { in era_preresume()
    1574  r = in_worker1(era, metadata_resize, &new_size); in era_preresume()
    [all …]
|
D | dm-thin-metadata.h |
    207  int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
    208  int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
|
D | dm-cache-target.c |
    3098  static bool can_resize(struct cache *cache, dm_cblock_t new_size) in can_resize() argument
    3100  if (from_cblock(new_size) > from_cblock(cache->cache_size)) { in can_resize()
    3111  while (from_cblock(new_size) < from_cblock(cache->cache_size)) { in can_resize()
    3112  new_size = to_cblock(from_cblock(new_size) + 1); in can_resize()
    3113  if (is_dirty(cache, new_size)) { in can_resize()
    3116  (unsigned long long) from_cblock(new_size)); in can_resize()
    3124  static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) in resize_cache_dev() argument
    3128  r = dm_cache_resize(cache->cmd, new_size); in resize_cache_dev()
    3135  set_cache_size(cache, new_size); in resize_cache_dev()
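can_resize() above handles growth separately and, when shrinking, walks the cache blocks beyond the new size and refuses if any of them is still dirty. A simplified sketch of that kind of shrink guard; the plain dirty bitmap and is_dirty() predicate are stand-ins for the cache's per-cblock dirty state, not the target's data structures:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical dirty bitmap: one bit per cache block. */
static bool is_dirty(const uint8_t *dirty_bits, uint64_t block)
{
	return dirty_bits[block / 8] & (1u << (block % 8));
}

/* Growing is always allowed; shrinking only if none of the blocks being
 * cut off ([new_size, current_size)) is still dirty. */
static bool can_resize(const uint8_t *dirty_bits, uint64_t current_size,
		       uint64_t new_size)
{
	if (new_size >= current_size)
		return true;

	for (uint64_t b = new_size; b < current_size; b++)
		if (is_dirty(dirty_bits, b))
			return false;   /* would drop unwritten data */
	return true;
}
```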
|
D | dm-table.c |
    553  unsigned new_size; in realloc_argv() local
    557  new_size = *size * 2; in realloc_argv()
    560  new_size = 8; in realloc_argv()
    563  argv = kmalloc(new_size * sizeof(*argv), gfp); in realloc_argv()
    566  *size = new_size; in realloc_argv()
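realloc_argv() grows the argument vector geometrically: an empty vector starts at 8 slots and an existing one doubles. A standalone version of that sizing policy using realloc (the kernel helper shown above allocates a fresh buffer with kmalloc instead, so treat this only as an illustration of the policy):

```c
#include <stdlib.h>

/* Grow an argv-style array: start at 8 entries, then double.  On success
 * *size is updated; on failure NULL is returned and the original array
 * remains valid. */
static char **realloc_argv(unsigned *size, char **old_argv)
{
	unsigned new_size = *size ? *size * 2 : 8;
	char **argv = realloc(old_argv, new_size * sizeof(*argv));

	if (argv)
		*size = new_size;
	return argv;
}
```

Doubling keeps the number of reallocations logarithmic in the final argument count, which is why the same shape recurs in several of the other hits in this listing.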
|
/drivers/misc/vmw_vmci/ |
D | vmci_handle_array.c |
    62  size_t new_size = handle_arr_calc_size(array->capacity + in vmci_handle_arr_append_entry() local
    68  new_array = krealloc(array, new_size, GFP_ATOMIC); in vmci_handle_arr_append_entry()
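vmci_handle_arr_append_entry() grows the whole array object with krealloc before appending, so the header and the entries stay in one allocation. A userspace equivalent using realloc on a struct with a flexible array member; the type, helper name, and doubling growth step are all choices made for this sketch:

```c
#include <stdint.h>
#include <stdlib.h>

struct handle_arr {
	size_t capacity;
	size_t size;
	uint64_t entries[];             /* flexible array of handles */
};

/* Append one handle, growing the whole object when it is full.  Returns the
 * (possibly moved) array, or NULL if growth failed; the caller then still
 * owns the original array. */
static struct handle_arr *arr_append(struct handle_arr *arr, uint64_t handle)
{
	if (arr->size == arr->capacity) {
		size_t new_cap = arr->capacity ? arr->capacity * 2 : 4;
		size_t new_size = sizeof(*arr) +
				  new_cap * sizeof(arr->entries[0]);
		struct handle_arr *bigger = realloc(arr, new_size);

		if (!bigger)
			return NULL;
		bigger->capacity = new_cap;
		arr = bigger;
	}
	arr->entries[arr->size++] = handle;
	return arr;
}
```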
|
/drivers/uwb/ |
D | hwa-rc.c |
    120  size_t *new_size) in hwarc_filter_evt_beacon_WUSB_0100() argument
    155  *new_size = sizeof(*newbe) + ielength; in hwarc_filter_evt_beacon_WUSB_0100()
    178  size_t *new_size) in hwarc_filter_evt_drp_avail_WUSB_0100() argument
    222  *new_size = sizeof(*newda); in hwarc_filter_evt_drp_avail_WUSB_0100()
    253  size_t *new_size) in hwarc_filter_evt_drp_WUSB_0100() argument
    286  *new_size = sizeof(*newdrpev) + ielength; in hwarc_filter_evt_drp_WUSB_0100()
|
/drivers/xen/ |
D | evtchn.c |
    310  unsigned int new_size; in evtchn_resize_ring() local
    321  new_size = 64; in evtchn_resize_ring()
    323  new_size = 2 * u->ring_size; in evtchn_resize_ring()
    325  new_ring = kvmalloc(new_size * sizeof(*new_ring), GFP_KERNEL); in evtchn_resize_ring()
    354  u->ring_size = new_size; in evtchn_resize_ring()
|
/drivers/md/persistent-data/ |
D | dm-array.c |
    656  uint32_t old_size, uint32_t new_size, in array_resize() argument
    662  if (old_size == new_size) { in array_resize()
    675  resize.new_nr_full_blocks = new_size / resize.max_entries; in array_resize()
    676  resize.new_nr_entries_in_last_block = new_size % resize.max_entries; in array_resize()
    679  r = ((new_size > old_size) ? grow : shrink)(&resize); in array_resize()
    688  uint32_t old_size, uint32_t new_size, in dm_array_resize() argument
    692  int r = array_resize(info, root, old_size, new_size, value, new_root); in dm_array_resize()
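array_resize() splits the new element count into full blocks plus a remainder (new_size / max_entries and new_size % max_entries) and then dispatches to either a grow or a shrink routine depending on which way the size moved. The arithmetic and the dispatch in isolation, with grow/shrink reduced to stubs and the plan struct invented for the sketch:

```c
#include <stdint.h>

struct resize_plan {
	uint32_t full_blocks;           /* blocks that will be completely used */
	uint32_t entries_in_last;       /* leftover entries in a partial block */
};

static int grow(const struct resize_plan *p)   { (void)p; return 0; }
static int shrink(const struct resize_plan *p) { (void)p; return 0; }

static int array_resize(uint32_t old_size, uint32_t new_size,
			uint32_t max_entries)
{
	struct resize_plan plan = {
		.full_blocks     = new_size / max_entries,
		.entries_in_last = new_size % max_entries,
	};

	if (old_size == new_size)
		return 0;               /* nothing to do */
	return (new_size > old_size ? grow : shrink)(&plan);
}
```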
|
D | dm-array.h |
    110  uint32_t old_size, uint32_t new_size,
|
/drivers/acpi/apei/ |
D | erst.c |
    516  int new_size; in __erst_record_id_cache_add_one() local
    519  new_size = erst_record_id_cache.size * 2; in __erst_record_id_cache_add_one()
    520  new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN, in __erst_record_id_cache_add_one()
    522  if (new_size <= erst_record_id_cache.size) { in __erst_record_id_cache_add_one()
    527  new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL); in __erst_record_id_cache_add_one()
    534  erst_record_id_cache.size = new_size; in __erst_record_id_cache_add_one()
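__erst_record_id_cache_add_one() doubles the cache size but clamps the result between a fixed minimum and maximum, and gives up when the clamp means the cache can no longer grow. A sketch of that double-and-clamp policy; the MIN/MAX limits here are made up for illustration, not ERST's actual values:

```c
#define CACHE_SIZE_MIN  16              /* illustrative limits only */
#define CACHE_SIZE_MAX  1024

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Return the next cache size, or -1 if the cache is already at its cap. */
static int next_cache_size(int current)
{
	int new_size = clamp_int(current * 2, CACHE_SIZE_MIN, CACHE_SIZE_MAX);

	if (new_size <= current)
		return -1;              /* hit CACHE_SIZE_MAX: stop growing */
	return new_size;
}
```

The lower clamp also handles the first growth step (a size of 0 doubles to 0, then gets pulled up to the minimum), while the upper clamp keeps the cache from growing without bound.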
|
/drivers/staging/lustre/lustre/lov/ |
D | lov_pool.c |
    317  int new_size; in lov_ost_pool_extend() local
    324  new_size = max(min_count, 2 * op->op_size); in lov_ost_pool_extend()
    325  new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS); in lov_ost_pool_extend()
    333  op->op_size = new_size; in lov_ost_pool_extend()
|
D | lov_io.c |
    84  loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size; in lov_io_sub_inherit() local
    86  new_size = lov_size_to_stripe(lsm, new_size, stripe); in lov_io_sub_inherit()
    87  io->u.ci_setattr.sa_attr.lvb_size = new_size; in lov_io_sub_inherit()
|
/drivers/pci/ |
D | setup-res.c |
    366  resource_size_t new_size; in pci_reassign_resource() local
    381  new_size = resource_size(res) + addsize; in pci_reassign_resource()
    382  ret = _pci_assign_resource(dev, resno, new_size, min_align); in pci_reassign_resource()
|
/drivers/gpu/drm/vc4/ |
D | vc4_bo.c |
    203  uint32_t new_size = max(vc4->bo_cache.size_list_size * 2, in vc4_get_cache_list_for_size() local
    208  new_list = kmalloc_array(new_size, sizeof(struct list_head), in vc4_get_cache_list_for_size()
    226  for (i = vc4->bo_cache.size_list_size; i < new_size; i++) in vc4_get_cache_list_for_size()
    231  vc4->bo_cache.size_list_size = new_size; in vc4_get_cache_list_for_size()
|
D | vc4_plane.c |
    243  u32 new_size = max(4u, vc4_state->dlist_count * 2); in vc4_dlist_write() local
    244  u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL); in vc4_dlist_write()
    252  vc4_state->dlist_size = new_size; in vc4_dlist_write()
|
/drivers/staging/lustre/lustre/llite/ |
D | vvp_io.c |
    550  __u64 new_size; in vvp_io_setattr_lock() local
    554  new_size = io->u.ci_setattr.sa_attr.lvb_size; in vvp_io_setattr_lock()
    555  if (new_size == 0) in vvp_io_setattr_lock()
    570  new_size = 0; in vvp_io_setattr_lock()
    574  new_size, OBD_OBJECT_EOF); in vvp_io_setattr_lock()
|
/drivers/infiniband/hw/hfi1/ |
D | init.c |
    1958  u32 new_size, i, j; in hfi1_setup_eagerbufs() local
    1975  new_size = rcd->egrbufs.rcvtid_size / 2; in hfi1_setup_eagerbufs()
    1983  rcd->egrbufs.rcvtid_size = new_size; in hfi1_setup_eagerbufs()
    2001  new_size) == in hfi1_setup_eagerbufs()
    2007  offset += new_size; in hfi1_setup_eagerbufs()
    2010  rcd->egrbufs.rcvtid_size = new_size; in hfi1_setup_eagerbufs()
|
/drivers/base/ |
D | component.c |
    270  size_t new_size = match->alloc + 16; in component_match_add_release() local
    273  ret = component_match_realloc(master, match, new_size); in component_match_add_release()
|