Search results for refs:nr_zones (results 1 – 25 of 25), sorted by relevance

/drivers/md/
dm-zone.c
22 sector_t sector, unsigned int nr_zones, in dm_blk_do_report_zones() argument
42 nr_zones - args.zone_idx); in dm_blk_do_report_zones()
45 } while (args.zone_idx < nr_zones && in dm_blk_do_report_zones()
57 unsigned int nr_zones, report_zones_cb cb, void *data) in dm_blk_report_zones() argument
70 ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data); in dm_blk_report_zones()
112 struct dm_report_zones_args *args, unsigned int nr_zones) in dm_report_zones() argument
120 return blkdev_report_zones(bdev, sector, nr_zones, in dm_report_zones()
152 md->nr_zones = 0; in dm_cleanup_zoned_dev()
186 disk->conv_zones_bitmap = bitmap_zalloc(disk->nr_zones, in dm_zone_revalidate_cb()
196 disk->seq_zones_wlock = bitmap_zalloc(disk->nr_zones, in dm_zone_revalidate_cb()
[all …]
dm-zoned-reclaim.c
450 unsigned int nr_unmap, nr_zones; in dmz_reclaim_percentage() local
453 nr_zones = nr_cache; in dmz_reclaim_percentage()
456 nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx); in dmz_reclaim_percentage()
461 return nr_unmap * 100 / nr_zones; in dmz_reclaim_percentage()
dm-zoned-metadata.c
164 unsigned int nr_zones; member
265 return zmd->nr_zones; in dmz_nr_zones()
1429 for (idx = 0; idx < dev->nr_zones; idx++) { in dmz_emulate_zones()
1456 for (idx = 0; idx < zmd->nr_zones; idx++) { in dmz_drop_zones()
1486 zmd->nr_zones = 0; in dmz_init_zones()
1491 zmd->nr_zones += dev->nr_zones; in dmz_init_zones()
1502 if (!zmd->nr_zones) { in dmz_init_zones()
1509 zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones); in dmz_init_zones()
1714 if (dzone_id >= zmd->nr_zones) { in dmz_load_mapping()
1742 if (bzone_id >= zmd->nr_zones) { in dmz_load_mapping()
[all …]
dm-linear.c
139 struct dm_report_zones_args *args, unsigned int nr_zones) in linear_report_zones() argument
145 args, nr_zones); in linear_report_zones()
dm-zoned-target.c
799 zoned_dev->nr_zones = bdev_nr_zones(bdev); in dmz_fixup_devices()
814 zoned_dev->nr_zones = bdev_nr_zones(bdev); in dmz_fixup_devices()
821 reg_dev->nr_zones = in dmz_fixup_devices()
825 zone_offset = reg_dev->nr_zones; in dmz_fixup_devices()
828 zone_offset += dmz->dev[i].nr_zones; in dmz_fixup_devices()
dm-default-key.c
392 struct dm_report_zones_args *args, unsigned int nr_zones) in default_key_report_zones() argument
398 args, nr_zones); in default_key_report_zones()
dm.h
109 unsigned int nr_zones, report_zones_cb cb, void *data);
dm-core.h
142 unsigned int nr_zones; member
dm-flakey.c
657 struct dm_report_zones_args *args, unsigned int nr_zones) in flakey_report_zones() argument
663 args, nr_zones); in flakey_report_zones()
dm-zoned.h
65 unsigned int nr_zones; member
dm-crypt.c
3217 struct dm_report_zones_args *args, unsigned int nr_zones) in crypt_report_zones() argument
3223 args, nr_zones); in crypt_report_zones()
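
The dm-linear, dm-flakey, dm-default-key, and dm-crypt hits above all share one shape: a target-level report_zones method that forwards the caller's nr_zones budget to dm_report_zones(). Below is a hedged sketch of that pass-through pattern, not any target's actual code; "my_target_ctx", "my_target_report_zones", and the dev/start fields are made-up names, and the mapping math assumes a simple linear offset like dm-linear's.

/* Hedged sketch of a pass-through dm target's report_zones method.
 * Only dm_report_zones(), dm_target_offset() and the method signature
 * come from the entries above and the device-mapper headers.
 */
#include <linux/device-mapper.h>

struct my_target_ctx {              /* hypothetical per-target state */
	struct dm_dev *dev;         /* underlying zoned device */
	sector_t start;             /* start of the mapped range on ->dev */
};

static int my_target_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct my_target_ctx *mt = ti->private;

	/*
	 * dm_report_zones() runs blkdev_report_zones() on the underlying
	 * bdev and remaps each reported zone back into the target's
	 * address space before invoking the original callback, honoring
	 * the nr_zones limit.
	 */
	return dm_report_zones(mt->dev->bdev, mt->start,
			       mt->start + dm_target_offset(ti, args->next_sector),
			       args, nr_zones);
}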
/drivers/scsi/
sd_zbc.c
202 unsigned int nr_zones, size_t *buflen) in sd_zbc_alloc_report_buffer() argument
217 nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); in sd_zbc_alloc_report_buffer()
218 bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); in sd_zbc_alloc_report_buffer()
257 unsigned int nr_zones, report_zones_cb cb, void *data) in sd_zbc_report_zones() argument
276 buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen); in sd_zbc_report_zones()
280 while (zone_idx < nr_zones && lba < sdkp->capacity) { in sd_zbc_report_zones()
286 nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64); in sd_zbc_report_zones()
290 for (i = 0; i < nr && zone_idx < nr_zones; i++) { in sd_zbc_report_zones()
379 for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) { in sd_zbc_update_wp_offset_workfn()
585 sdkp->zone_info.nr_zones * sizeof(unsigned int)); in sd_zbc_zone_wp_update()
[all …]
sd.h
80 u32 nr_zones; member
256 unsigned int nr_zones, report_zones_cb cb, void *data);
scsi_debug.c
303 unsigned int nr_zones; member
2715 return devip->nr_zones != 0; in sdebug_dev_is_zoned()
2732 WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones); in zbc_zone()
2785 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_close_imp_open_zone()
4611 for (i = 0; i < devip->nr_zones; i++, zsp++) { in zbc_open_all()
4689 for (i = 0; i < devip->nr_zones; i++) in zbc_close_all()
4761 for (i = 0; i < devip->nr_zones; i++) in zbc_finish_all()
4842 for (i = 0; i < devip->nr_zones; i++) in zbc_rwp_all()
4997 devip->nr_zones = div_u64(capacity + devip->zsize - 1, devip->zsize); in sdebug_device_create_zones()
5018 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones; in sdebug_device_create_zones()
[all …]
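
On the SCSI side, sd_zbc.c and scsi_debug.c sit on the provider end of the same interface: they implement the gendisk-level report_zones method, translating the caller's (sector, nr_zones) window into struct blk_zone descriptors and handing them to the report_zones_cb one at a time. A hedged sketch of that provider-side loop follows; "my_dev" and "my_report_zones" are made-up names, and the sketch assumes a power-of-two zone size and reports every zone as empty for brevity.

#include <linux/blkdev.h>
#include <linux/log2.h>
#include <linux/string.h>

struct my_dev {                        /* hypothetical driver state */
	unsigned int nr_zones;         /* total zones on the device */
	sector_t zone_size_sects;      /* zone size in 512 B sectors, power of two */
};

static int my_report_zones(struct gendisk *disk, sector_t sector,
			   unsigned int nr_zones, report_zones_cb cb,
			   void *data)
{
	struct my_dev *dev = disk->private_data;
	unsigned int zno = sector >> ilog2(dev->zone_size_sects);
	unsigned int idx = 0;
	struct blk_zone zone;
	int ret;

	while (idx < nr_zones && zno < dev->nr_zones) {
		/* Describe one zone; a real driver reads its tracked state. */
		memset(&zone, 0, sizeof(zone));
		zone.start = (sector_t)zno * dev->zone_size_sects;
		zone.len = dev->zone_size_sects;
		zone.capacity = dev->zone_size_sects;
		zone.wp = zone.start;                   /* sketch: all zones empty */
		zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone.cond = BLK_ZONE_COND_EMPTY;

		/* Hand it to the block layer's callback; non-zero aborts. */
		ret = cb(&zone, idx, data);
		if (ret)
			return ret;

		idx++;
		zno++;
	}

	return idx;     /* number of zones reported */
}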
/drivers/block/null_blk/
zoned.c
105 dev->nr_zones = DIV_ROUND_UP_SECTOR_T(dev_capacity_sects, in null_init_zoned_dev()
107 dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone), in null_init_zoned_dev()
114 if (dev->zone_nr_conv >= dev->nr_zones) { in null_init_zoned_dev()
115 dev->zone_nr_conv = dev->nr_zones - 1; in null_init_zoned_dev()
121 if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) { in null_init_zoned_dev()
131 } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) { in null_init_zoned_dev()
152 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) { in null_init_zoned_dev()
181 nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0); in null_register_zoned_dev()
199 unsigned int nr_zones, report_zones_cb cb, void *data) in null_report_zones() argument
209 if (first_zone >= dev->nr_zones) in null_report_zones()
[all …]
trace.h
57 TP_PROTO(struct nullb *nullb, unsigned int nr_zones),
58 TP_ARGS(nullb, nr_zones),
61 __field(unsigned int, nr_zones)
64 __entry->nr_zones = nr_zones;
68 __print_disk_name(__entry->disk), __entry->nr_zones)
null_blk.h
84 unsigned int nr_zones; member
156 unsigned int nr_zones, report_zones_cb cb, void *data);
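
The null_blk and scsi_debug hits also show where nr_zones comes from in the first place: capacity divided by zone size, rounded up so a smaller final zone still counts (DIV_ROUND_UP_SECTOR_T in null_blk, div_u64(capacity + zsize - 1, zsize) in scsi_debug). A tiny standalone C example of that arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Made-up device: 16 GiB plus a 4 MiB tail, 256 MiB zones (512 B sectors). */
	unsigned long long capacity_sects  = 16ULL * 1024 * 2048 + 8192;
	unsigned long long zone_size_sects = 256ULL * 2048;

	/* Ceiling division, equivalent to div_u64(capacity + zsize - 1, zsize). */
	unsigned long long nr_zones =
		(capacity_sects + zone_size_sects - 1) / zone_size_sects;

	printf("%llu zones\n", nr_zones);   /* prints: 65 zones (partial last zone counts) */
	return 0;
}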
/drivers/nvme/host/
zns.c
114 unsigned int nr_zones, size_t *buflen) in nvme_zns_alloc_report_buffer() argument
123 nr_zones = min_t(unsigned int, nr_zones, in nvme_zns_alloc_report_buffer()
127 nr_zones * sizeof(struct nvme_zone_descriptor); in nvme_zns_alloc_report_buffer()
170 unsigned int nr_zones, report_zones_cb cb, void *data) in nvme_ns_report_zones() argument
182 report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); in nvme_ns_report_zones()
196 while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) { in nvme_ns_report_zones()
207 nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones); in nvme_ns_report_zones()
211 for (i = 0; i < nz && zone_idx < nr_zones; i++) { in nvme_ns_report_zones()
multipath.c
431 unsigned int nr_zones, report_zones_cb cb, void *data) in nvme_ns_head_report_zones() argument
440 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); in nvme_ns_head_report_zones()
881 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()
nvme.h
1037 unsigned int nr_zones, report_zones_cb cb, void *data);
core.c
2155 unsigned int nr_zones, report_zones_cb cb, void *data) in nvme_report_zones() argument
2157 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, in nvme_report_zones()
/drivers/nvme/target/
zns.c
210 u64 nr_zones; member
231 if (rz->nr_zones < rz->out_nr_zones) { in nvmet_bdev_report_zone_cb()
250 rz->nr_zones++; in nvmet_bdev_report_zone_cb()
277 __le64 nr_zones; in nvmet_bdev_zone_zmgmt_recv_work() local
285 .nr_zones = 0, in nvmet_bdev_zone_zmgmt_recv_work()
310 rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones); in nvmet_bdev_zone_zmgmt_recv_work()
312 nr_zones = cpu_to_le64(rz_data.nr_zones); in nvmet_bdev_zone_zmgmt_recv_work()
313 status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones)); in nvmet_bdev_zone_zmgmt_recv_work()
403 unsigned int nr_zones = bdev_nr_zones(bdev); in nvmet_bdev_zone_mgmt_emulate_all() local
411 d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)), in nvmet_bdev_zone_mgmt_emulate_all()
[all …]
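
The nvme target entries show the consumer side of the interface: nvmet passes a report_zones_cb into blkdev_report_zones() and counts (or copies out) zones in the callback, capped by the requester's out_nr_zones. A hedged sketch of that shape; "my_report_ctx", "my_report_zone_cb" and "my_count_zones" are made-up names, and only blkdev_report_zones(), bdev_nr_zones() and the callback signature come from the block layer.

#include <linux/blkdev.h>

struct my_report_ctx {                  /* hypothetical per-call context */
	unsigned int nr_zones;          /* zones accepted so far */
	unsigned int out_nr_zones;      /* room the requester provided */
};

static int my_report_zone_cb(struct blk_zone *zone, unsigned int idx,
			     void *data)
{
	struct my_report_ctx *ctx = data;

	/* Stop accepting once the requester's buffer is accounted for. */
	if (ctx->nr_zones >= ctx->out_nr_zones)
		return 0;

	/* A real consumer would copy *zone out to its own format here. */
	ctx->nr_zones++;
	return 0;
}

static int my_count_zones(struct block_device *bdev,
			  unsigned int out_nr_zones)
{
	struct my_report_ctx ctx = {
		.nr_zones = 0,
		.out_nr_zones = out_nr_zones,
	};
	int ret;

	/* Walk every zone on the device; the cb runs once per zone. */
	ret = blkdev_report_zones(bdev, 0, bdev_nr_zones(bdev),
				  my_report_zone_cb, &ctx);
	if (ret < 0)
		return ret;

	return ctx.nr_zones;
}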
/drivers/block/
ublk_drv.c
73 __u32 nr_zones; member
234 int nr_zones; in ublk_dev_param_zoned_validate() local
242 nr_zones = ublk_get_nr_zones(ub); in ublk_dev_param_zoned_validate()
244 if (p->max_active_zones > nr_zones) in ublk_dev_param_zoned_validate()
247 if (p->max_open_zones > nr_zones) in ublk_dev_param_zoned_validate()
265 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); in ublk_dev_param_zoned_apply()
272 unsigned int nr_zones, size_t *buflen) in ublk_alloc_report_buffer() argument
278 nr_zones = min_t(unsigned int, nr_zones, in ublk_alloc_report_buffer()
279 ublk->ub_disk->nr_zones); in ublk_alloc_report_buffer()
281 bufsize = nr_zones * sizeof(struct blk_zone); in ublk_alloc_report_buffer()
[all …]
virtio_blk.c
538 unsigned int nr_zones, in virtblk_alloc_report_buffer() argument
545 nr_zones = min_t(unsigned int, nr_zones, in virtblk_alloc_report_buffer()
549 nr_zones * sizeof(struct virtio_blk_zone_descriptor); in virtblk_alloc_report_buffer()
667 unsigned int nr_zones, report_zones_cb cb, in virtblk_report_zones() argument
680 report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen); in virtblk_report_zones()
691 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) { in virtblk_report_zones()
699 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones), in virtblk_report_zones()
700 nr_zones); in virtblk_report_zones()
704 for (i = 0; i < nz && zone_idx < nr_zones; i++) { in virtblk_report_zones()
/drivers/powercap/
powercap_sys.c
566 control_type->nr_zones++; in powercap_register_zone()
595 control_type->nr_zones--; in powercap_unregister_zone()
648 if (control_type->nr_zones) { in powercap_unregister_control_type()