| /drivers/block/null_blk/ |
| D | zoned.c |
| 25 | struct nullb_zone *zone) | in null_init_zone_lock() argument |
| 28 | spin_lock_init(&zone->spinlock); | in null_init_zone_lock() |
| 30 | mutex_init(&zone->mutex); | in null_init_zone_lock() |
| 34 | struct nullb_zone *zone) | in null_lock_zone() argument |
| 37 | spin_lock_irq(&zone->spinlock); | in null_lock_zone() |
| 39 | mutex_lock(&zone->mutex); | in null_lock_zone() |
| 43 | struct nullb_zone *zone) | in null_unlock_zone() argument |
| 46 | spin_unlock_irq(&zone->spinlock); | in null_unlock_zone() |
| 48 | mutex_unlock(&zone->mutex); | in null_unlock_zone() |
| 55 | struct nullb_zone *zone; | in null_init_zoned_dev() local |
| … | [all …] | |
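
The matched lines in zoned.c show one locking idiom three times over: null_init_zone_lock(), null_lock_zone() and null_unlock_zone() each pick between a per-zone spinlock and a per-zone mutex. The selecting condition is not among the matches, so the sketch below assumes a flag that says whether zone I/O may sleep; it is a minimal userspace model of the pattern using pthreads, not the driver's code.

```c
/* Sketch of a per-zone lock that is either a spinlock or a mutex,
 * mirroring the null_init_zone_lock()/null_lock_zone()/null_unlock_zone()
 * pattern in the matches above. The may_sleep flag is an assumption. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct nullb_zone_lock {
	bool may_sleep;			/* zone I/O can sleep -> use a mutex */
	union {
		pthread_spinlock_t spinlock;
		pthread_mutex_t mutex;
	};
};

static void zone_lock_init(struct nullb_zone_lock *zl, bool may_sleep)
{
	zl->may_sleep = may_sleep;
	if (may_sleep)
		pthread_mutex_init(&zl->mutex, NULL);
	else
		pthread_spin_init(&zl->spinlock, PTHREAD_PROCESS_PRIVATE);
}

static void zone_lock(struct nullb_zone_lock *zl)
{
	if (zl->may_sleep)
		pthread_mutex_lock(&zl->mutex);
	else
		pthread_spin_lock(&zl->spinlock);
}

static void zone_unlock(struct nullb_zone_lock *zl)
{
	if (zl->may_sleep)
		pthread_mutex_unlock(&zl->mutex);
	else
		pthread_spin_unlock(&zl->spinlock);
}

int main(void)
{
	struct nullb_zone_lock zl;

	zone_lock_init(&zl, false);	/* spinning variant */
	zone_lock(&zl);
	puts("zone locked");
	zone_unlock(&zl);
	return 0;
}
```

Build with `cc -pthread`; the point is only the init/lock/unlock dispatch on one flag, as the matches suggest.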
|
| /drivers/md/dm-vdo/ |
| D | logical-zone.c |
| 55 | struct logical_zone *zone = &zones->zones[zone_number]; | in initialize_zone() local |
| 58 | result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations); | in initialize_zone() |
| 63 | zone->next = &zones->zones[zone_number + 1]; | in initialize_zone() |
| 65 | vdo_initialize_completion(&zone->completion, vdo, | in initialize_zone() |
| 67 | zone->zones = zones; | in initialize_zone() |
| 68 | zone->zone_number = zone_number; | in initialize_zone() |
| 69 | zone->thread_id = vdo->thread_config.logical_threads[zone_number]; | in initialize_zone() |
| 70 | zone->block_map_zone = &vdo->block_map->zones[zone_number]; | in initialize_zone() |
| 71 | INIT_LIST_HEAD(&zone->write_vios); | in initialize_zone() |
| 72 | vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION); | in initialize_zone() |
| … | [all …] | |
|
| D | block-map.c |
| 60 | struct block_map_zone *zone; | member |
| 98 | struct block_map_zone *zone; | member |
| 199 | info->vio->completion.callback_thread_id = cache->zone->thread_id; | in initialize_info() |
| 249 | VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), | in assert_on_cache_thread() |
| 251 | function_name, cache->zone->thread_id, thread_id); | in assert_on_cache_thread() |
| 257 | VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), | in assert_io_allowed() |
| 624 | static void check_for_drain_complete(struct block_map_zone *zone) | in check_for_drain_complete() argument |
| 626 | if (vdo_is_state_draining(&zone->state) && | in check_for_drain_complete() |
| 627 | (zone->active_lookups == 0) && | in check_for_drain_complete() |
| 628 | !vdo_waitq_has_waiters(&zone->flush_waiters) && | in check_for_drain_complete() |
| … | [all …] | |
|
| D | physical-zone.c |
| 329 | struct physical_zone *zone = &zones->zones[zone_number]; | in initialize_zone() local |
| 331 | result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations); | in initialize_zone() |
| 335 | result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool); | in initialize_zone() |
| 337 | vdo_int_map_free(zone->pbn_operations); | in initialize_zone() |
| 341 | zone->zone_number = zone_number; | in initialize_zone() |
| 342 | zone->thread_id = vdo->thread_config.physical_threads[zone_number]; | in initialize_zone() |
| 343 | zone->allocator = &vdo->depot->allocators[zone_number]; | in initialize_zone() |
| 344 | zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count]; | in initialize_zone() |
| 345 | result = vdo_make_default_thread(vdo, zone->thread_id); | in initialize_zone() |
| 347 | free_pbn_lock_pool(vdo_forget(zone->lock_pool)); | in initialize_zone() |
| … | [all …] | |
|
| D | dedupe.c |
| 321 | static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name) | in assert_in_hash_zone() argument |
| 323 | VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), | in assert_in_hash_zone() |
| 332 | static inline bool change_timer_state(struct hash_zone *zone, int old, int new) | in change_timer_state() argument |
| 334 | return (atomic_cmpxchg(&zone->timer_state, old, new) == old); | in change_timer_state() |
| 342 | static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock) | in return_hash_lock_to_pool() argument |
| 348 | list_add_tail(&lock->pool_node, &zone->lock_pool); | in return_hash_lock_to_pool() |
| 694 | vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn, | in unlock_duplicate_pbn() |
| 718 | struct hash_zone *zone = context->zone; | in release_context() local |
| 720 | WRITE_ONCE(zone->active, zone->active - 1); | in release_context() |
| 721 | list_move(&context->list_entry, &zone->available); | in release_context() |
| … | [all …] | |
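
change_timer_state() above advances the hash zone's timer state only if it still holds the expected old value, via atomic_cmpxchg(). Below is a self-contained sketch of that compare-and-swap transition using C11 atomics; the state names are placeholders, not VDO's.

```c
/* Sketch of the change_timer_state() idiom: an atomic state word that only
 * moves old -> new if it still holds old, so concurrent callers race safely.
 * State names are illustrative placeholders. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { TIMER_IDLE, TIMER_RUNNING, TIMER_FIRED };

static atomic_int timer_state = TIMER_IDLE;

static bool change_timer_state(int old, int new)
{
	/* atomic_compare_exchange_strong() rewrites its expected argument on
	 * failure, so use a local copy; success means the transition was ours. */
	int expected = old;

	return atomic_compare_exchange_strong(&timer_state, &expected, new);
}

int main(void)
{
	printf("start timer: %d\n", change_timer_state(TIMER_IDLE, TIMER_RUNNING));
	printf("start again: %d\n", change_timer_state(TIMER_IDLE, TIMER_RUNNING));
	printf("fire timer:  %d\n", change_timer_state(TIMER_RUNNING, TIMER_FIRED));
	return 0;
}
```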
|
| /drivers/md/dm-vdo/indexer/ |
| D | index.c |
| 78 | static bool is_zone_chapter_sparse(const struct index_zone *zone, u64 virtual_chapter) | in is_zone_chapter_sparse() argument |
| 80 | return uds_is_chapter_sparse(zone->index->volume->geometry, | in is_zone_chapter_sparse() |
| 81 | zone->oldest_virtual_chapter, | in is_zone_chapter_sparse() |
| 82 | zone->newest_virtual_chapter, virtual_chapter); | in is_zone_chapter_sparse() |
| 85 | static int launch_zone_message(struct uds_zone_message message, unsigned int zone, | in launch_zone_message() argument |
| 97 | request->zone_number = zone; | in launch_zone_message() |
| 110 | unsigned int zone; | in enqueue_barrier_messages() local |
| 112 | for (zone = 0; zone < index->zone_count; zone++) { | in enqueue_barrier_messages() |
| 113 | int result = launch_zone_message(message, zone, index); | in enqueue_barrier_messages() |
| 127 | struct index_zone *zone; | in triage_index_request() local |
| … | [all …] | |
|
| D | volume-index.c |
| 543 | unsigned int zone = | in uds_get_volume_index_record() local |
| 545 | struct mutex *mutex = &volume_index->zones[zone].hook_mutex; | in uds_get_volume_index_record() |
| 627 | struct volume_sub_index_zone *zone = &sub_index->zones[zone_number]; | in set_volume_sub_index_zone_open_chapter() local |
| 631 | zone->virtual_chapter_low = (virtual_chapter >= sub_index->chapter_count ? | in set_volume_sub_index_zone_open_chapter() |
| 634 | zone->virtual_chapter_high = virtual_chapter; | in set_volume_sub_index_zone_open_chapter() |
| 651 | (unsigned long long) zone->virtual_chapter_low); | in set_volume_sub_index_zone_open_chapter() |
| 652 | zone->early_flushes++; | in set_volume_sub_index_zone_open_chapter() |
| 653 | zone->virtual_chapter_low++; | in set_volume_sub_index_zone_open_chapter() |
| 655 | u64 first_expired = zone->virtual_chapter_low; | in set_volume_sub_index_zone_open_chapter() |
| 657 | if (first_expired + expire_count < zone->virtual_chapter_high) { | in set_volume_sub_index_zone_open_chapter() |
| … | [all …] | |
|
| /drivers/md/ |
| D | dm-zoned-metadata.c |
| 137 | struct dm_zone *zone; | member |
| 221 | static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone) | in dmz_dev_zone_id() argument |
| 223 | if (WARN_ON(!zone)) | in dmz_dev_zone_id() |
| 226 | return zone->id - zone->dev->zone_offset; | in dmz_dev_zone_id() |
| 229 | sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) | in dmz_start_sect() argument |
| 231 | unsigned int zone_id = dmz_dev_zone_id(zmd, zone); | in dmz_start_sect() |
| 236 | sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) | in dmz_start_block() argument |
| 238 | unsigned int zone_id = dmz_dev_zone_id(zmd, zone); | in dmz_start_block() |
| 311 | struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL); | in dmz_insert() local |
| 313 | if (!zone) | in dmz_insert() |
| … | [all …] | |
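
dmz_dev_zone_id() subtracts the device's zone_offset from the global zone id, and dmz_start_sect()/dmz_start_block() turn that id into a start position. The shift-based conversion in the sketch below is an assumption (the shift lines are not among the matches) consistent with dm-zoned's fixed, power-of-two zone size; the field names are illustrative.

```c
/* Sketch of the dmz_dev_zone_id()/dmz_start_sect()/dmz_start_block()
 * arithmetic: a zone's device-relative id, then its start in 512 B sectors
 * or 4 KiB blocks. The power-of-two shifts and field names are assumptions. */
#include <stdint.h>
#include <stdio.h>

struct dmz_geom {
	unsigned int zone_offset;		/* first zone id of this device */
	unsigned int zone_nr_sectors_shift;	/* log2(sectors per zone) */
	unsigned int zone_nr_blocks_shift;	/* log2(blocks per zone) */
};

static unsigned int dev_zone_id(const struct dmz_geom *g, unsigned int zone_id)
{
	return zone_id - g->zone_offset;
}

static uint64_t zone_start_sect(const struct dmz_geom *g, unsigned int zone_id)
{
	return (uint64_t)dev_zone_id(g, zone_id) << g->zone_nr_sectors_shift;
}

static uint64_t zone_start_block(const struct dmz_geom *g, unsigned int zone_id)
{
	return (uint64_t)dev_zone_id(g, zone_id) << g->zone_nr_blocks_shift;
}

int main(void)
{
	/* 256 MiB zones: 2^19 sectors of 512 B, 2^16 blocks of 4 KiB. */
	struct dmz_geom g = { .zone_offset = 16,
			      .zone_nr_sectors_shift = 19,
			      .zone_nr_blocks_shift = 16 };

	printf("zone 20 starts at sector %llu, block %llu\n",
	       (unsigned long long)zone_start_sect(&g, 20),
	       (unsigned long long)zone_start_block(&g, 20));
	return 0;
}
```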
|
| D | dm-zoned-target.c |
| 21 | struct dm_zone *zone; | member |
| 86 | struct dm_zone *zone = bioctx->zone; | in dmz_bio_endio() local |
| 88 | if (zone) { | in dmz_bio_endio() |
| 91 | dmz_is_seq(zone)) | in dmz_bio_endio() |
| 92 | set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); | in dmz_bio_endio() |
| 93 | dmz_deactivate_zone(zone); | in dmz_bio_endio() |
| 116 | static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, | in dmz_submit_bio() argument |
| 122 | struct dmz_dev *dev = zone->dev; | in dmz_submit_bio() |
| 134 | dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); | in dmz_submit_bio() |
| 144 | if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) | in dmz_submit_bio() |
| … | [all …] | |
|
| D | raid0.c |
| 68 | struct strip_zone *zone; | in create_strip_zones() local |
| 155 | zone = &conf->strip_zone[0]; | in create_strip_zones() |
| 203 | zone->nb_dev = cnt; | in create_strip_zones() |
| 204 | zone->zone_end = smallest->sectors * cnt; | in create_strip_zones() |
| 206 | curr_zone_end = zone->zone_end; | in create_strip_zones() |
| 213 | zone = conf->strip_zone + i; | in create_strip_zones() |
| 217 | zone->dev_start = smallest->sectors; | in create_strip_zones() |
| 223 | if (rdev->sectors <= zone->dev_start) { | in create_strip_zones() |
| 243 | zone->nb_dev = c; | in create_strip_zones() |
| 244 | sectors = (smallest->sectors - zone->dev_start) * c; | in create_strip_zones() |
| … | [all …] | |
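
create_strip_zones() lays RAID0 out as one strip zone per distinct member size: zone 0 spans every member up to the smallest device, and each later zone stripes the remaining capacity of the members that extend past the previous boundary (zone->dev_start, zone->nb_dev, zone->zone_end in the matches above). A compact sketch of that layout computation, with made-up structure names:

```c
/* Sketch of RAID0 strip-zone layout: given member sizes (in sectors), each
 * zone covers the capacity between one distinct device size and the next,
 * striped over every device large enough to reach it. Names are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct strip_zone {
	uint64_t dev_start;	/* offset into each member where the zone starts */
	uint64_t zone_end;	/* cumulative end of the zone in array sectors */
	int nb_dev;		/* number of members striped in this zone */
};

static int cmp_u64(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;

	return (x > y) - (x < y);
}

static int build_zones(uint64_t *sizes, int ndev, struct strip_zone *zones)
{
	uint64_t prev = 0, total = 0;
	int nzones = 0;

	qsort(sizes, ndev, sizeof(*sizes), cmp_u64);
	for (int i = 0; i < ndev; i++) {
		if (sizes[i] == prev)
			continue;	/* same size: no new zone boundary */
		int members = ndev - i;	/* devices reaching past prev */

		total += (sizes[i] - prev) * members;
		zones[nzones].dev_start = prev;
		zones[nzones].zone_end = total;
		zones[nzones].nb_dev = members;
		nzones++;
		prev = sizes[i];
	}
	return nzones;
}

int main(void)
{
	uint64_t sizes[] = { 1000, 4000, 4000, 6000 };	/* member sizes, sectors */
	struct strip_zone zones[4];
	int n = build_zones(sizes, 4, zones);

	for (int i = 0; i < n; i++)
		printf("zone %d: dev_start=%llu nb_dev=%d zone_end=%llu\n", i,
		       (unsigned long long)zones[i].dev_start, zones[i].nb_dev,
		       (unsigned long long)zones[i].zone_end);
	return 0;
}
```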
|
| D | dm-zone.c |
| 82 | static int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, | in dm_report_zones_cb() argument |
| 91 | if (zone->start >= args->start + args->tgt->len) | in dm_report_zones_cb() |
| 98 | zone->start += sector_diff; | in dm_report_zones_cb() |
| 99 | if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { | in dm_report_zones_cb() |
| 100 | if (zone->cond == BLK_ZONE_COND_FULL) | in dm_report_zones_cb() |
| 101 | zone->wp = zone->start + zone->len; | in dm_report_zones_cb() |
| 102 | else if (zone->cond == BLK_ZONE_COND_EMPTY) | in dm_report_zones_cb() |
| 103 | zone->wp = zone->start; | in dm_report_zones_cb() |
| 105 | zone->wp += sector_diff; | in dm_report_zones_cb() |
| 108 | args->next_sector = zone->start + zone->len; | in dm_report_zones_cb() |
| … | [all …] | |
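
dm_report_zones_cb() shifts each reported zone from device sectors into the target's mapped range and fixes up the write pointer for the two conditions where it has a fixed value. A standalone sketch of that remap; the types are trimmed stand-ins and the sector_diff computation (not among the matches) is passed in directly:

```c
/* Sketch of the dm_report_zones_cb() remap: shift a reported zone by the
 * difference between the target's mapped start and the device start, and
 * recompute the write pointer for FULL/EMPTY zones. Minimal stand-in types. */
#include <stdint.h>
#include <stdio.h>

enum zone_type { ZONE_TYPE_CONVENTIONAL, ZONE_TYPE_SEQWRITE_REQ };
enum zone_cond { ZONE_COND_EMPTY, ZONE_COND_IMP_OPEN, ZONE_COND_FULL };

struct rep_zone {
	uint64_t start, len, wp;	/* 512 B sectors */
	enum zone_type type;
	enum zone_cond cond;
};

static void remap_zone(struct rep_zone *zone, int64_t sector_diff)
{
	zone->start += sector_diff;
	if (zone->type == ZONE_TYPE_CONVENTIONAL)
		return;			/* conventional zones have no wp */
	if (zone->cond == ZONE_COND_FULL)
		zone->wp = zone->start + zone->len;
	else if (zone->cond == ZONE_COND_EMPTY)
		zone->wp = zone->start;
	else
		zone->wp += sector_diff;
}

int main(void)
{
	struct rep_zone z = { .start = 524288, .len = 524288, .wp = 524544,
			      .type = ZONE_TYPE_SEQWRITE_REQ,
			      .cond = ZONE_COND_IMP_OPEN };

	remap_zone(&z, -524288);	/* device zone 1 maps to target sector 0 */
	printf("start=%llu wp=%llu\n", (unsigned long long)z.start,
	       (unsigned long long)z.wp);
	return 0;
}
```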
|
| D | dm-zoned.h |
| 206 | sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone); | |
| 207 | sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone); | |
| 220 | void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone); | |
| 222 | void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone, | |
| 224 | void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone); | |
| 240 | static inline void dmz_activate_zone(struct dm_zone *zone) | in dmz_activate_zone() argument |
| 242 | atomic_inc(&zone->refcount); | in dmz_activate_zone() |
| 245 | int dmz_lock_zone_reclaim(struct dm_zone *zone); | |
| 246 | void dmz_unlock_zone_reclaim(struct dm_zone *zone); | |
| 252 | void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone); | |
| … | [all …] | |
|
| /drivers/net/ethernet/mellanox/mlx4/ |
| D | alloc.c |
| 250 | struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); | in mlx4_zone_add_one() local |
| 252 | if (NULL == zone) | in mlx4_zone_add_one() |
| 255 | zone->flags = flags; | in mlx4_zone_add_one() |
| 256 | zone->bitmap = bitmap; | in mlx4_zone_add_one() |
| 257 | zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0; | in mlx4_zone_add_one() |
| 258 | zone->priority = priority; | in mlx4_zone_add_one() |
| 259 | zone->offset = offset; | in mlx4_zone_add_one() |
| 263 | zone->uid = zone_alloc->last_uid++; | in mlx4_zone_add_one() |
| 264 | zone->allocator = zone_alloc; | in mlx4_zone_add_one() |
| 274 | list_add_tail(&zone->prio_list, &it->prio_list); | in mlx4_zone_add_one() |
| … | [all …] | |
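
mlx4_zone_add_one() fills in the new zone entry and then links it into the allocator's list in priority order (the list_add_tail() into it->prio_list above). A generic sketch of that sorted insertion using a plain singly linked list; names are illustrative, and the tie-breaking rule (new entries go after existing ones of equal priority) is an assumption:

```c
/* Sketch of inserting a zone entry into a list kept sorted by priority,
 * in the spirit of mlx4_zone_add_one()'s prio_list handling. */
#include <stdio.h>
#include <stdlib.h>

struct zone_entry {
	int priority;
	int uid;
	struct zone_entry *next;
};

static void zone_insert_by_priority(struct zone_entry **head, struct zone_entry *zone)
{
	struct zone_entry **it = head;

	/* Walk past every entry with a priority <= ours, then link in, so
	 * equal-priority entries keep their insertion order. */
	while (*it && (*it)->priority <= zone->priority)
		it = &(*it)->next;
	zone->next = *it;
	*it = zone;
}

int main(void)
{
	struct zone_entry *head = NULL;
	int prios[] = { 3, 1, 2, 1 };

	for (int i = 0; i < 4; i++) {
		struct zone_entry *z = calloc(1, sizeof(*z));

		z->priority = prios[i];
		z->uid = i;
		zone_insert_by_priority(&head, z);
	}
	for (struct zone_entry *z = head; z; z = z->next)
		printf("uid=%d priority=%d\n", z->uid, z->priority);
	return 0;
}
```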
|
| /drivers/thermal/tegra/ |
| D | tegra-bpmp-thermal.c |
| 33 | static int __tegra_bpmp_thermal_get_temp(struct tegra_bpmp_thermal_zone *zone, | in __tegra_bpmp_thermal_get_temp() argument |
| 43 | req.get_temp.zone = zone->idx; | in __tegra_bpmp_thermal_get_temp() |
| 52 | err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg); | in __tegra_bpmp_thermal_get_temp() |
| 67 | struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz); | in tegra_bpmp_thermal_get_temp() local |
| 69 | return __tegra_bpmp_thermal_get_temp(zone, out_temp); | in tegra_bpmp_thermal_get_temp() |
| 74 | struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz); | in tegra_bpmp_thermal_set_trips() local |
| 81 | req.set_trip.zone = zone->idx; | in tegra_bpmp_thermal_set_trips() |
| 91 | err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg); | in tegra_bpmp_thermal_set_trips() |
| 102 | struct tegra_bpmp_thermal_zone *zone; | in tz_device_update_work_fn() local |
| 104 | zone = container_of(work, struct tegra_bpmp_thermal_zone, | in tz_device_update_work_fn() |
| … | [all …] | |
|
| /drivers/mtd/ |
| D | sm_ftl.c |
| 192 | static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset) | in sm_mkoffset() argument |
| 195 | WARN_ON(zone < 0 || zone >= ftl->zone_count); | in sm_mkoffset() |
| 202 | return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset; | in sm_mkoffset() |
| 207 | int *zone, int *block, int *boffset) | in sm_break_offset() argument |
| 212 | *zone = offset >= ftl->zone_count ? -1 : offset; | in sm_break_offset() |
| 238 | int zone, int block, int boffset, | in sm_read_sector() argument |
| 270 | if (zone == 0 && block == ftl->cis_block && boffset == | in sm_read_sector() |
| 282 | ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); | in sm_read_sector() |
| 287 | block, zone, ret); | in sm_read_sector() |
| 305 | " as bad" , block, zone); | in sm_read_sector() |
| … | [all …] | |
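
sm_mkoffset() packs a (zone, block, byte offset) triple into a single linear MTD offset, and sm_break_offset() splits it back apart with the inverse divisions. A runnable sketch of that round trip; the zone-size and block-size constants below are illustrative, not SmartMedia's:

```c
/* Sketch of the sm_mkoffset()/sm_break_offset() pair: pack a
 * (zone, block, byte-offset) triple into one linear offset and split it
 * back apart. Constants are assumptions for illustration. */
#include <assert.h>
#include <stdio.h>

#define MAX_ZONE_SIZE 1024		/* blocks per zone (assumed) */

struct ftl_geom {
	int zone_count;
	int block_size;			/* bytes per block */
};

static long long mkoffset(const struct ftl_geom *g, int zone, int block, int boffset)
{
	assert(zone >= 0 && zone < g->zone_count);
	return ((long long)zone * MAX_ZONE_SIZE + block) * g->block_size + boffset;
}

static void break_offset(const struct ftl_geom *g, long long offset,
			 int *zone, int *block, int *boffset)
{
	*boffset = offset % g->block_size;
	offset /= g->block_size;
	*block = offset % MAX_ZONE_SIZE;
	offset /= MAX_ZONE_SIZE;
	/* Past the last zone: flag it, as sm_break_offset() does with -1. */
	*zone = offset >= g->zone_count ? -1 : (int)offset;
}

int main(void)
{
	struct ftl_geom g = { .zone_count = 4, .block_size = 16384 };
	int zone, block, boffset;

	break_offset(&g, mkoffset(&g, 2, 37, 512), &zone, &block, &boffset);
	printf("zone=%d block=%d boffset=%d\n", zone, block, boffset);
	return 0;
}
```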
|
| /drivers/platform/x86/dell/ |
| D | alienware-wmi.c |
| 239 | static int parse_rgb(const char *buf, struct platform_zone *zone) | in parse_rgb() argument |
| 259 | zone->colors = repackager.cp; | in parse_rgb() |
| 265 | u8 zone; | in match_zone() local |
| 267 | for (zone = 0; zone < quirks->num_zones; zone++) { | in match_zone() |
| 268 | if ((struct device_attribute *)zone_data[zone].attr == attr) { | in match_zone() |
| 270 | zone_data[zone].location); | in match_zone() |
| 271 | return &zone_data[zone]; | in match_zone() |
| 280 | static int alienware_update_led(struct platform_zone *zone) | in alienware_update_led() argument |
| 289 | wmax_basic_args.led_mask = 1 << zone->location; | in alienware_update_led() |
| 290 | wmax_basic_args.colors = zone->colors; | in alienware_update_led() |
| … | [all …] | |
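
parse_rgb() converts the "RRGGBB" string written to the zone's sysfs attribute into the colour bytes that alienware_update_led() later hands to the WMI call. A minimal userspace sketch of the parsing step; the struct is a stand-in for the driver's platform_zone colour storage:

```c
/* Sketch of parse_rgb(): turn an "RRGGBB" hex string into the three
 * per-zone colour bytes. The struct is an illustrative stand-in. */
#include <stdio.h>

struct zone_colors {
	unsigned char red, green, blue;
};

static int parse_rgb(const char *buf, struct zone_colors *c)
{
	if (sscanf(buf, "%2hhx%2hhx%2hhx", &c->red, &c->green, &c->blue) != 3)
		return -1;	/* the kernel code would return -EINVAL here */
	return 0;
}

int main(void)
{
	struct zone_colors c;

	if (parse_rgb("00ff7f", &c) == 0)
		printf("r=%u g=%u b=%u\n", c.red, c.green, c.blue);
	return 0;
}
```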
|
| /drivers/staging/rts5208/ |
| D | xd.c |
| 793 | xd_card->zone = vmalloc(size); | in xd_init_l2p_tbl() |
| 794 | if (!xd_card->zone) | in xd_init_l2p_tbl() |
| 798 | xd_card->zone[i].build_flag = 0; | in xd_init_l2p_tbl() |
| 799 | xd_card->zone[i].l2p_table = NULL; | in xd_init_l2p_tbl() |
| 800 | xd_card->zone[i].free_table = NULL; | in xd_init_l2p_tbl() |
| 801 | xd_card->zone[i].get_index = 0; | in xd_init_l2p_tbl() |
| 802 | xd_card->zone[i].set_index = 0; | in xd_init_l2p_tbl() |
| 803 | xd_card->zone[i].unused_blk_cnt = 0; | in xd_init_l2p_tbl() |
| 809 | static inline void free_zone(struct zone_entry *zone) | in free_zone() argument |
| 811 | if (!zone) | in free_zone() |
| … | [all …] | |
|
| /drivers/thermal/intel/int340x_thermal/ |
| D | int340x_thermal_zone.c |
| 14 | static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone, | in int340x_thermal_get_zone_temp() argument |
| 17 | struct int34x_thermal_zone *d = thermal_zone_device_priv(zone); | in int340x_thermal_get_zone_temp() |
| 41 | static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, | in int340x_thermal_set_trip_temp() argument |
| 44 | struct int34x_thermal_zone *d = thermal_zone_device_priv(zone); | in int340x_thermal_set_trip_temp() |
| 60 | static void int340x_thermal_critical(struct thermal_zone_device *zone) | in int340x_thermal_critical() argument |
| 62 | dev_dbg(thermal_zone_device(zone), "%s: critical temperature reached\n", | in int340x_thermal_critical() |
| 63 | thermal_zone_device_type(zone)); | in int340x_thermal_critical() |
| 164 | int34x_zone->zone = thermal_zone_device_register_with_trips( | in int340x_thermal_zone_add() |
| 173 | if (IS_ERR(int34x_zone->zone)) { | in int340x_thermal_zone_add() |
| 174 | ret = PTR_ERR(int34x_zone->zone); | in int340x_thermal_zone_add() |
| … | [all …] | |
|
| /drivers/iio/light/ |
| D | lm3533-als.c |
| 57 | atomic_t zone; | member |
| 86 | static int _lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone) | in _lm3533_als_get_zone() argument |
| 99 | *zone = min_t(u8, val, LM3533_ALS_ZONE_MAX); | in _lm3533_als_get_zone() |
| 104 | static int lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone) | in lm3533_als_get_zone() argument |
| 110 | *zone = atomic_read(&als->zone); | in lm3533_als_get_zone() |
| 112 | ret = _lm3533_als_get_zone(indio_dev, zone); | in lm3533_als_get_zone() |
| 124 | static inline u8 lm3533_als_get_target_reg(unsigned channel, unsigned zone) | in lm3533_als_get_target_reg() argument |
| 126 | return LM3533_REG_ALS_TARGET_BASE + 5 * channel + zone; | in lm3533_als_get_target_reg() |
| 130 | unsigned zone, u8 *val) | in lm3533_als_get_target() argument |
| 139 | if (zone > LM3533_ALS_ZONE_MAX) | in lm3533_als_get_target() |
| … | [all …] | |
|
| /drivers/scsi/ |
| D | sd_zbc.c |
| 50 | struct blk_zone zone = { 0 }; | in sd_zbc_parse_report() local |
| 57 | zone.type = buf[0] & 0x0f; | in sd_zbc_parse_report() |
| 58 | zone.cond = (buf[1] >> 4) & 0xf; | in sd_zbc_parse_report() |
| 60 | zone.reset = 1; | in sd_zbc_parse_report() |
| 62 | zone.non_seq = 1; | in sd_zbc_parse_report() |
| 65 | zone.start = logical_to_sectors(sdp, start_lba); | in sd_zbc_parse_report() |
| 66 | zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); | in sd_zbc_parse_report() |
| 67 | zone.len = zone.capacity; | in sd_zbc_parse_report() |
| 70 | if (zone.len > gran) { | in sd_zbc_parse_report() |
| 74 | sectors_to_logical(sdp, zone.capacity), | in sd_zbc_parse_report() |
| … | [all …] | |
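
sd_zbc_parse_report() decodes one raw REPORT ZONES descriptor: the zone type and condition come from packed bit fields in the first two bytes, and the 64-bit LBA fields are big-endian values converted to 512-byte sectors. A standalone sketch of that decoding; any descriptor offset other than the ones visible above (bytes 0, 1 and 8) is an assumption, and logical_to_sectors() is modelled as a shift for a 4 KiB logical block:

```c
/* Sketch of decoding a REPORT ZONES descriptor in the style of
 * sd_zbc_parse_report(): bit fields from bytes 0-1, big-endian 64-bit LBA
 * fields converted to 512 B sectors. Offsets beyond those shown in the
 * matches are assumptions for illustration. */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

struct parsed_zone {
	unsigned int type, cond;
	uint64_t start, len, capacity;	/* 512 B sectors */
};

/* logical block -> 512 B sectors for an assumed 4096 B logical block size */
static uint64_t logical_to_sectors(uint64_t lba) { return lba << 3; }

static void parse_descriptor(const uint8_t buf[64], struct parsed_zone *z)
{
	z->type = buf[0] & 0x0f;
	z->cond = (buf[1] >> 4) & 0xf;
	z->capacity = logical_to_sectors(get_be64(&buf[8]));	/* zone length */
	z->start = logical_to_sectors(get_be64(&buf[16]));	/* assumed offset */
	z->len = z->capacity;
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	struct parsed_zone z;

	buf[0] = 0x02;		/* sequential-write-required */
	buf[1] = 0x10;		/* implicitly open */
	buf[15] = 0x80;		/* zone length = 0x80 logical blocks */
	buf[23] = 0x80;		/* start LBA = 0x80 */
	parse_descriptor(buf, &z);
	printf("type=%u cond=%u start=%llu len=%llu\n", z.type, z.cond,
	       (unsigned long long)z.start, (unsigned long long)z.len);
	return 0;
}
```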
|
| /drivers/base/ |
| D | memory.c |
| 192 | struct zone *zone; | in memory_block_online() local |
| 198 | zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group, | in memory_block_online() |
| 223 | zone, mem->altmap->inaccessible); | in memory_block_online() |
| 229 | nr_pages - nr_vmemmap_pages, zone, mem->group); | in memory_block_online() |
| 244 | mem->zone = zone; | in memory_block_online() |
| 265 | if (!mem->zone) | in memory_block_offline() |
| 281 | nr_pages - nr_vmemmap_pages, mem->zone, mem->group); | in memory_block_offline() |
| 293 | mem->zone = NULL; | in memory_block_offline() |
| 438 | int online_type, struct zone *default_zone) | in print_allowed_zone() |
| 440 | struct zone *zone; | in print_allowed_zone() local |
| … | [all …] | |
|
| /drivers/nvme/host/ |
| D | zns.c |
| 155 | struct blk_zone zone = { }; | in nvme_zone_parse_entry() local |
| 163 | zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ; | in nvme_zone_parse_entry() |
| 164 | zone.cond = entry->zs >> 4; | in nvme_zone_parse_entry() |
| 165 | zone.len = head->zsze; | in nvme_zone_parse_entry() |
| 166 | zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap)); | in nvme_zone_parse_entry() |
| 167 | zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba)); | in nvme_zone_parse_entry() |
| 168 | if (zone.cond == BLK_ZONE_COND_FULL) | in nvme_zone_parse_entry() |
| 169 | zone.wp = zone.start + zone.len; | in nvme_zone_parse_entry() |
| 171 | zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp)); | in nvme_zone_parse_entry() |
| 173 | return cb(&zone, idx, data); | in nvme_zone_parse_entry() |
|
| /drivers/thermal/renesas/ |
| D | rzg2l_thermal.c |
| 60 | struct thermal_zone_device *zone; | member |
| 157 | thermal_remove_hwmon_sysfs(priv->zone); | in rzg2l_thermal_remove() |
| 163 | struct thermal_zone_device *zone; | in rzg2l_thermal_probe() local |
| 208 | zone = devm_thermal_of_zone_register(dev, 0, priv, | in rzg2l_thermal_probe() |
| 210 | if (IS_ERR(zone)) { | in rzg2l_thermal_probe() |
| 212 | ret = PTR_ERR(zone); | in rzg2l_thermal_probe() |
| 216 | priv->zone = zone; | in rzg2l_thermal_probe() |
| 217 | ret = thermal_add_hwmon_sysfs(priv->zone); | in rzg2l_thermal_probe() |
|
| /drivers/thermal/ |
| D | da9062-thermal.c |
| 53 | struct thermal_zone_device *zone; | member |
| 97 | thermal_zone_device_update(thermal->zone, | in da9062_thermal_poll_on() |
| 111 | thermal_zone_device_update(thermal->zone, | in da9062_thermal_poll_on() |
| 199 | thermal->zone = thermal_zone_device_register_with_trips(thermal->config->name, | in da9062_thermal_probe() |
| 203 | if (IS_ERR(thermal->zone)) { | in da9062_thermal_probe() |
| 205 | ret = PTR_ERR(thermal->zone); | in da9062_thermal_probe() |
| 208 | ret = thermal_zone_device_enable(thermal->zone); | in da9062_thermal_probe() |
| 237 | thermal_zone_device_unregister(thermal->zone); | in da9062_thermal_probe() |
| 248 | thermal_zone_device_unregister(thermal->zone); | in da9062_thermal_remove() |
|
| /drivers/hwmon/ |
| D | scpi-hwmon.c |
| 67 | struct scpi_thermal_zone *zone = thermal_zone_device_priv(tz); | in scpi_read_temp() local |
| 68 | struct scpi_sensors *scpi_sensors = zone->scpi_sensors; | in scpi_read_temp() |
| 70 | struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id]; | in scpi_read_temp() |
| 267 | struct scpi_thermal_zone *zone; | in scpi_hwmon_probe() local |
| 272 | zone = devm_kzalloc(dev, sizeof(*zone), GFP_KERNEL); | in scpi_hwmon_probe() |
| 273 | if (!zone) | in scpi_hwmon_probe() |
| 276 | zone->sensor_id = i; | in scpi_hwmon_probe() |
| 277 | zone->scpi_sensors = scpi_sensors; | in scpi_hwmon_probe() |
| 280 | zone, | in scpi_hwmon_probe() |
| 289 | devm_kfree(dev, zone); | in scpi_hwmon_probe() |
|