Lines Matching full:zone
16 * Zone BIO context.
20 struct dm_zone *zone; member
86 struct dm_zone *zone = bioctx->zone; in dmz_bio_endio() local
88 if (zone) { in dmz_bio_endio()
91 dmz_is_seq(zone)) in dmz_bio_endio()
92 set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); in dmz_bio_endio()
93 dmz_deactivate_zone(zone); in dmz_bio_endio()
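Stitched together, the matched fragments of dmz_bio_endio() reduce to the zone cleanup below. A minimal sketch: the bi_status check and the surrounding per-BIO context and completion plumbing are assumptions; the error flagging and the deactivation are confirmed by the lines above.

	struct dm_zone *zone = bioctx->zone;

	if (zone) {
		/*
		 * A failed write to a sequential zone likely left the
		 * device-side write pointer out of sync with zone->wp_block:
		 * flag the zone so it can be recovered before reuse.
		 */
		if (bio->bi_status != BLK_STS_OK &&
		    bio_op(bio) == REQ_OP_WRITE &&
		    dmz_is_seq(zone))
			set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
		/* Drop the activation taken when the BIO was dispatched */
		dmz_deactivate_zone(zone);
	}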
116 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, in dmz_submit_bio() argument
129 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); in dmz_submit_bio()
139 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) in dmz_submit_bio()
140 zone->wp_block += nr_blocks; in dmz_submit_bio()
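These two fragments carry the core of dmz_submit_bio(): the chunk-relative block is remapped to an absolute device sector by adding the zone's start sector, and a successful write submission advances the software-tracked write pointer of a sequential zone. A sketch under that reading; the clone plumbing (bio_clone_fast(), the bio_set field, generic_make_request()) is assumed, the arithmetic comes from the listing.

static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	/* Assumed: clone the front of the BIO to issue it to the backend */
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, dmz->bio_set);

	if (!clone)
		return -ENOMEM;

	/* Remap the chunk-relative block onto the zone's sector range */
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;

	generic_make_request(clone);

	/*
	 * Sequential zones only accept writes at the write pointer, so a
	 * write of nr_blocks is known to advance it by exactly that much.
	 */
	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}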
164 static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone, in dmz_handle_read() argument
174 if (!zone) { in dmz_handle_read()
179 dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks", in dmz_handle_read()
181 (dmz_is_rnd(zone) ? "RND" : "SEQ"), in dmz_handle_read()
182 dmz_id(dmz->metadata, zone), in dmz_handle_read()
186 bzone = zone->bzone; in dmz_handle_read()
189 if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) { in dmz_handle_read()
190 /* Test block validity in the data zone */ in dmz_handle_read()
191 ret = dmz_block_valid(dmz->metadata, zone, chunk_block); in dmz_handle_read()
195 /* Read data zone blocks */ in dmz_handle_read()
197 rzone = zone; in dmz_handle_read()
202 * No valid blocks found in the data zone. in dmz_handle_read()
203 * Check the buffer zone, if there is one. in dmz_handle_read()
210 /* Read buffer zone blocks */ in dmz_handle_read()
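The read fragments describe a per-block placement decision: a block can sit in the data zone only if the zone is random or the block is below the write pointer; otherwise, or if invalid there, the buffer zone is consulted, and a block valid in neither is returned as zeroes. A sketch of that loop, assuming dmz_block_valid() returns the number of consecutive valid blocks (0 if invalid, negative on error) and an assumed zero-fill helper for holes:

	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks,
					  end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block,
					     nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* Block valid nowhere: zero-fill it (assumed helper) */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}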
234 * Write blocks directly in a data zone, at the write pointer.
235 * If a buffer zone is assigned, invalidate the blocks written
239 struct dm_zone *zone, struct bio *bio, in dmz_handle_direct_write() argument
244 struct dm_zone *bzone = zone->bzone; in dmz_handle_direct_write()
247 if (dmz_is_readonly(zone)) in dmz_handle_direct_write()
251 ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks); in dmz_handle_direct_write()
256 * Validate the blocks in the data zone and invalidate in dmz_handle_direct_write()
257 * in the buffer zone, if there is one. in dmz_handle_direct_write()
259 ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks); in dmz_handle_direct_write()
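With the unmatched lines filled in, the direct-write path plausibly reads as below; every helper named here appears in the listing, while the error codes and the submit check are assumptions:

static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Write at the zone write pointer (sequential) or in place (random) */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}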
267 * Write blocks in the buffer zone of @zone.
268 * If no buffer zone is assigned yet, get one.
269 * Called with @zone write locked.
272 struct dm_zone *zone, struct bio *bio, in dmz_handle_buffered_write() argument
280 /* Get the buffer zone. One will be allocated if needed */ in dmz_handle_buffered_write()
281 bzone = dmz_get_chunk_buffer(zmd, zone); in dmz_handle_buffered_write()
294 * Validate the blocks in the buffer zone in dmz_handle_buffered_write()
295 * and invalidate in the data zone. in dmz_handle_buffered_write()
298 if (ret == 0 && chunk_block < zone->wp_block) in dmz_handle_buffered_write()
299 ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks); in dmz_handle_buffered_write()
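The buffered-write counterpart, reconstructed the same way. The error convention of dmz_get_chunk_buffer() on allocation failure is an assumption; the validate/invalidate bookkeeping, mirroring the direct path, matches the listing:

static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (!bzone)
		return -ENOSPC;	/* assumed error convention */

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Write into the buffer zone, which is randomly writable */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}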
307 static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone, in dmz_handle_write() argument
313 if (!zone) in dmz_handle_write()
316 dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks", in dmz_handle_write()
318 (dmz_is_rnd(zone) ? "RND" : "SEQ"), in dmz_handle_write()
319 dmz_id(dmz->metadata, zone), in dmz_handle_write()
322 if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) { in dmz_handle_write()
324 * zone is a random zone or it is a sequential zone in dmz_handle_write()
325 * and the BIO is aligned to the zone write pointer: in dmz_handle_write()
326 * direct write the zone. in dmz_handle_write()
328 return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks); in dmz_handle_write()
332 * This is an unaligned write in a sequential zone: in dmz_handle_write()
335 return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks); in dmz_handle_write()
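The dispatcher itself is short; a sketch, with dmz_bio_block()/dmz_bio_blocks()/dmz_chunk_block() as assumed helpers that convert the BIO position into a chunk-relative block range:

static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	/* No mapped zone and none could be allocated: out of space */
	if (!zone)
		return -ENOSPC;

	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block)
		/*
		 * Random zone, or a write exactly at a sequential zone's
		 * write pointer: write the data zone directly.
		 */
		return dmz_handle_direct_write(dmz, zone, bio,
					       chunk_block, nr_blocks);

	/*
	 * Unaligned write to a sequential zone: redirect it to the
	 * chunk's buffer zone until reclaim merges the two.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio,
					 chunk_block, nr_blocks);
}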
341 static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone, in dmz_handle_discard() argument
351 if (!zone) in dmz_handle_discard()
354 if (dmz_is_readonly(zone)) in dmz_handle_discard()
357 dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks", in dmz_handle_discard()
359 dmz_id(zmd, zone), in dmz_handle_discard()
363 * Invalidate blocks in the data zone and its in dmz_handle_discard()
364 * buffer zone if one is mapped. in dmz_handle_discard()
366 if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) in dmz_handle_discard()
367 ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks); in dmz_handle_discard()
368 if (ret == 0 && zone->bzone) in dmz_handle_discard()
369 ret = dmz_invalidate_blocks(zmd, zone->bzone, in dmz_handle_discard()
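Discard, reconstructed the same way. Note the asymmetry: the data zone is only touched when the block can actually live there (random zone, or below the write pointer), while a mapped buffer zone is always invalidated:

static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	int ret = 0;

	/* Discarding an unmapped chunk is a no-op */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}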
382 struct dm_zone *zone; in dmz_handle_bio() local
386 * Write may trigger a zone allocation. So make sure the in dmz_handle_bio()
400 * Get the data zone mapping the chunk. There may be no in dmz_handle_bio()
402 * the zone returned will be set to active state. in dmz_handle_bio()
404 zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio), in dmz_handle_bio()
406 if (IS_ERR(zone)) { in dmz_handle_bio()
407 ret = PTR_ERR(zone); in dmz_handle_bio()
412 if (zone) { in dmz_handle_bio()
413 dmz_activate_zone(zone); in dmz_handle_bio()
414 bioctx->zone = zone; in dmz_handle_bio()
419 ret = dmz_handle_read(dmz, zone, bio); in dmz_handle_bio()
422 ret = dmz_handle_write(dmz, zone, bio); in dmz_handle_bio()
426 ret = dmz_handle_discard(dmz, zone, bio); in dmz_handle_bio()
436 * is still valid, that is, that the zone used still has valid blocks. in dmz_handle_bio()
438 if (zone) in dmz_handle_bio()
439 dmz_put_chunk_mapping(zmd, zone); in dmz_handle_bio()
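The control flow around these fragments, sketched: look up (and possibly allocate) the chunk's data zone, pin it in the per-BIO context so dmz_bio_endio() can deactivate it, dispatch by operation, then release the chunk mapping. The switch arms and error codes are assumptions; the activate/put pairing is confirmed above:

	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Keep the zone active while the BIO is in flight */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This checks that the mapping is
	 * still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);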
647 bioctx->zone = NULL; in dmz_map()
660 /* Split zone BIOs to fit entirely into a zone */ in dmz_map()
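What the split comment in dmz_map() refers to: a BIO must not straddle a zone boundary, so the target truncates it with dm_accept_partial_bio() (a DM core helper) and lets device-mapper resubmit the remainder. A sketch, with the zone_nr_sectors field name as an assumption:

	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	/* Offset inside the zone; zone size is a power-of-two sector count */
	sector_t chunk_sector = sector & (dmz->dev->zone_nr_sectors - 1);

	/* Split zone BIOs to fit entirely into a zone */
	if (chunk_sector + nr_sectors > dmz->dev->zone_nr_sectors)
		dm_accept_partial_bio(bio,
				      dmz->dev->zone_nr_sectors - chunk_sector);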
804 /* Zone BIO */ in dmz_ctr()
838 ti->error = "Zone reclaim initialization failed"; in dmz_ctr()
912 /* FS hint to try to align to the device zone size */ in dmz_io_hints()
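And the hint itself, a two-line sketch: advertising the zone size as chunk_sectors is the standard queue-limits channel for telling upper layers, filesystems included, the target's preferred alignment. The field names on the dmz side are assumptions inferred from the comment:

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = dmz->dev->zone_nr_sectors;
	limits->max_sectors = dmz->dev->zone_nr_sectors;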