Lines Matching refs:lc

131 static void put_pending_block(struct log_writes_c *lc) in put_pending_block() argument
133 if (atomic_dec_and_test(&lc->pending_blocks)) { in put_pending_block()
135 if (waitqueue_active(&lc->wait)) in put_pending_block()
136 wake_up(&lc->wait); in put_pending_block()
140 static void put_io_block(struct log_writes_c *lc) in put_io_block() argument
142 if (atomic_dec_and_test(&lc->io_blocks)) { in put_io_block()
144 if (waitqueue_active(&lc->wait)) in put_io_block()
145 wake_up(&lc->wait); in put_io_block()
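
Every match in this listing is against the per-target context of the dm-log-writes target, struct log_writes_c, which each helper receives as lc. put_pending_block() and put_io_block() share a drop-and-wake pattern: decrement one of the two per-target atomic counters and, if it just hit zero while someone is sleeping on lc->wait, wake that waiter (the destructor's wait_event() later in the listing is the consumer). A minimal sketch of the pattern as the matched lines suggest it; the memory barrier and the exact struct layout are assumptions:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct log_writes_c {                 /* only the fields this pattern touches */
            atomic_t pending_blocks;      /* blocks queued but not yet logged     */
            atomic_t io_blocks;           /* log-device I/Os still in flight      */
            wait_queue_head_t wait;       /* the destructor sleeps here           */
            /* ... */
    };

    static void put_pending_block(struct log_writes_c *lc)
    {
            if (atomic_dec_and_test(&lc->pending_blocks)) {
                    /* assumed: order the decrement against the waitqueue check */
                    smp_mb__after_atomic();
                    if (waitqueue_active(&lc->wait))
                            wake_up(&lc->wait);
            }
    }

    /* put_io_block() is the same pattern applied to lc->io_blocks. */
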
151 struct log_writes_c *lc = bio->bi_private; in log_end_io() local
159 spin_lock_irqsave(&lc->blocks_lock, flags); in log_end_io()
160 lc->logging_enabled = false; in log_end_io()
161 spin_unlock_irqrestore(&lc->blocks_lock, flags); in log_end_io()
167 put_io_block(lc); in log_end_io()
175 static void free_pending_block(struct log_writes_c *lc, in free_pending_block() argument
186 put_pending_block(lc); in free_pending_block()
189 static int write_metadata(struct log_writes_c *lc, void *entry, in write_metadata() argument
205 bio->bi_bdev = lc->logdev->bdev; in write_metadata()
207 bio->bi_private = lc; in write_metadata()
221 lc->sectorsize - entrylen - datalen); in write_metadata()
224 ret = bio_add_page(bio, page, lc->sectorsize, 0); in write_metadata()
225 if (ret != lc->sectorsize) { in write_metadata()
235 put_io_block(lc); in write_metadata()
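
write_metadata() packs one log entry plus any inline payload into a single lc->sectorsize buffer, points the bio at lc->logdev->bdev with lc as bi_private so log_end_io() can find the target again, and checks that bio_add_page() accepted the whole sector before submitting; on failure it drops the io_blocks reference it holds. A sketch of the fill-and-pad step implied by the lc->sectorsize - entrylen - datalen line, pulled out into a hypothetical helper (fill_metadata_page() is not a name from the source):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static int fill_metadata_page(struct log_writes_c *lc, struct page *page,
                                  void *entry, size_t entrylen,
                                  void *data, size_t datalen)
    {
            void *ptr;

            if (entrylen + datalen > lc->sectorsize)
                    return -EINVAL;                 /* must fit in one log sector */

            ptr = kmap_atomic(page);
            memcpy(ptr, entry, entrylen);
            if (datalen)
                    memcpy(ptr + entrylen, data, datalen);
            memset(ptr + entrylen + datalen, 0,
                   lc->sectorsize - entrylen - datalen);  /* zero-pad the tail */
            kunmap_atomic(ptr);
            return 0;
    }

The caller then verifies that bio_add_page(bio, page, lc->sectorsize, 0) == lc->sectorsize before submitting; anything less means the page could not be attached, and the function unwinds through put_io_block(lc).
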
239 static int log_one_block(struct log_writes_c *lc, in log_one_block() argument
251 if (write_metadata(lc, &entry, sizeof(entry), block->data, in log_one_block()
253 free_pending_block(lc, block); in log_one_block()
261 atomic_inc(&lc->io_blocks); in log_one_block()
269 bio->bi_bdev = lc->logdev->bdev; in log_one_block()
271 bio->bi_private = lc; in log_one_block()
281 atomic_inc(&lc->io_blocks); in log_one_block()
290 bio->bi_bdev = lc->logdev->bdev; in log_one_block()
292 bio->bi_private = lc; in log_one_block()
308 put_pending_block(lc); in log_one_block()
311 free_pending_block(lc, block); in log_one_block()
312 put_io_block(lc); in log_one_block()
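
log_one_block() is where a queued pending block becomes log-device I/O: the block's metadata entry goes out through write_metadata() at the allocated sector, and blocks that carry data pages then take extra lc->io_blocks references and issue data bios to lc->logdev->bdev, starting a fresh bio whenever bio_add_page() refuses another vector; on success the pending-block reference is dropped, on failure the block is freed and the io_blocks reference returned. A condensed sketch of the data-bio half (log_data_pages() is a hypothetical name, the pending_block fields vecs[]/vec_cnt are assumed, bio_alloc() is shown in its older two-argument form, and error checks plus submission are left as comments because submit_bio()'s signature changed across kernel versions):

    static int log_data_pages(struct log_writes_c *lc, struct pending_block *block,
                              sector_t sector)
    {
            struct bio *bio;
            int i;

            atomic_inc(&lc->io_blocks);                  /* one reference per in-flight bio */
            bio = bio_alloc(GFP_KERNEL, block->vec_cnt); /* capped/NULL-checked in practice */
            bio->bi_iter.bi_sector = sector;
            bio->bi_bdev = lc->logdev->bdev;
            bio->bi_end_io = log_end_io;
            bio->bi_private = lc;                        /* log_end_io() drops io_blocks */

            for (i = 0; i < block->vec_cnt; i++) {
                    if (bio_add_page(bio, block->vecs[i].bv_page,
                                     block->vecs[i].bv_len, 0) !=
                        block->vecs[i].bv_len) {
                            /* bio is full: take another io_blocks reference,
                             * submit this bio, allocate a fresh one and
                             * re-add the page (elided in this sketch)        */
                    }
                    sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
            }
            /* submit the final bio here */
            return 0;
    }
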
316 static int log_super(struct log_writes_c *lc) in log_super() argument
322 super.nr_entries = cpu_to_le64(lc->logged_entries); in log_super()
323 super.sectorsize = cpu_to_le32(lc->sectorsize); in log_super()
325 if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) { in log_super()
333 static inline sector_t logdev_last_sector(struct log_writes_c *lc) in logdev_last_sector() argument
335 return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT; in logdev_last_sector()
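
logdev_last_sector() is shown in full: the log device's capacity in 512-byte sectors, read from its backing inode. log_super() reuses write_metadata() to rewrite sector 0 with the running totals, so a reader of the log always knows how many entries to expect. A sketch; the struct name, the magic/version fields and the -1 return are assumptions, while nr_entries/sectorsize and the write_metadata() call are straight from the matched lines:

    static int log_super(struct log_writes_c *lc)
    {
            struct log_write_super super;

            super.magic = cpu_to_le64(WRITE_LOG_MAGIC);       /* assumed constant */
            super.version = cpu_to_le64(WRITE_LOG_VERSION);   /* assumed constant */
            super.nr_entries = cpu_to_le64(lc->logged_entries);
            super.sectorsize = cpu_to_le32(lc->sectorsize);

            /* sector 0, no inline payload */
            if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
                    /* the real error message is not part of this listing */
                    return -1;
            }
            return 0;
    }
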
340 struct log_writes_c *lc = (struct log_writes_c *)arg; in log_writes_kthread() local
349 spin_lock_irq(&lc->blocks_lock); in log_writes_kthread()
350 if (!list_empty(&lc->logging_blocks)) { in log_writes_kthread()
351 block = list_first_entry(&lc->logging_blocks, in log_writes_kthread()
354 if (!lc->logging_enabled) in log_writes_kthread()
357 sector = lc->next_sector; in log_writes_kthread()
359 lc->next_sector++; in log_writes_kthread()
361 lc->next_sector += block->nr_sectors + 1; in log_writes_kthread()
367 if (!lc->end_sector) in log_writes_kthread()
368 lc->end_sector = logdev_last_sector(lc); in log_writes_kthread()
369 if (lc->end_sector && in log_writes_kthread()
370 lc->next_sector >= lc->end_sector) { in log_writes_kthread()
372 lc->logging_enabled = false; in log_writes_kthread()
375 lc->logged_entries++; in log_writes_kthread()
376 atomic_inc(&lc->io_blocks); in log_writes_kthread()
380 atomic_inc(&lc->io_blocks); in log_writes_kthread()
383 logging_enabled = lc->logging_enabled; in log_writes_kthread()
384 spin_unlock_irq(&lc->blocks_lock); in log_writes_kthread()
387 ret = log_one_block(lc, block, sector); in log_writes_kthread()
389 ret = log_super(lc); in log_writes_kthread()
391 spin_lock_irq(&lc->blocks_lock); in log_writes_kthread()
392 lc->logging_enabled = false; in log_writes_kthread()
393 spin_unlock_irq(&lc->blocks_lock); in log_writes_kthread()
396 free_pending_block(lc, block); in log_writes_kthread()
403 !atomic_read(&lc->pending_blocks)) in log_writes_kthread()
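
log_writes_kthread() is the single consumer of lc->logging_blocks. Under blocks_lock it pops the first block, allocates log sectors by advancing lc->next_sector (one sector for the metadata entry plus the block's data sectors), lazily latches lc->end_sector from the device size and disables logging if the log would overflow, bumps lc->logged_entries and lc->io_blocks, then drops the lock before doing the actual I/O; with nothing queued and no pending blocks it sleeps. A skeleton of that loop; error handling, the discard sizing and the flush/FUA superblock update are abridged:

    static int log_writes_kthread(void *arg)
    {
            struct log_writes_c *lc = arg;

            while (!kthread_should_stop()) {
                    struct pending_block *block = NULL;
                    sector_t sector = 0;
                    bool logging_enabled;
                    int ret;

                    spin_lock_irq(&lc->blocks_lock);
                    if (!list_empty(&lc->logging_blocks)) {
                            block = list_first_entry(&lc->logging_blocks,
                                                     struct pending_block, list);
                            list_del_init(&block->list);

                            sector = lc->next_sector;
                            /* writes consume the entry sector plus their data */
                            lc->next_sector += block->nr_sectors + 1;

                            if (!lc->end_sector)
                                    lc->end_sector = logdev_last_sector(lc);
                            if (lc->end_sector && lc->next_sector >= lc->end_sector)
                                    lc->logging_enabled = false;  /* log device full */

                            lc->logged_entries++;
                            atomic_inc(&lc->io_blocks);   /* for the entry write */
                    }
                    logging_enabled = lc->logging_enabled;
                    spin_unlock_irq(&lc->blocks_lock);

                    if (block) {
                            if (logging_enabled) {
                                    ret = log_one_block(lc, block, sector);
                                    /* flush/FUA blocks also call log_super() here */
                                    if (ret) {
                                            spin_lock_irq(&lc->blocks_lock);
                                            lc->logging_enabled = false;
                                            spin_unlock_irq(&lc->blocks_lock);
                                    }
                            } else {
                                    free_pending_block(lc, block);
                            }
                            continue;
                    }

                    set_current_state(TASK_INTERRUPTIBLE);
                    if (!kthread_should_stop() &&
                        !atomic_read(&lc->pending_blocks))
                            schedule();                   /* nothing to log yet */
                    __set_current_state(TASK_RUNNING);
            }
            return 0;
    }
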
417 struct log_writes_c *lc; in log_writes_ctr() local
430 lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL); in log_writes_ctr()
431 if (!lc) { in log_writes_ctr()
435 spin_lock_init(&lc->blocks_lock); in log_writes_ctr()
436 INIT_LIST_HEAD(&lc->unflushed_blocks); in log_writes_ctr()
437 INIT_LIST_HEAD(&lc->logging_blocks); in log_writes_ctr()
438 init_waitqueue_head(&lc->wait); in log_writes_ctr()
439 lc->sectorsize = 1 << SECTOR_SHIFT; in log_writes_ctr()
440 atomic_set(&lc->io_blocks, 0); in log_writes_ctr()
441 atomic_set(&lc->pending_blocks, 0); in log_writes_ctr()
444 ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev); in log_writes_ctr()
452 &lc->logdev); in log_writes_ctr()
455 dm_put_device(ti, lc->dev); in log_writes_ctr()
459 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); in log_writes_ctr()
460 if (IS_ERR(lc->log_kthread)) { in log_writes_ctr()
461 ret = PTR_ERR(lc->log_kthread); in log_writes_ctr()
463 dm_put_device(ti, lc->dev); in log_writes_ctr()
464 dm_put_device(ti, lc->logdev); in log_writes_ctr()
469 lc->next_sector = 1; in log_writes_ctr()
470 lc->logging_enabled = true; in log_writes_ctr()
471 lc->end_sector = logdev_last_sector(lc); in log_writes_ctr()
472 lc->device_supports_discard = true; in log_writes_ctr()
479 ti->private = lc; in log_writes_ctr()
483 kfree(lc); in log_writes_ctr()
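
The constructor builds everything the lines above rely on: it allocates the context, initializes the lock, the two block lists, the waitqueue and both counters, opens the data and log devices through dm_get_device(), starts the "log-write" kthread, and seeds the logging state, with every error path unwinding the devices and freeing lc. A condensed sketch of that sequence; argument parsing is elided, logdevname is an assumed variable name (devname appears in the listing), and the ti feature flags the real constructor sets are summarized in a comment:

    lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
    if (!lc)
            return -ENOMEM;                       /* the real ctr sets ti->error too */

    spin_lock_init(&lc->blocks_lock);
    INIT_LIST_HEAD(&lc->unflushed_blocks);        /* logged only after a flush */
    INIT_LIST_HEAD(&lc->logging_blocks);          /* ready for the kthread     */
    init_waitqueue_head(&lc->wait);
    lc->sectorsize = 1 << SECTOR_SHIFT;           /* 512-byte log sectors      */
    atomic_set(&lc->io_blocks, 0);
    atomic_set(&lc->pending_blocks, 0);

    ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
    /* on failure: kfree(lc) and bail */
    ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev);
    /* on failure: dm_put_device(ti, lc->dev), kfree(lc) and bail */

    lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
    if (IS_ERR(lc->log_kthread)) {
            ret = PTR_ERR(lc->log_kthread);
            dm_put_device(ti, lc->dev);
            dm_put_device(ti, lc->logdev);
            kfree(lc);
            return ret;
    }

    lc->next_sector = 1;                          /* sector 0 is kept for the super */
    lc->logging_enabled = true;
    lc->end_sector = logdev_last_sector(lc);
    lc->device_supports_discard = true;

    /* flush/discard support and per-bio data size are advertised on ti here */
    ti->private = lc;
    return 0;
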
487 static int log_mark(struct log_writes_c *lc, char *data) in log_mark() argument
490 size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry); in log_mark()
504 atomic_inc(&lc->pending_blocks); in log_mark()
507 spin_lock_irq(&lc->blocks_lock); in log_mark()
508 list_add_tail(&block->list, &lc->logging_blocks); in log_mark()
509 spin_unlock_irq(&lc->blocks_lock); in log_mark()
510 wake_up_process(lc->log_kthread); in log_mark()
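
log_mark() is how user-visible markers (including the destructor's "dm-log-writes-end") enter the log: the string is copied into a pending block, truncated to what fits in one log sector after the entry header, the block takes a pending_blocks reference, is appended to lc->logging_blocks under the lock, and the kthread is woken to write it out asynchronously. A sketch; the pending_block field names and LOG_MARK_FLAG are assumptions:

    static int log_mark(struct log_writes_c *lc, char *data)
    {
            struct pending_block *block;
            size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);

            block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
            if (!block)
                    return -ENOMEM;

            block->data = kstrndup(data, maxsize, GFP_KERNEL); /* fits one sector */
            if (!block->data) {
                    kfree(block);
                    return -ENOMEM;
            }
            block->datalen = strlen(block->data);
            block->flags |= LOG_MARK_FLAG;        /* assumed flag name */

            atomic_inc(&lc->pending_blocks);      /* dropped once it is logged */
            spin_lock_irq(&lc->blocks_lock);
            list_add_tail(&block->list, &lc->logging_blocks);
            spin_unlock_irq(&lc->blocks_lock);
            wake_up_process(lc->log_kthread);     /* kthread writes it out */
            return 0;
    }
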
516 struct log_writes_c *lc = ti->private; in log_writes_dtr() local
518 spin_lock_irq(&lc->blocks_lock); in log_writes_dtr()
519 list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks); in log_writes_dtr()
520 spin_unlock_irq(&lc->blocks_lock); in log_writes_dtr()
526 log_mark(lc, "dm-log-writes-end"); in log_writes_dtr()
527 wake_up_process(lc->log_kthread); in log_writes_dtr()
528 wait_event(lc->wait, !atomic_read(&lc->io_blocks) && in log_writes_dtr()
529 !atomic_read(&lc->pending_blocks)); in log_writes_dtr()
530 kthread_stop(lc->log_kthread); in log_writes_dtr()
532 WARN_ON(!list_empty(&lc->logging_blocks)); in log_writes_dtr()
533 WARN_ON(!list_empty(&lc->unflushed_blocks)); in log_writes_dtr()
534 dm_put_device(ti, lc->dev); in log_writes_dtr()
535 dm_put_device(ti, lc->logdev); in log_writes_dtr()
536 kfree(lc); in log_writes_dtr()
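
Teardown order in log_writes_dtr() is what makes the counters and wake-ups above meaningful: any still-unflushed blocks are first spliced onto logging_blocks so they get written, a final "dm-log-writes-end" mark is queued, and only after lc->wait reports that both counters have drained does the kthread get stopped and the two devices released. The drain handshake, with comments, pairing the wait side with the wake-ups in put_pending_block()/put_io_block():

    /* queue a final marker so replay tools can see the end of the log */
    log_mark(lc, "dm-log-writes-end");
    wake_up_process(lc->log_kthread);

    /* sleep until put_io_block()/put_pending_block() drop the last references */
    wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
                         !atomic_read(&lc->pending_blocks));

    kthread_stop(lc->log_kthread);        /* safe: nothing left to log */
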
541 struct log_writes_c *lc = ti->private; in normal_map_bio() local
543 bio->bi_bdev = lc->dev->bdev; in normal_map_bio()
548 struct log_writes_c *lc = ti->private; in log_writes_map() local
562 if (!lc->logging_enabled) in log_writes_map()
587 spin_lock_irq(&lc->blocks_lock); in log_writes_map()
588 lc->logging_enabled = false; in log_writes_map()
589 spin_unlock_irq(&lc->blocks_lock); in log_writes_map()
594 atomic_inc(&lc->pending_blocks); in log_writes_map()
609 if (lc->device_supports_discard) in log_writes_map()
617 spin_lock_irq(&lc->blocks_lock); in log_writes_map()
618 list_splice_init(&lc->unflushed_blocks, &block->list); in log_writes_map()
619 spin_unlock_irq(&lc->blocks_lock); in log_writes_map()
639 free_pending_block(lc, block); in log_writes_map()
640 spin_lock_irq(&lc->blocks_lock); in log_writes_map()
641 lc->logging_enabled = false; in log_writes_map()
642 spin_unlock_irq(&lc->blocks_lock); in log_writes_map()
659 spin_lock_irq(&lc->blocks_lock); in log_writes_map()
660 list_splice_init(&lc->unflushed_blocks, &block->list); in log_writes_map()
661 spin_unlock_irq(&lc->blocks_lock); in log_writes_map()
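
log_writes_map() is the per-bio decision point. If logging is already disabled the bio is simply remapped to the data device; otherwise a pending_block is allocated (allocation failure disables logging under blocks_lock), a pending_blocks reference is taken, flush bios adopt everything on lc->unflushed_blocks, discards are handled according to lc->device_supports_discard, write payloads are copied into pages the kthread can write later, and a copy failure frees the block and disables logging. A condensed sketch of that flow; is_flush/is_discard are hypothetical condition names and the copy loop is abridged:

    if (!lc->logging_enabled) {
            normal_map_bio(ti, bio);              /* pass through, nothing to log */
            return DM_MAPIO_REMAPPED;
    }

    block = kzalloc(sizeof(*block), GFP_NOIO);    /* per-bio bookkeeping */
    if (!block) {
            spin_lock_irq(&lc->blocks_lock);
            lc->logging_enabled = false;          /* cannot log reliably any more */
            spin_unlock_irq(&lc->blocks_lock);
            normal_map_bio(ti, bio);
            return DM_MAPIO_REMAPPED;
    }
    atomic_inc(&lc->pending_blocks);              /* dropped once the block is logged */

    if (is_flush) {
            /* a flush adopts everything written since the previous flush */
            spin_lock_irq(&lc->blocks_lock);
            list_splice_init(&lc->unflushed_blocks, &block->list);
            spin_unlock_irq(&lc->blocks_lock);
    }

    if (is_discard && lc->device_supports_discard) {
            /* record sector/length only; there are no data pages to copy */
    } else {
            /* copy each bio_vec into freshly allocated pages; on failure:
             * free_pending_block(lc, block) and disable logging under the lock */
    }

    normal_map_bio(ti, bio);                      /* the data device sees the bio unchanged */
    return DM_MAPIO_REMAPPED;
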
670 struct log_writes_c *lc = ti->private; in normal_end_io() local
677 spin_lock_irqsave(&lc->blocks_lock, flags); in normal_end_io()
679 list_splice_tail_init(&block->list, &lc->logging_blocks); in normal_end_io()
680 list_add_tail(&block->list, &lc->logging_blocks); in normal_end_io()
681 wake_up_process(lc->log_kthread); in normal_end_io()
683 list_add_tail(&block->list, &lc->logging_blocks); in normal_end_io()
684 wake_up_process(lc->log_kthread); in normal_end_io()
686 list_add_tail(&block->list, &lc->unflushed_blocks); in normal_end_io()
687 spin_unlock_irqrestore(&lc->blocks_lock, flags); in normal_end_io()
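
normal_end_io() runs when the data device completes the bio, and it is what gives the log its ordering semantics: a flush-flagged block (together with every block it adopted) moves to logging_blocks and the kthread is woken, FUA writes are queued immediately as well, and plain writes park on unflushed_blocks until a later flush picks them up. A sketch of that three-way sort; the per-bio lookup and the flag names are assumptions, the list operations are the matched lines:

    struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
    struct pending_block *block = pb->block;      /* NULL when nothing was logged */
    unsigned long flags;

    if (block) {
            spin_lock_irqsave(&lc->blocks_lock, flags);
            if (block->flags & LOG_FLUSH_FLAG) {
                    /* the flush adopted earlier writes: queue them all */
                    list_splice_tail_init(&block->list, &lc->logging_blocks);
                    list_add_tail(&block->list, &lc->logging_blocks);
                    wake_up_process(lc->log_kthread);
            } else if (block->flags & LOG_FUA_FLAG) {
                    list_add_tail(&block->list, &lc->logging_blocks);
                    wake_up_process(lc->log_kthread);
            } else {
                    /* ordinary write: wait for a flush before logging it */
                    list_add_tail(&block->list, &lc->unflushed_blocks);
            }
            spin_unlock_irqrestore(&lc->blocks_lock, flags);
    }
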
701 struct log_writes_c *lc = ti->private; in log_writes_status() local
705 DMEMIT("%llu %llu", lc->logged_entries, in log_writes_status()
706 (unsigned long long)lc->next_sector - 1); in log_writes_status()
707 if (!lc->logging_enabled) in log_writes_status()
712 DMEMIT("%s %s", lc->dev->name, lc->logdev->name); in log_writes_status()
720 struct log_writes_c *lc = ti->private; in log_writes_prepare_ioctl() local
721 struct dm_dev *dev = lc->dev; in log_writes_prepare_ioctl()
736 struct log_writes_c *lc = ti->private; in log_writes_iterate_devices() local
738 return fn(ti, lc->dev, 0, ti->len, data); in log_writes_iterate_devices()
748 struct log_writes_c *lc = ti->private; in log_writes_message() local
756 r = log_mark(lc, argv[1]); in log_writes_message()
765 struct log_writes_c *lc = ti->private; in log_writes_io_hints() local
766 struct request_queue *q = bdev_get_queue(lc->dev->bdev); in log_writes_io_hints()
769 lc->device_supports_discard = false; in log_writes_io_hints()
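
log_writes_io_hints() is where lc->device_supports_discard gets cleared: if the data device's queue does not support discards, the target remembers that (log_writes_map() checks the flag) and adjusts the discard limits it advertises. A sketch with the queue check the matched lines imply; the specific limit values are assumptions:

    static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
    {
            struct log_writes_c *lc = ti->private;
            struct request_queue *q = bdev_get_queue(lc->dev->bdev);

            if (!q || !blk_queue_discard(q)) {
                    lc->device_supports_discard = false;
                    /* assumed: advertise log-friendly discard limits instead */
                    limits->discard_granularity = lc->sectorsize;
                    limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
            }
    }
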