
Lines Matching refs:bvec

55 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
145 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
147 return bvec->bv_len != PAGE_SIZE; in is_partial_io()
150 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
180 static void update_position(u32 *index, int *offset, struct bio_vec *bvec) in update_position() argument
182 *index += (*offset + bvec->bv_len) / PAGE_SIZE; in update_position()
183 *offset = (*offset + bvec->bv_len) % PAGE_SIZE; in update_position()
586 static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_async() argument
597 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { in read_from_bdev_async()
665 struct bio_vec bvec; in writeback_store() local
667 bvec.bv_page = page; in writeback_store()
668 bvec.bv_len = PAGE_SIZE; in writeback_store()
669 bvec.bv_offset = 0; in writeback_store()
710 if (zram_bvec_read(zram, &bvec, index, 0, NULL)) { in writeback_store()
723 bio_add_page(&bio, bvec.bv_page, bvec.bv_len, in writeback_store()
724 bvec.bv_offset); in writeback_store()
785 struct bio_vec bvec; member
796 read_from_bdev_async(zram, &zw->bvec, entry, bio); in zram_sync_read()
804 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_sync() argument
809 work.bvec = *bvec; in read_from_bdev_sync()
822 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_sync() argument
830 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, in read_from_bdev() argument
835 return read_from_bdev_sync(zram, bvec, entry, parent); in read_from_bdev()
837 return read_from_bdev_async(zram, bvec, entry, parent); in read_from_bdev()
841 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, in read_from_bdev() argument
1230 struct bio_vec bvec; in __zram_bvec_read() local
1234 bvec.bv_page = page; in __zram_bvec_read()
1235 bvec.bv_len = PAGE_SIZE; in __zram_bvec_read()
1236 bvec.bv_offset = 0; in __zram_bvec_read()
1237 return read_from_bdev(zram, &bvec, in __zram_bvec_read()
1281 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read() argument
1287 page = bvec->bv_page; in zram_bvec_read()
1288 if (is_partial_io(bvec)) { in zram_bvec_read()
1295 ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec)); in zram_bvec_read()
1299 if (is_partial_io(bvec)) { in zram_bvec_read()
1300 void *dst = kmap_atomic(bvec->bv_page); in zram_bvec_read()
1303 memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len); in zram_bvec_read()
1308 if (is_partial_io(bvec)) in zram_bvec_read()
1314 static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, in __zram_bvec_write() argument
1323 struct page *page = bvec->bv_page; in __zram_bvec_write()
1430 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, in zram_bvec_write() argument
1438 vec = *bvec; in zram_bvec_write()
1439 if (is_partial_io(bvec)) { in zram_bvec_write()
1453 src = kmap_atomic(bvec->bv_page); in zram_bvec_write()
1455 memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len); in zram_bvec_write()
1466 if (is_partial_io(bvec)) in zram_bvec_write()
1514 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, in zram_bvec_rw() argument
1522 generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT, in zram_bvec_rw()
1527 ret = zram_bvec_read(zram, bvec, index, offset, bio); in zram_bvec_rw()
1528 flush_dcache_page(bvec->bv_page); in zram_bvec_rw()
1531 ret = zram_bvec_write(zram, bvec, index, offset, bio); in zram_bvec_rw()
1554 struct bio_vec bvec; in __zram_make_request() local
1571 bio_for_each_segment(bvec, bio, iter) { in __zram_make_request()
1572 struct bio_vec bv = bvec; in __zram_make_request()
1573 unsigned int unwritten = bvec.bv_len; in __zram_make_request()
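
The listing above centers on two small helpers, is_partial_io() (line 145) and update_position() (line 180), plus the recurring full-page bio_vec setup in writeback_store() and __zram_bvec_read() (bv_page/bv_len/bv_offset). Below is a minimal userspace sketch that mirrors that logic so the arithmetic can be run in isolation; the struct bio_vec and PAGE_SIZE definitions here are simplified stand-ins for illustration, not the kernel's own headers.

/*
 * Userspace sketch of the bvec handling seen in the listing.
 * struct bio_vec and PAGE_SIZE are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct bio_vec {
	void *bv_page;          /* stand-in for struct page * */
	unsigned int bv_len;    /* bytes covered by this segment */
	unsigned int bv_offset; /* byte offset into the page */
};

/* A segment is "partial" when it covers less than a full page. */
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/* Advance the (page index, in-page offset) cursor past this segment. */
static void update_position(unsigned int *index, int *offset,
			    struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

int main(void)
{
	/* Full-page segment, as set up in writeback_store()/__zram_bvec_read(). */
	struct bio_vec bvec = {
		.bv_page = NULL,
		.bv_len = PAGE_SIZE,
		.bv_offset = 0,
	};
	unsigned int index = 0;
	int offset = 0;

	printf("partial: %d\n", is_partial_io(&bvec));  /* 0: full page */
	update_position(&index, &offset, &bvec);
	printf("index=%u offset=%d\n", index, offset);  /* index=1 offset=0 */

	/* A 512-byte segment is partial and only advances the offset. */
	bvec.bv_len = 512;
	printf("partial: %d\n", is_partial_io(&bvec));  /* 1 */
	update_position(&index, &offset, &bvec);
	printf("index=%u offset=%d\n", index, offset);  /* index=1 offset=512 */

	return 0;
}

This separation is what lets zram_bvec_read()/zram_bvec_write() (lines 1281 and 1430) handle sub-page requests through a bounce page while passing whole pages straight through.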