/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#ifndef RNBD_SRV_DEV_H
#define RNBD_SRV_DEV_H

#include <linux/fs.h>
#include "rnbd-proto.h"

struct rnbd_dev {
	struct block_device	*bdev;
	struct bio_set		*ibd_bio_set;
	fmode_t			blk_open_flags;
	char			name[BDEVNAME_SIZE];
};

struct rnbd_dev_blk_io {
	struct rnbd_dev *dev;
	void		 *priv;
	/* has to be the last member for the front_pad usage of bioset_init() */
	struct bio	bio;
};
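
/*
 * Illustrative sketch only (not compiled): the bio_set handed to
 * rnbd_dev_open() is assumed to be initialized with a front_pad that
 * covers everything in struct rnbd_dev_blk_io before the embedded bio,
 * so the io can later be recovered from a bio with container_of().
 * The names example_bio_set/example_init_bio_set are hypothetical.
 */
#if 0
static struct bio_set example_bio_set;

static int example_init_bio_set(void)
{
	/* reserve room for the members that precede 'bio' in each allocation */
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct rnbd_dev_blk_io, bio),
			   BIOSET_NEED_BVECS);
}
#endif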

/**
 * rnbd_dev_open() - Open a device
 * @path:	path to the block device to open
 * @flags:	open flags
 * @bs:		bio_set to use during block io
 */
struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
			       struct bio_set *bs);

/**
 * rnbd_dev_close() - Close a device
 * @dev:	device to close
 */
void rnbd_dev_close(struct rnbd_dev *dev);
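
/*
 * Illustrative sketch only (not compiled): typical open/close pairing.
 * The device path and the helper name are placeholders, and the sketch
 * assumes rnbd_dev_open() returns an ERR_PTR() on failure
 * (IS_ERR()/PTR_ERR() from <linux/err.h>).
 */
#if 0
static int example_open_close(struct bio_set *bs)
{
	struct rnbd_dev *dev;

	/* "/dev/example" is a placeholder path */
	dev = rnbd_dev_open("/dev/example", FMODE_READ | FMODE_WRITE, bs);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* ... use dev ... */

	rnbd_dev_close(dev);
	return 0;
}
#endif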

void rnbd_endio(void *priv, int error);

void rnbd_dev_bi_end_io(struct bio *bio);

struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
			      unsigned int len, gfp_t gfp_mask);
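
/*
 * Illustrative sketch only (not compiled): map a kernel buffer into a bio
 * allocated from the device's bio_set, recover the front-padded
 * rnbd_dev_blk_io via container_of(), and hand completion to
 * rnbd_dev_bi_end_io(). The function name and the assumption that
 * rnbd_bio_map_kern() returns an ERR_PTR() on failure are hypothetical.
 */
#if 0
static int example_submit_read(struct rnbd_dev *dev, void *data,
			       unsigned int len, void *priv)
{
	struct rnbd_dev_blk_io *io;
	struct bio *bio;

	bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* the io structure lives in the bio_set's front_pad */
	io = container_of(bio, struct rnbd_dev_blk_io, bio);
	io->dev = dev;
	io->priv = priv;

	bio->bi_opf = REQ_OP_READ;
	bio->bi_end_io = rnbd_dev_bi_end_io;
	bio->bi_private = io;
	bio_set_dev(bio, dev->bdev);
	submit_bio(bio);

	return 0;
}
#endif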

static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
{
	return queue_max_segments(bdev_get_queue(dev->bdev));
}

static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
{
	return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
}

static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
{
	return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
}

static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
{
	if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
		return 0;

	return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
					 REQ_OP_DISCARD);
}

static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
{
	return bdev_get_queue(dev->bdev)->limits.discard_granularity;
}

static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
{
	return bdev_get_queue(dev->bdev)->limits.discard_alignment;
}
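
/*
 * Illustrative sketch only (not compiled): the helpers above expose the
 * backing queue limits that the server side typically reports back to the
 * client when a device is opened. The helper name below is hypothetical.
 */
#if 0
static void example_log_limits(const struct rnbd_dev *dev)
{
	pr_debug("%s: max_segs=%d, max_hw_sects=%d, secure_discard=%d\n",
		 dev->name, rnbd_dev_get_max_segs(dev),
		 rnbd_dev_get_max_hw_sects(dev),
		 rnbd_dev_get_secure_discard(dev));
	pr_debug("%s: max_discard_sects=%d, discard_gran=%d, discard_align=%d\n",
		 dev->name, rnbd_dev_get_max_discard_sects(dev),
		 rnbd_dev_get_discard_granularity(dev),
		 rnbd_dev_get_discard_alignment(dev));
}
#endif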

#endif /* RNBD_SRV_DEV_H */