• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef BLKTRACE_H
3 #define BLKTRACE_H
4 
5 #include <linux/blkdev.h>
6 #include <linux/relay.h>
7 #include <linux/compat.h>
8 #include <uapi/linux/blktrace_api.h>
9 #include <linux/list.h>
10 
11 #if defined(CONFIG_BLK_DEV_IO_TRACE)
12 
13 #include <linux/sysfs.h>
14 
/*
 * Per-queue blktrace state, allocated when tracing is set up on a
 * request queue and reachable via q->blk_trace (RCU-protected).
 */
struct blk_trace {
	int trace_state;			/* setup / running / stopped state */
	struct rchan *rchan;			/* relay channel the trace records go through */
	unsigned long __percpu *sequence;	/* per-cpu event sequence counters */
	unsigned char __percpu *msg_data;	/* per-cpu scratch buffers for trace messages */
	u16 act_mask;				/* BLK_TC_* mask of action types to record */
	u64 start_lba;				/* sector-range filter: lower bound */
	u64 end_lba;				/* sector-range filter: upper bound */
	u32 pid;				/* pid filter (presumably 0 = trace all — see setup path) */
	u32 dev;				/* device the trace is attached to */
	struct dentry *dir;			/* debugfs directory holding the trace files */
	struct list_head running_list;		/* link in the list of currently running traces */
	atomic_t dropped;			/* count of events dropped (relay buffer full) */
};
29 
struct blkcg;

/* Handle the blktrace ioctls issued on a block device node. */
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
/* Tear down any trace still attached to @q (called on queue release). */
extern void blk_trace_shutdown(struct request_queue *);
/*
 * Backend for blk_add_cgroup_trace_msg()/blk_add_trace_msg(): format and
 * record a text message on the trace stream. printf-style args, checked
 * via __printf(3, 4).
 */
extern __printf(3, 4)
void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
36 
/**
 * blk_add_trace_msg - Add a (simple) message to the blktrace stream
 * @q:		queue the io is for
 * @fmt:	format to print message in
 * args...	Variable argument list for format
 *
 * Description:
 *     Records a (simple) message onto the blktrace stream.
 *
 *     NOTE: BLK_TN_MAX_MSG characters are output at most.
 *     NOTE: Can not use 'static inline' due to presence of var args...
 *
 **/
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...)			\
	do {								\
		struct blk_trace *bt;					\
									\
		/* RCU read lock pins bt against concurrent removal */	\
		rcu_read_lock();					\
		bt = rcu_dereference((q)->blk_trace);			\
		if (unlikely(bt))					\
			__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
		rcu_read_unlock();					\
	} while (0)
/* Non-cgroup variant: same as above with a NULL blkcg. */
#define blk_add_trace_msg(q, fmt, ...)					\
	blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
/* Upper bound on the length of a single trace note message. */
#define BLK_TN_MAX_MSG		128
63 
blk_trace_note_message_enabled(struct request_queue * q)64 static inline bool blk_trace_note_message_enabled(struct request_queue *q)
65 {
66 	struct blk_trace *bt;
67 	bool ret;
68 
69 	rcu_read_lock();
70 	bt = rcu_dereference(q->blk_trace);
71 	ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
72 	rcu_read_unlock();
73 	return ret;
74 }
75 
/* Record a blob of driver-private data for @rq on the trace stream. */
extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
/* Set up tracing on @q from a user-supplied blk_user_trace_setup in @arg. */
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			   struct block_device *bdev,
			   char __user *arg);
/* Start (@start != 0) or stop (@start == 0) an already set up trace. */
extern int blk_trace_startstop(struct request_queue *q, int start);
/* Remove and free the trace attached to @q, if any. */
extern int blk_trace_remove(struct request_queue *q);
extern void blk_trace_remove_sysfs(struct device *dev);
extern int blk_trace_init_sysfs(struct device *dev);

/* sysfs attribute group exposing the blktrace knobs on the block device. */
extern struct attribute_group blk_trace_attr_group;
86 
#else /* !CONFIG_BLK_DEV_IO_TRACE */
/*
 * Stubs used when blktrace is not configured: the ioctl entry points fail
 * with -ENOTTY, everything else compiles away to nothing.
 */
# define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
# define blk_trace_shutdown(q)				do { } while (0)
# define blk_add_driver_data(rq, data, len)		do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
# define blk_trace_startstop(q, start)			(-ENOTTY)
# define blk_trace_remove(q)				(-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...)	do { } while (0)
# define blk_trace_remove_sysfs(dev)			do { } while (0)
# define blk_trace_note_message_enabled(q)		(false)
/* Inline (not a macro) so callers can still check its return value. */
static inline int blk_trace_init_sysfs(struct device *dev)
{
	return 0;
}
102 
103 #endif /* CONFIG_BLK_DEV_IO_TRACE */
104 
#ifdef CONFIG_COMPAT

/*
 * 32-bit layout of struct blk_user_trace_setup, for translating the
 * BLKTRACESETUP ioctl from 32-bit userspace on a 64-bit kernel.
 * NOTE(review): compat_u64 presumably fixes the alignment/padding of the
 * 64-bit lba fields to the 32-bit ABI — confirm against the uapi struct.
 */
struct compat_blk_user_trace_setup {
	char name[BLKTRACE_BDEV_SIZE];
	u16 act_mask;
	u32 buf_size;
	u32 buf_nr;
	compat_u64 start_lba;
	compat_u64 end_lba;
	u32 pid;
};
/* Same ioctl number as BLKTRACESETUP, but with the compat payload layout. */
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)

#endif
119 
120 void blk_fill_rwbs(char *rwbs, unsigned int op);
121 
blk_rq_trace_sector(struct request * rq)122 static inline sector_t blk_rq_trace_sector(struct request *rq)
123 {
124 	/*
125 	 * Tracing should ignore starting sector for passthrough requests and
126 	 * requests where starting sector didn't get set.
127 	 */
128 	if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
129 		return 0;
130 	return blk_rq_pos(rq);
131 }
132 
/*
 * Sector count of @rq for trace records: 0 for passthrough requests
 * (their size is not meaningful in sectors), otherwise the request's
 * sector count.
 */
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return 0;

	return blk_rq_sectors(rq);
}
137 
138 #endif
139