// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};
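
/*
 * These options are toggled through the ftrace trace_options interface
 * once the "blk" tracer is the current tracer. A hedged usage sketch
 * (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo blk_classic > /sys/kernel/debug/tracing/trace_options
 *	echo blk_cgroup > /sys/kernel/debug/tracing/trace_options
 */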

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len,
		       union kernfs_node_id *cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid)
			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), NULL);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
			  const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
#else
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

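/*
 * Filter check for an event: returns non-zero when the event should be
 * skipped, i.e. when its action class is masked out, its sector falls
 * outside the [start_lba, end_lba] window, or it was issued by a PID
 * other than the one being traced.
 */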
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
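
/*
 * Worked example of the shift above (illustrative bit positions; the
 * authoritative values live in blk_types.h and blktrace_api.h). Assuming
 * __REQ_SYNC == 11, BLK_TC_SYNC == 1 << 3 and BLK_TC_SHIFT == 16:
 *
 *	MASK_TC_BIT(rw, SYNC)
 *	  = (rw & REQ_SYNC) << (ilog2(1 << 3) + 16 - 11)
 *	  = (rw & (1 << 11)) << 8
 *
 * which moves request-flag bit 11 up to action bit 19, i.e. exactly
 * BLK_TC_ACT(BLK_TC_SYNC). The macro thus translates a REQ_* flag into
 * its BLK_TC_* category bit with a single constant shift.
 */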

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int op, int op_flags, u32 what, int error,
			    int pdu_len, void *pdu_data,
			    union kernfs_node_id *cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	synchronize_rcu();
	blk_trace_free(bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
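
/*
 * The relay channel opened in do_blk_trace_setup() below materializes as
 * per-cpu files under debugfs, which blktrace(8) style tooling reads
 * directly. A hedged sketch of the resulting layout (assuming debugfs is
 * mounted at /sys/kernel/debug and the device is sda):
 *
 *	/sys/kernel/debug/block/sda/trace0	per-cpu event stream, CPU 0
 *	/sys/kernel/debug/block/sda/trace1	per-cpu event stream, CPU 1
 *	/sys/kernel/debug/block/sda/dropped	count of lost events
 *	/sys/kernel/debug/block/sda/msg		write-only message injection
 */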

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	if (!blk_debugfs_root)
		return -ENOENT;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic; this is as helpful as
	 * we can be.
	 */
	if (q->blk_trace) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

#ifdef CONFIG_BLK_DEBUG_FS
	/*
	 * When tracing a whole (multiqueue) make_request block device, reuse
	 * the existing debugfs directory created by the block layer on init.
	 * For request-based block devices, partition block devices, and
	 * scsi-generic block devices, we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (queue_is_mq(q) && bdev && bdev == bdev->bd_contains)
		dir = q->debugfs_dir;
	else
#endif
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto err;

	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->blk_trace_mutex));
	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->blk_trace_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->blk_trace_mutex);
	return ret;
}
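
/*
 * A hedged sketch of the userspace side of this ioctl interface (this is
 * roughly what blktrace(8) does, simplified; error handling omitted):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,	// bytes per relay subbuffer
 *		.buf_nr   = 4,		// subbuffers per cpu
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	// allocate + publish bt
 *	ioctl(fd, BLKTRACESTART);		// Blktrace_setup -> _running
 *	// ... read the per-cpu trace files under debugfs ...
 *	ioctl(fd, BLKTRACESTOP);		// _running -> _stopped
 *	ioctl(fd, BLKTRACETEARDOWN);		// free everything
 *	close(fd);
 */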

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->blk_trace_mutex);
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->blk_trace_mutex))) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->blk_trace_mutex);
}

#ifdef CONFIG_BLK_CGROUP
static union kernfs_node_id *
blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return NULL;

	if (!bio->bi_blkg)
		return NULL;
	return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
}
#else
static union kernfs_node_id *
blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return NULL;
}
#endif

static union kernfs_node_id *
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return NULL;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what,
			     union kernfs_node_id *cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq->q, rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt;

		rcu_read_lock();
		bt = rcu_dereference(q->blk_trace);
		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
					NULL, NULL);
		rcu_read_unlock();
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt;

		rcu_read_lock();
		bt = rcu_dereference(q->blk_trace);
		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
					0, 0, NULL, NULL);
		rcu_read_unlock();
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
				BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
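
/*
 * Hedged example of how a driver might use the export above (hypothetical
 * driver code, not part of this file): emit a small binary payload
 * alongside the normal trace stream when completing a request.
 *
 *	struct my_drv_pdu pdu = { .tag = rq->tag };	// hypothetical struct
 *
 *	blk_add_driver_data(rq->q, rq, &pdu, sizeof(pdu));
 *
 * The buffer is recorded verbatim as the PDU of a BLK_TA_DRV_DATA event.
 */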

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
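
/*
 * A few sample encodings produced by the routine above (derived directly
 * from the branches it takes):
 *
 *	"WS"	synchronous write
 *	"RA"	readahead read
 *	"FWFS"	flush + write + FUA + sync
 *	"D"	discard
 *	"N"	no data (or a notify message)
 */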

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) +
		(has_cg ? sizeof(union kernfs_node_id) : 0);
}

static inline const void *cgid_start(const struct trace_entry *ent)
{
	return (void *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len -
		(has_cg ? sizeof(union kernfs_node_id) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
				   bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
			   bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		const union kernfs_node_id *id = cgid_start(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %x,%-x %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id->ino, id->generation, act, rwbs);
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
			     const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/*
	 * find the last zero that needs to be printed, i.e. the first of
	 * any trailing zeroes; everything after it is elided as ".."
	 */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent,
			  bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent,
			 bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent,
			   bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent,
			  bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
			 pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

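/*
 * Dispatch table for formatting events: indexed by the low action bits
 * (__BLK_TA_*), each entry carries the terse act string, the verbose act
 * string (selected by the "verbose" trace option), and the printer for
 * the event body.
 */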
static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if ((iter->ent->type != TRACE_BLK) ||
	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (bt->trace_state == Blktrace_running) {
		bt->trace_state = Blktrace_stopped;
		spin_lock_irq(&running_trace_lock);
		list_del_init(&bt->running_list);
		spin_unlock_irq(&running_trace_lock);
		relay_flush(bt->rchan);
	}

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto free_bt;

	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};
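
/*
 * These names are what the sysfs act_mask attribute accepts and emits.
 * Hedged usage sketch (paths assume a disk named sda):
 *
 *	echo "read,write,sync" > /sys/block/sda/trace/act_mask
 *	echo 1 > /sys/block/sda/trace/enable
 *	cat /sys/block/sda/trace/act_mask
 *
 * A numeric mask (parsed by kstrtoull() in the store path below) is
 * accepted as well.
 */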

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->blk_trace_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (kstrtoull(buf, 0, &value))
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->blk_trace_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
				lockdep_is_held(&q->blk_trace_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
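
/*
 * Example encodings, derived from the switch above: REQ_OP_READ | REQ_SYNC
 * yields "RS", REQ_OP_WRITE | REQ_FUA | REQ_SYNC yields "WFS", and
 * REQ_OP_SECURE_ERASE yields "DE".
 */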

#endif /* CONFIG_EVENT_TRACING */