/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/time64.h>
#include "sync.h"

#ifdef CONFIG_DEBUG_FS

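/*
 * Global bookkeeping for the debugfs dump: every active timeline and fence
 * is kept on one of these lists, each protected by its own IRQ-safe spinlock.
 */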
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

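/* Register a timeline on the global debug list so it shows up in the dump. */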
void sync_timeline_debug_add(struct sync_timeline *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

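/* Drop a timeline from the global debug list when it is destroyed. */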
void sync_timeline_debug_remove(struct sync_timeline *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

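/* Register a fence on the global debug list so it shows up in the dump. */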
void sync_fence_debug_add(struct sync_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
}

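/* Drop a fence from the global debug list when it is released. */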
void sync_fence_debug_remove(struct sync_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
}

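/* Map a sync status value to a human-readable string for the dump. */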
static const char *sync_status_str(int status)
{
	if (status == 0)
		return "signaled";

	if (status > 0)
		return "active";

	return "error";
}

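/*
 * Print one sync point: its status, its signal timestamp (once signaled or
 * errored), and the driver-specific point/timeline values when the timeline
 * ops provide them.
 */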
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = 1;
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (fence_is_signaled_locked(&pt->base))
		status = pt->base.status;

	seq_printf(s, "  %s%spt %s",
		   fence ? parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));

	if (status <= 0) {
		struct timespec64 ts64 =
			ktime_to_timespec64(pt->base.timestamp);

		seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
	}

	if (parent->ops->timeline_value_str &&
	    parent->ops->pt_value_str) {
		char value[64];

		parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			parent->ops->timeline_value_str(parent, value,
						    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	}

	seq_puts(s, "\n");
}

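/* Print a timeline header followed by every sync point still on its child list. */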
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	}

	seq_puts(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

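/*
 * Print a fence, the sync points it merges, and any waiters currently
 * queued on its wait queue.
 */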
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	wait_queue_t *pos;
	unsigned long flags;
	int i;

	seq_printf(s, "[%pK] %s: %s\n", fence, fence->name,
		   sync_status_str(atomic_read(&fence->status)));

	for (i = 0; i < fence->num_fences; ++i) {
		struct sync_pt *pt =
			container_of(fence->cbs[i].sync_pt,
				     struct sync_pt, base);

		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->wq.lock, flags);
	list_for_each_entry(pos, &fence->wq.task_list, task_list) {
		struct sync_fence_waiter *waiter;

		if (pos->func != &sync_fence_wake_up_wq)
			continue;

		waiter = container_of(pos, struct sync_fence_waiter, work);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->wq.lock, flags);
}

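/* seq_file show handler: dump every registered timeline, then every fence. */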
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

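/* Create the read-only "sync" debugfs file at the debugfs root. */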
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
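/*
 * Emit the same report as the debugfs file to the kernel log, in
 * DUMP_CHUNK-sized pieces so pr_cont() is never handed an oversized string.
 * Meant to be called from debug paths such as a fence-wait timeout.
 */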
void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}

#endif