/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref, *cur;

	if (WARN_ON_ONCE(pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff; /* for page_mkclean() */

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagereflist))
		fbdefio->first_io(info);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->address - vmf->vma->vm_start;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
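
/*
 * For reference, a minimal sketch of how userspace exercises this path
 * (not part of this file; the device path and buffer length below are
 * assumptions for illustration):
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	size_t len = ...;	// fix.smem_len, e.g. from FBIOGET_FSCREENINFO
 *	void *fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	memset(fb, 0xff, len);	// first write to each page faults in
 *				// ->page_mkwrite above and marks it dirty
 *	fsync(fd);		// optional: flush the delayed work now,
 *				// via fb_deferred_io_fsync()
 */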

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct page *cur = pageref->page;
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}
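
/*
 * A minimal sketch of the driver-side deferred_io callback invoked
 * above; the function name and the hardware-flush helper are
 * hypothetical, but the list walk matches what this file hands over:
 *
 *	static void example_deferred_io(struct fb_info *info,
 *					struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list) {
 *			// pageref->offset is the byte offset of the
 *			// dirty page within the framebuffer
 *			example_hw_flush(info, pageref->offset, PAGE_SIZE);
 *		}
 *	}
 */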

int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs, i;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	for (i = 0; i < npagerefs; ++i)
		INIT_LIST_HEAD(&pagerefs[i].list);
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
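
/*
 * A minimal sketch of how a driver wires up deferred I/O during probe,
 * assuming a vmalloc()'ed shadow buffer and the hypothetical
 * example_deferred_io() callback shown above:
 *
 *	static struct fb_deferred_io example_defio = {
 *		.delay		= HZ / 20,	// flush at most every 50 ms
 *		.deferred_io	= example_deferred_io,
 *	};
 *
 *	info->fbdefio = &example_defio;
 *	info->flags |= FBINFO_VIRTFB;	// screen memory is not I/O memory
 *	ret = fb_deferred_io_init(info);
 *	if (ret)
 *		return ret;
 *	...
 *	// and fb_deferred_io_cleanup(info) on the remove path
 */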

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

static void fb_deferred_io_lastclose(struct fb_info *info)
{
	struct page *page;
	int i;

	flush_delayed_work(&info->deferred_work);

	/* clear out the mapping that we set up */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}
}

void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	if (!--fbdefio->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fb_deferred_io_lastclose(info);

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);