• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/fb.h>
4 #include <linux/vmalloc.h>
5 
6 #include <drm/drm_crtc_helper.h>
7 #include <drm/drm_drv.h>
8 #include <drm/drm_fb_dma_helper.h>
9 #include <drm/drm_fb_helper.h>
10 #include <drm/drm_framebuffer.h>
11 #include <drm/drm_gem_dma_helper.h>
12 
13 #include <drm/drm_fbdev_dma.h>
14 
15 /*
16  * struct fb_ops
17  */
18 
drm_fbdev_dma_fb_open(struct fb_info * info,int user)19 static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
20 {
21 	struct drm_fb_helper *fb_helper = info->par;
22 
23 	/* No need to take a ref for fbcon because it unbinds on unregister */
24 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
25 		return -ENODEV;
26 
27 	return 0;
28 }
29 
drm_fbdev_dma_fb_release(struct fb_info * info,int user)30 static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
31 {
32 	struct drm_fb_helper *fb_helper = info->par;
33 
34 	if (user)
35 		module_put(fb_helper->dev->driver->fops->owner);
36 
37 	return 0;
38 }
39 
drm_fbdev_dma_fb_mmap(struct fb_info * info,struct vm_area_struct * vma)40 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
41 {
42 	struct drm_fb_helper *fb_helper = info->par;
43 
44 	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
45 }
46 
drm_fbdev_dma_fb_destroy(struct fb_info * info)47 static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
48 {
49 	struct drm_fb_helper *fb_helper = info->par;
50 
51 	if (!fb_helper->dev)
52 		return;
53 
54 	if (info->fbdefio)
55 		fb_deferred_io_cleanup(info);
56 	drm_fb_helper_fini(fb_helper);
57 
58 	drm_client_buffer_vunmap(fb_helper->buffer);
59 	drm_client_framebuffer_delete(fb_helper->buffer);
60 	drm_client_release(&fb_helper->client);
61 	drm_fb_helper_unprepare(fb_helper);
62 	kfree(fb_helper);
63 }
64 
/*
 * fb_ops for the directly mapped (non-shadowed) case: read/write and
 * drawing use the DMA-memory defaults, and mmap exposes the GEM object
 * via PRIME.
 */
static const struct fb_ops drm_fbdev_dma_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	__FB_DEFAULT_DMAMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	__FB_DEFAULT_DMAMEM_OPS_DRAW,
	.fb_mmap = drm_fbdev_dma_fb_mmap,
	.fb_destroy = drm_fbdev_dma_fb_destroy,
};
75 
/*
 * Generate deferred-I/O read/write/draw helpers for the shadowed
 * framebuffer; damaged ranges/areas are reported to the fb-helper
 * damage handlers.
 */
FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
				   drm_fb_helper_damage_range,
				   drm_fb_helper_damage_area);
79 
drm_fbdev_dma_shadowed_fb_destroy(struct fb_info * info)80 static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
81 {
82 	struct drm_fb_helper *fb_helper = info->par;
83 	void *shadow = info->screen_buffer;
84 
85 	if (!fb_helper->dev)
86 		return;
87 
88 	if (info->fbdefio)
89 		fb_deferred_io_cleanup(info);
90 	drm_fb_helper_fini(fb_helper);
91 	vfree(shadow);
92 
93 	drm_client_buffer_vunmap(fb_helper->buffer);
94 	drm_client_framebuffer_delete(fb_helper->buffer);
95 	drm_client_release(&fb_helper->client);
96 	drm_fb_helper_unprepare(fb_helper);
97 	kfree(fb_helper);
98 }
99 
/*
 * fb_ops for the shadowed case: deferred I/O operates on the vmalloc'ed
 * shadow buffer, and damage is later blitted into the DMA framebuffer.
 * No fb_mmap here — userspace maps the shadow via deferred I/O.
 */
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = drm_fbdev_dma_fb_open,
	.fb_release = drm_fbdev_dma_fb_release,
	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
108 
109 /*
110  * struct drm_fb_helper
111  */
112 
drm_fbdev_dma_damage_blit_real(struct drm_fb_helper * fb_helper,struct drm_clip_rect * clip,struct iosys_map * dst)113 static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
114 					   struct drm_clip_rect *clip,
115 					   struct iosys_map *dst)
116 {
117 	struct drm_framebuffer *fb = fb_helper->fb;
118 	size_t offset = clip->y1 * fb->pitches[0];
119 	size_t len = clip->x2 - clip->x1;
120 	unsigned int y;
121 	void *src;
122 
123 	switch (drm_format_info_bpp(fb->format, 0)) {
124 	case 1:
125 		offset += clip->x1 / 8;
126 		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
127 		break;
128 	case 2:
129 		offset += clip->x1 / 4;
130 		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
131 		break;
132 	case 4:
133 		offset += clip->x1 / 2;
134 		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
135 		break;
136 	default:
137 		offset += clip->x1 * fb->format->cpp[0];
138 		len *= fb->format->cpp[0];
139 		break;
140 	}
141 
142 	src = fb_helper->info->screen_buffer + offset;
143 	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
144 
145 	for (y = clip->y1; y < clip->y2; y++) {
146 		iosys_map_memcpy_to(dst, 0, src, len);
147 		iosys_map_incr(dst, fb->pitches[0]);
148 		src += fb->pitches[0];
149 	}
150 }
151 
/*
 * Blit the damaged region from the shadow buffer into the DMA-backed
 * framebuffer. Takes the fb-helper lock around the copy; always
 * returns 0.
 */
static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
				     struct drm_clip_rect *clip)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct iosys_map dst;

	/*
	 * For fbdev emulation, we only have to protect against fbdev modeset
	 * operations. Nothing else will involve the client buffer's BO. So it
	 * is sufficient to acquire struct drm_fb_helper.lock here.
	 */
	mutex_lock(&fb_helper->lock);

	/* snapshot the map while holding the lock */
	dst = buffer->map;
	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);

	mutex_unlock(&fb_helper->lock);

	return 0;
}
172 
/* Legacy .fb_probe callback; forwards to the common fbdev probe below. */
static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
					 struct drm_fb_helper_surface_size *sizes)
{
	return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
}
drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper * helper,struct drm_clip_rect * clip)178 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
179 					 struct drm_clip_rect *clip)
180 {
181 	struct drm_device *dev = helper->dev;
182 	int ret;
183 
184 	/* Call damage handlers only if necessary */
185 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
186 		return 0;
187 
188 	if (helper->fb->funcs->dirty) {
189 		ret = drm_fbdev_dma_damage_blit(helper, clip);
190 		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
191 			return ret;
192 
193 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
194 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
195 			return ret;
196 	}
197 
198 	return 0;
199 }
200 
/* fb-helper callbacks wiring probe and damage handling for DMA fbdev. */
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
	.fb_probe = drm_fbdev_dma_helper_fb_probe,
	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
205 
206 /*
207  * struct drm_fb_helper
208  */
209 
/*
 * Finish fbdev setup for the directly mapped (non-shadowed) case:
 * point the fbdev screen at the DMA buffer's kernel mapping.
 */
static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
						 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
	struct drm_framebuffer *fb = fb_helper->fb;
	struct fb_info *info = fb_helper->info;
	struct iosys_map map = buffer->map;

	info->fbops = &drm_fbdev_dma_fb_ops;

	/* screen */
	info->flags |= FBINFO_VIRTFB; /* system memory */
	if (dma_obj->map_noncoherent)
		info->flags |= FBINFO_READS_FAST; /* signal caching */
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->screen_buffer = map.vaddr;
	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
		/* only expose a physical address for non-vmalloc mappings */
		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
	}
	info->fix.smem_len = info->screen_size;

	return 0;
}
236 
drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper * fb_helper,struct drm_fb_helper_surface_size * sizes)237 static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
238 							  struct drm_fb_helper_surface_size *sizes)
239 {
240 	struct drm_client_buffer *buffer = fb_helper->buffer;
241 	struct fb_info *info = fb_helper->info;
242 	size_t screen_size = buffer->gem->size;
243 	void *screen_buffer;
244 	int ret;
245 
246 	/*
247 	 * Deferred I/O requires struct page for framebuffer memory,
248 	 * which is not guaranteed for all DMA ranges. We thus create
249 	 * a shadow buffer in system memory.
250 	 */
251 	screen_buffer = vzalloc(screen_size);
252 	if (!screen_buffer)
253 		return -ENOMEM;
254 
255 	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
256 
257 	/* screen */
258 	info->flags |= FBINFO_VIRTFB; /* system memory */
259 	info->flags |= FBINFO_READS_FAST; /* signal caching */
260 	info->screen_buffer = screen_buffer;
261 	info->fix.smem_len = screen_size;
262 
263 	fb_helper->fbdefio.delay = HZ / 20;
264 	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
265 
266 	info->fbdefio = &fb_helper->fbdefio;
267 	ret = fb_deferred_io_init(info);
268 	if (ret)
269 		goto err_vfree;
270 
271 	return 0;
272 
273 err_vfree:
274 	vfree(screen_buffer);
275 	return ret;
276 }
277 
/*
 * Common fbdev probe for DMA-backed drivers: create the client
 * framebuffer, vmap it, allocate the fb_info, then finish with the
 * plain or shadowed setup tail depending on whether the framebuffer
 * has a dirty handler.
 */
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	/* pick a legacy format matching the requested bpp/depth */
	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
					     sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb = buffer->fb;

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		goto err_drm_client_buffer_delete;
	} else if (drm_WARN_ON(dev, map.is_iomem)) {
		ret = -ENODEV; /* I/O memory not supported; use generic emulation */
		goto err_drm_client_buffer_delete;
	}

	fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
	fb_helper->buffer = buffer;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_client_buffer_vunmap;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	/* a dirty handler implies damage tracking, which needs a shadow */
	if (fb->funcs->dirty)
		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
	else
		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
	if (ret)
		goto err_drm_fb_helper_release_info;

	return 0;

err_drm_fb_helper_release_info:
	drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
	/* clear helper pointers before the buffer goes away */
	fb_helper->fb = NULL;
	fb_helper->buffer = NULL;
	drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
	drm_client_framebuffer_delete(buffer);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
343 
344 /*
345  * struct drm_client_funcs
346  */
347 
drm_fbdev_dma_client_unregister(struct drm_client_dev * client)348 static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
349 {
350 	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
351 
352 	if (fb_helper->info) {
353 		drm_fb_helper_unregister_info(fb_helper);
354 	} else {
355 		drm_client_release(&fb_helper->client);
356 		drm_fb_helper_unprepare(fb_helper);
357 		kfree(fb_helper);
358 	}
359 }
360 
drm_fbdev_dma_client_restore(struct drm_client_dev * client)361 static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
362 {
363 	drm_fb_helper_lastclose(client->dev);
364 
365 	return 0;
366 }
367 
/*
 * Hotplug handler: if fbdev emulation is already up, just rescan;
 * otherwise perform first-time fb-helper initialization and apply an
 * initial display configuration.
 */
static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	/* already set up — just handle the connector change */
	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	/* legacy (non-atomic) drivers need unused CRTCs/encoders disabled */
	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
	return ret;
}
396 
/* DRM client hooks that drive the fbdev emulation lifecycle. */
static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_dma_client_unregister,
	.restore	= drm_fbdev_dma_client_restore,
	.hotplug	= drm_fbdev_dma_client_hotplug,
};
403 
404 /**
405  * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
406  * @dev: DRM device
407  * @preferred_bpp: Preferred bits per pixel for the device.
408  *                 32 is used if this is zero.
409  *
410  * This function sets up fbdev emulation for GEM DMA drivers that support
411  * dumb buffers with a virtual address and that can be mmap'ed.
412  * drm_fbdev_dma_setup() shall be called after the DRM driver registered
413  * the new DRM device with drm_dev_register().
414  *
415  * Restore, hotplug events and teardown are all taken care of. Drivers that do
416  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
417  * Simple drivers might use drm_mode_config_helper_suspend().
418  *
419  * This function is safe to call even when there are no connectors present.
420  * Setup will be retried on the next hotplug event.
421  *
422  * The fbdev is destroyed by drm_dev_unregister().
423  */
drm_fbdev_dma_setup(struct drm_device * dev,unsigned int preferred_bpp)424 void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
425 {
426 	struct drm_fb_helper *fb_helper;
427 	int ret;
428 
429 	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
430 	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
431 
432 	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
433 	if (!fb_helper)
434 		return;
435 	drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
436 
437 	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
438 	if (ret) {
439 		drm_err(dev, "Failed to register client: %d\n", ret);
440 		goto err_drm_client_init;
441 	}
442 
443 	drm_client_register(&fb_helper->client);
444 
445 	return;
446 
447 err_drm_client_init:
448 	drm_fb_helper_unprepare(fb_helper);
449 	kfree(fb_helper);
450 }
451 EXPORT_SYMBOL(drm_fbdev_dma_setup);
452