
drm/fbdev-dma: Implement damage handling and deferred I/O

Add support for damage handling and deferred I/O to fbdev-dma. This
enables fbdev-dma to support all DMA-memory-based DRM drivers, even
those with a dirty callback in their framebuffers (a sketch of such a
callback follows the diff below).

The patch adds the code for deferred I/O and also sets a dedicated
helper for struct fb_ops.fb_mmap that supports coherent mappings; a
general sketch of the deferred-I/O setup follows the commit metadata.

v3:
- init fb_ops with FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS() (Javier)

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240419083331.7761-22-tzimmermann@suse.de
commit 808a40b694 (parent d2b42634fd)
Author: Thomas Zimmermann <tzimmermann@suse.de>
Date:   2024-04-19 10:29:14 +02:00

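For orientation: fbdev deferred I/O tracks writes to the mmap'ed
framebuffer via page faults and flushes the touched region from a
delayed worker. Below is a minimal sketch of that setup, not part of
this patch; the my_* names are hypothetical placeholders:

	/* Sketch: wiring up fbdev deferred I/O (hypothetical my_* names) */
	#include <linux/fb.h>

	static void my_defio_flush(struct fb_info *info, struct list_head *pagereflist)
	{
		/*
		 * Runs in a worker after writes have accumulated;
		 * pagereflist names the touched pages, from which the
		 * damaged region is computed and flushed to the device.
		 */
	}

	static struct fb_deferred_io my_defio = {
		.delay		= HZ / 20,	/* batch writes for 50 ms, as in this patch */
		.deferred_io	= my_defio_flush,
	};

	static int my_init_deferred_io(struct fb_info *info)
	{
		info->fbdefio = &my_defio;
		return fb_deferred_io_init(info); /* before register_framebuffer() */
	}

The patch follows the same pattern but points .deferred_io at
drm_fb_helper_deferred_io(), which converts the page list into a DRM
damage rectangle and hands it to the fb_dirty helper added below.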

@@ -4,6 +4,7 @@
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_dma_helper.h>
@@ -35,6 +36,22 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
 	return 0;
 }
 
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+				   drm_fb_helper_damage_range,
+				   drm_fb_helper_damage_area);
+
+static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+
+	if (!dma->map_noncoherent)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return fb_deferred_io_mmap(info, vma);
+}
+
 static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 {
 	struct drm_fb_helper *fb_helper = info->par;
@@ -51,20 +68,13 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 	kfree(fb_helper);
 }
 
-static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
-	struct drm_fb_helper *fb_helper = info->par;
-
-	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
-}
-
 static const struct fb_ops drm_fbdev_dma_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_open = drm_fbdev_dma_fb_open,
 	.fb_release = drm_fbdev_dma_fb_release,
-	__FB_DEFAULT_DMAMEM_OPS_RDWR,
+	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
 	DRM_FB_HELPER_DEFAULT_OPS,
-	__FB_DEFAULT_DMAMEM_OPS_DRAW,
+	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
 	.fb_mmap = drm_fbdev_dma_fb_mmap,
 	.fb_destroy = drm_fbdev_dma_fb_destroy,
 };
@@ -98,10 +108,6 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 	dma_obj = to_drm_gem_dma_obj(buffer->gem);
 	fb = buffer->fb;
 
-	if (drm_WARN_ON(dev, fb->funcs->dirty)) {
-		ret = -ENODEV; /* damage handling not supported; use generic emulation */
-		goto err_drm_client_buffer_delete;
-	}
 
 	ret = drm_client_buffer_vmap(buffer, &map);
 	if (ret) {
@@ -112,7 +118,7 @@
 	}
 
 	fb_helper->buffer = buffer;
-	fb_helper->fb = buffer->fb;
+	fb_helper->fb = fb;
 
 	info = drm_fb_helper_alloc_info(fb_helper);
 	if (IS_ERR(info)) {
@@ -133,8 +139,19 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 	info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
 	info->fix.smem_len = info->screen_size;
 
+	/* deferred I/O */
+	fb_helper->fbdefio.delay = HZ / 20;
+	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+	info->fbdefio = &fb_helper->fbdefio;
+	ret = fb_deferred_io_init(info);
+	if (ret)
+		goto err_drm_fb_helper_release_info;
+
 	return 0;
 
+err_drm_fb_helper_release_info:
+	drm_fb_helper_release_info(fb_helper);
 err_drm_client_buffer_vunmap:
 	fb_helper->fb = NULL;
 	fb_helper->buffer = NULL;
@@ -144,8 +161,28 @@ err_drm_client_buffer_delete:
 	return ret;
 }
 
+static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+					 struct drm_clip_rect *clip)
+{
+	struct drm_device *dev = helper->dev;
+	int ret;
+
+	/* Call damage handlers only if necessary */
+	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+		return 0;
+
+	if (helper->fb->funcs->dirty) {
+		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+			return ret;
+	}
+
+	return 0;
+}
+
 static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
 	.fb_probe = drm_fbdev_dma_helper_fb_probe,
+	.fb_dirty = drm_fbdev_dma_helper_fb_dirty,
 };
 
 /*
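
To show where the accumulated damage ends up: the new fb_dirty helper
forwards the clip rectangle to the framebuffer's dirty callback when the
driver implements one. Below is a minimal sketch of such a driver-side
callback, not from this commit; the my_* names are hypothetical:

	/* Sketch: a driver's .dirty callback (hypothetical my_* names) */
	#include <drm/drm_framebuffer.h>
	#include <drm/drm_gem_framebuffer_helper.h>

	static int my_fb_dirty(struct drm_framebuffer *fb, struct drm_file *file,
			       unsigned int flags, unsigned int color,
			       struct drm_clip_rect *clips, unsigned int num_clips)
	{
		unsigned int i;

		for (i = 0; i < num_clips; i++) {
			/* upload clips[i], i.e. [x1,x2)x[y1,y2), to the device */
		}

		return 0;
	}

	static const struct drm_framebuffer_funcs my_fb_funcs = {
		.destroy	= drm_gem_fb_destroy,
		.create_handle	= drm_gem_fb_create_handle,
		.dirty		= my_fb_dirty,
	};

drm_fbdev_dma_helper_fb_dirty() above invokes the callback with a single
clip rectangle (num_clips == 1) and a NULL file, matching this shape.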