/* $NetBSD: virtgpu_plane.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $ */

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_plane.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

/*
 * Pixel formats advertised for primary planes.  DRM_FORMAT_HOST_* is a
 * byte-order-dependent alias — presumably resolving to XRGB8888 on
 * little-endian hosts; confirm against the DRM fourcc headers.
 */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* Pixel formats advertised for cursor planes (alpha required). */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

/*
 * Map a DRM fourcc code to the matching VIRTIO_GPU_FORMAT_* constant
 * used on the virtio wire.  Returns 0 (with a WARN) for any fourcc not
 * covered by virtio_gpu_formats[]/virtio_gpu_cursor_formats[] above.
 */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

/*
 * drm_plane_funcs::destroy — planes are allocated with kzalloc() in
 * virtio_gpu_plane_init(), so free them here after DRM cleanup.
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

/* Plane vtable: all atomic handling is delegated to the DRM helpers. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

/*
 * Validate a proposed plane state: no scaling is supported for either
 * plane type, and only the cursor plane may be positioned partly
 * off-screen (can_position = is_cursor).  A state with no framebuffer
 * (plane being disabled) is trivially accepted.
 */
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!state->fb || WARN_ON(!state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  is_cursor, true);
	return ret;
}

/*
 * Queue a TRANSFER_TO_HOST_2D for the damaged rectangle of a dumb
 * (guest-backed) BO so the host resource picks up the new pixels.
 * Allocation failure of the object array is silently dropped — the
 * frame is simply not transferred.
 */
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	/* Byte offset of the rect's top-left pixel within the BO. */
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

/*
 * atomic_update for the primary plane.  Sequence:
 *   1. resolve the output from the new (or, failing that, old) CRTC;
 *   2. if disabling (no fb / output off), point the scanout at
 *      resource 0 and return;
 *   3. merge the damage rects; transfer dirty pixels for dumb BOs;
 *   4. re-issue SET_SCANOUT only when fb or src viewport changed;
 *   5. flush the damaged region.
 * Commands are batched between disable_notify/enable_notify so the
 * host is kicked once for the whole update.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->enabled) {
		DRM_DEBUG("nofb\n");
		/* Resource id 0 detaches the scanout. */
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		return;
	}

	/* Nothing damaged — nothing to do. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	virtio_gpu_disable_notify(vgdev);

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y) {
		/* src_* are 16.16 fixed point, hence the >> 16. */
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);
		virtio_gpu_cmd_set_scanout(vgdev, output->index,
					   bo->hw_res_handle,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   plane->state->src_x >> 16,
					   plane->state->src_y >> 16);
	}

	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
				      rect.x1,
				      rect.y1,
				      rect.x2 - rect.x1,
				      rect.y2 - rect.y1);

	virtio_gpu_enable_notify(vgdev);
}

/*
 * prepare_fb for the cursor plane: when a new dumb-BO framebuffer is
 * being attached, pre-allocate the fence that atomic_update will use
 * to wait for the cursor image transfer.  Returns -ENOMEM on fence
 * allocation failure, 0 otherwise.
 */
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	/* Only needed when the fb actually changes. */
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

/*
 * cleanup_fb counterpart: drop a fence that prepare_fb allocated but
 * atomic_update never consumed (e.g. the commit was aborted).
 */
static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

/*
 * atomic_update for the cursor plane.  For a new cursor image
 * (fb changed, dumb BO) the pixel data is transferred to the host and
 * waited on synchronously, then an UPDATE_CURSOR command is sent; a
 * mere position change sends the cheaper MOVE_CURSOR.  Either way the
 * cursor state is pushed to the host via virtio_gpu_cursor_ping().
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		/* No fb: resource 0 hides the cursor. */
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		/*
		 * Wait for the transfer before pointing the cursor at
		 * the resource; fence was allocated in prepare_fb.
		 */
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

/*
 * Allocate and register one plane of the given type for scanout
 * `index'.  The possible-CRTC mask is 1 << index, i.e. each plane is
 * tied to exactly one CRTC.  Returns the plane or an ERR_PTR; the
 * plane is freed in virtio_gpu_plane_destroy() via drm_plane_funcs.
 */
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}