/*	$NetBSD: vmwgfx_mob.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_mob.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/highmem.h>

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo:          Buffer object holding the page table pages.
 * @num_pages:      Number of pages that make up the page table.
 * @pt_level:       The indirection level of the page table. 0-2.
 * @pt_root_page:   DMA address of the level 0 page of the page table.
 * @id:             Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
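 * @enabled:        Whether the table is used and should be set up on the
 *                  device.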
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable_bo:      Buffer object backing the object table
 * @offset:         Start of table offset into dev_priv::otable_bo
 * @otable:         Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, but the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable:         Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, NULL);
		BUG_ON(ret != 0);

		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}


static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	SVGAOTableType i;
	int ret;

	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &batch->otable_bo);

	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(batch->otable_bo);

	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_unreserve:
	ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
out_no_bo:
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization by setting up the guest backed memory object tables.
 * Returns 0 on success and a negative error code on failure. A successful
 * return means the object tables can be taken down using the
 * vmw_otables_takedown function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (dev_priv->has_dx) {
		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
				   GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
				      struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:     Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:     Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(!mob))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv:       Pointer to a device private structure
 * @mob:            Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable and adjusts
 * TTM memory accounting accordingly; the accounting may cause other TTM
 * buffer objects to be swapped out to make room. Returns -ENOMEM if
 * memory resources aren't sufficient.
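 * The page table is backed by a system-memory TTM buffer object that is
 * populated and DMA-mapped before use.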
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);

	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_put(mob->pt_bo);
	mob->pt_bo = NULL;

	return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter:      Iterator over the DMA addresses of the underlying buffer
 *                  object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter:        Iterator over the page table pages to write.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Iterator over the DMA addresses of the buffer object's
 *                  data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Iteratively builds a multilevel mob page table, one level per pass.
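 * Each pass writes one level of page table pages: the first pass maps the
 * buffer object's data pages, and every following pass maps the page table
 * pages written by the previous pass, until a single root page remains.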
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (cmd) {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table describing the data pages
 *                  of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 *                  object.
 * @mob_id:         Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
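 * Returns 0 on success and a negative error code on failure.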
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_no_cmd_space;

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}

	return -ENOMEM;
}