/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
36235783Skib */ 37235783Skib 38235783Skib#include <dev/pci/pcireg.h> 39235783Skib 40235783Skib#include <dev/drm2/drmP.h> 41235783Skib 42235783Skib/* Allocation of PCI memory resources (framebuffer, registers, etc.) for 43235783Skib * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual 44235783Skib * address for accessing them. Cleaned up at unload. 45235783Skib */ 46235783Skibstatic int drm_alloc_resource(struct drm_device *dev, int resource) 47235783Skib{ 48235783Skib struct resource *res; 49235783Skib int rid; 50235783Skib 51235783Skib DRM_LOCK_ASSERT(dev); 52235783Skib 53235783Skib if (resource >= DRM_MAX_PCI_RESOURCE) { 54235783Skib DRM_ERROR("Resource %d too large\n", resource); 55235783Skib return 1; 56235783Skib } 57235783Skib 58235783Skib if (dev->pcir[resource] != NULL) { 59235783Skib return 0; 60235783Skib } 61235783Skib 62235783Skib DRM_UNLOCK(dev); 63235783Skib rid = PCIR_BAR(resource); 64235783Skib res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid, 65235783Skib RF_SHAREABLE); 66235783Skib DRM_LOCK(dev); 67235783Skib if (res == NULL) { 68235783Skib DRM_ERROR("Couldn't find resource 0x%x\n", resource); 69235783Skib return 1; 70235783Skib } 71235783Skib 72235783Skib if (dev->pcir[resource] == NULL) { 73235783Skib dev->pcirid[resource] = rid; 74235783Skib dev->pcir[resource] = res; 75235783Skib } 76235783Skib 77235783Skib return 0; 78235783Skib} 79235783Skib 80235783Skibunsigned long drm_get_resource_start(struct drm_device *dev, 81235783Skib unsigned int resource) 82235783Skib{ 83235783Skib if (drm_alloc_resource(dev, resource) != 0) 84235783Skib return 0; 85235783Skib 86235783Skib return rman_get_start(dev->pcir[resource]); 87235783Skib} 88235783Skib 89235783Skibunsigned long drm_get_resource_len(struct drm_device *dev, 90235783Skib unsigned int resource) 91235783Skib{ 92235783Skib if (drm_alloc_resource(dev, resource) != 0) 93235783Skib return 0; 94235783Skib 95235783Skib return rman_get_size(dev->pcir[resource]); 
96235783Skib} 97235783Skib 98235783Skibint drm_addmap(struct drm_device * dev, unsigned long offset, 99235783Skib unsigned long size, 100235783Skib enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr) 101235783Skib{ 102235783Skib drm_local_map_t *map; 103235783Skib int align; 104235783Skib /*drm_agp_mem_t *entry; 105235783Skib int valid;*/ 106235783Skib 107235783Skib /* Only allow shared memory to be removable since we only keep enough 108235783Skib * book keeping information about shared memory to allow for removal 109235783Skib * when processes fork. 110235783Skib */ 111235783Skib if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) { 112235783Skib DRM_ERROR("Requested removable map for non-DRM_SHM\n"); 113235783Skib return EINVAL; 114235783Skib } 115235783Skib if ((offset & PAGE_MASK) || (size & PAGE_MASK)) { 116235783Skib DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n", 117235783Skib offset, size); 118235783Skib return EINVAL; 119235783Skib } 120235783Skib if (offset + size < offset) { 121235783Skib DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n", 122235783Skib offset, size); 123235783Skib return EINVAL; 124235783Skib } 125235783Skib 126235783Skib DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset, 127235783Skib size, type); 128235783Skib 129235783Skib /* Check if this is just another version of a kernel-allocated map, and 130235783Skib * just hand that back if so. 
131235783Skib */ 132235783Skib if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER || 133235783Skib type == _DRM_SHM) { 134235783Skib TAILQ_FOREACH(map, &dev->maplist, link) { 135235783Skib if (map->type == type && (map->offset == offset || 136235783Skib (map->type == _DRM_SHM && 137235783Skib map->flags == _DRM_CONTAINS_LOCK))) { 138235783Skib map->size = size; 139235783Skib DRM_DEBUG("Found kernel map %d\n", type); 140235783Skib goto done; 141235783Skib } 142235783Skib } 143235783Skib } 144235783Skib DRM_UNLOCK(dev); 145235783Skib 146235783Skib /* Allocate a new map structure, fill it in, and do any type-specific 147235783Skib * initialization necessary. 148235783Skib */ 149235783Skib map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT); 150235783Skib if (!map) { 151235783Skib DRM_LOCK(dev); 152235783Skib return ENOMEM; 153235783Skib } 154235783Skib 155235783Skib map->offset = offset; 156235783Skib map->size = size; 157235783Skib map->type = type; 158235783Skib map->flags = flags; 159235783Skib map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) << 160235783Skib DRM_MAP_HANDLE_SHIFT); 161235783Skib 162235783Skib switch (map->type) { 163235783Skib case _DRM_REGISTERS: 164235783Skib map->virtual = drm_ioremap(dev, map); 165235783Skib if (!(map->flags & _DRM_WRITE_COMBINING)) 166235783Skib break; 167235783Skib /* FALLTHROUGH */ 168235783Skib case _DRM_FRAME_BUFFER: 169235783Skib if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0) 170235783Skib map->mtrr = 1; 171235783Skib break; 172235783Skib case _DRM_SHM: 173235783Skib map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT); 174235783Skib DRM_DEBUG("%lu %d %p\n", 175235783Skib map->size, drm_order(map->size), map->virtual); 176235783Skib if (!map->virtual) { 177235783Skib free(map, DRM_MEM_MAPS); 178235783Skib DRM_LOCK(dev); 179235783Skib return ENOMEM; 180235783Skib } 181235783Skib map->offset = (unsigned long)map->virtual; 182235783Skib if (map->flags & _DRM_CONTAINS_LOCK) { 
183235783Skib /* Prevent a 2nd X Server from creating a 2nd lock */ 184235783Skib DRM_LOCK(dev); 185235783Skib if (dev->lock.hw_lock != NULL) { 186235783Skib DRM_UNLOCK(dev); 187235783Skib free(map->virtual, DRM_MEM_MAPS); 188235783Skib free(map, DRM_MEM_MAPS); 189235783Skib return EBUSY; 190235783Skib } 191235783Skib dev->lock.hw_lock = map->virtual; /* Pointer to lock */ 192235783Skib DRM_UNLOCK(dev); 193235783Skib } 194235783Skib break; 195235783Skib case _DRM_AGP: 196235783Skib /*valid = 0;*/ 197235783Skib /* In some cases (i810 driver), user space may have already 198235783Skib * added the AGP base itself, because dev->agp->base previously 199235783Skib * only got set during AGP enable. So, only add the base 200235783Skib * address if the map's offset isn't already within the 201235783Skib * aperture. 202235783Skib */ 203235783Skib if (map->offset < dev->agp->base || 204235783Skib map->offset > dev->agp->base + 205235783Skib dev->agp->info.ai_aperture_size - 1) { 206235783Skib map->offset += dev->agp->base; 207235783Skib } 208235783Skib map->mtrr = dev->agp->mtrr; /* for getmap */ 209235783Skib /*for (entry = dev->agp->memory; entry; entry = entry->next) { 210235783Skib if ((map->offset >= entry->bound) && 211235783Skib (map->offset + map->size <= 212235783Skib entry->bound + entry->pages * PAGE_SIZE)) { 213235783Skib valid = 1; 214235783Skib break; 215235783Skib } 216235783Skib } 217235783Skib if (!valid) { 218235783Skib free(map, DRM_MEM_MAPS); 219235783Skib DRM_LOCK(dev); 220235783Skib return EACCES; 221235783Skib }*/ 222235783Skib break; 223235783Skib case _DRM_SCATTER_GATHER: 224235783Skib if (!dev->sg) { 225235783Skib free(map, DRM_MEM_MAPS); 226235783Skib DRM_LOCK(dev); 227235783Skib return EINVAL; 228235783Skib } 229235783Skib map->virtual = (void *)(dev->sg->vaddr + offset); 230235783Skib map->offset = dev->sg->vaddr + offset; 231235783Skib break; 232235783Skib case _DRM_CONSISTENT: 233235783Skib /* Unfortunately, we don't get any alignment 
specification from 234235783Skib * the caller, so we have to guess. drm_pci_alloc requires 235235783Skib * a power-of-two alignment, so try to align the bus address of 236235783Skib * the map to it size if possible, otherwise just assume 237235783Skib * PAGE_SIZE alignment. 238235783Skib */ 239235783Skib align = map->size; 240235783Skib if ((align & (align - 1)) != 0) 241235783Skib align = PAGE_SIZE; 242235783Skib map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful); 243235783Skib if (map->dmah == NULL) { 244235783Skib free(map, DRM_MEM_MAPS); 245235783Skib DRM_LOCK(dev); 246235783Skib return ENOMEM; 247235783Skib } 248235783Skib map->virtual = map->dmah->vaddr; 249235783Skib map->offset = map->dmah->busaddr; 250235783Skib break; 251235783Skib default: 252235783Skib DRM_ERROR("Bad map type %d\n", map->type); 253235783Skib free(map, DRM_MEM_MAPS); 254235783Skib DRM_LOCK(dev); 255235783Skib return EINVAL; 256235783Skib } 257235783Skib 258235783Skib DRM_LOCK(dev); 259235783Skib TAILQ_INSERT_TAIL(&dev->maplist, map, link); 260235783Skib 261235783Skibdone: 262235783Skib /* Jumped to, with lock held, when a kernel map is found. 
*/ 263235783Skib 264235783Skib DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset, 265235783Skib map->size); 266235783Skib 267235783Skib *map_ptr = map; 268235783Skib 269235783Skib return 0; 270235783Skib} 271235783Skib 272235783Skibint drm_addmap_ioctl(struct drm_device *dev, void *data, 273235783Skib struct drm_file *file_priv) 274235783Skib{ 275235783Skib struct drm_map *request = data; 276235783Skib drm_local_map_t *map; 277235783Skib int err; 278235783Skib 279235783Skib if (!(dev->flags & (FREAD|FWRITE))) 280235783Skib return EACCES; /* Require read/write */ 281235783Skib 282235783Skib if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP) 283235783Skib return EACCES; 284235783Skib 285235783Skib DRM_LOCK(dev); 286235783Skib err = drm_addmap(dev, request->offset, request->size, request->type, 287235783Skib request->flags, &map); 288235783Skib DRM_UNLOCK(dev); 289235783Skib if (err != 0) 290235783Skib return err; 291235783Skib 292235783Skib request->offset = map->offset; 293235783Skib request->size = map->size; 294235783Skib request->type = map->type; 295235783Skib request->flags = map->flags; 296235783Skib request->mtrr = map->mtrr; 297235783Skib request->handle = (void *)map->handle; 298235783Skib 299235783Skib return 0; 300235783Skib} 301235783Skib 302235783Skibvoid drm_rmmap(struct drm_device *dev, drm_local_map_t *map) 303235783Skib{ 304235783Skib DRM_LOCK_ASSERT(dev); 305235783Skib 306235783Skib if (map == NULL) 307235783Skib return; 308235783Skib 309235783Skib TAILQ_REMOVE(&dev->maplist, map, link); 310235783Skib 311235783Skib switch (map->type) { 312235783Skib case _DRM_REGISTERS: 313235783Skib if (map->bsr == NULL) 314235783Skib drm_ioremapfree(map); 315235783Skib /* FALLTHROUGH */ 316235783Skib case _DRM_FRAME_BUFFER: 317235783Skib if (map->mtrr) { 318235783Skib int __unused retcode; 319235783Skib 320235783Skib retcode = drm_mtrr_del(0, map->offset, map->size, 321235783Skib DRM_MTRR_WC); 322235783Skib DRM_DEBUG("mtrr_del = %d\n", 
retcode); 323235783Skib } 324235783Skib break; 325235783Skib case _DRM_SHM: 326235783Skib free(map->virtual, DRM_MEM_MAPS); 327235783Skib break; 328235783Skib case _DRM_AGP: 329235783Skib case _DRM_SCATTER_GATHER: 330235783Skib break; 331235783Skib case _DRM_CONSISTENT: 332235783Skib drm_pci_free(dev, map->dmah); 333235783Skib break; 334235783Skib default: 335235783Skib DRM_ERROR("Bad map type %d\n", map->type); 336235783Skib break; 337235783Skib } 338235783Skib 339235783Skib if (map->bsr != NULL) { 340235783Skib bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid, 341235783Skib map->bsr); 342235783Skib } 343235783Skib 344235783Skib DRM_UNLOCK(dev); 345235783Skib if (map->handle) 346235783Skib free_unr(dev->map_unrhdr, (unsigned long)map->handle >> 347235783Skib DRM_MAP_HANDLE_SHIFT); 348235783Skib DRM_LOCK(dev); 349235783Skib 350235783Skib free(map, DRM_MEM_MAPS); 351235783Skib} 352235783Skib 353235783Skib/* Remove a map private from list and deallocate resources if the mapping 354235783Skib * isn't in use. 355235783Skib */ 356235783Skib 357235783Skibint drm_rmmap_ioctl(struct drm_device *dev, void *data, 358235783Skib struct drm_file *file_priv) 359235783Skib{ 360235783Skib drm_local_map_t *map; 361235783Skib struct drm_map *request = data; 362235783Skib 363235783Skib DRM_LOCK(dev); 364235783Skib TAILQ_FOREACH(map, &dev->maplist, link) { 365235783Skib if (map->handle == request->handle && 366235783Skib map->flags & _DRM_REMOVABLE) 367235783Skib break; 368235783Skib } 369235783Skib 370235783Skib /* No match found. 
*/ 371235783Skib if (map == NULL) { 372235783Skib DRM_UNLOCK(dev); 373235783Skib return EINVAL; 374235783Skib } 375235783Skib 376235783Skib drm_rmmap(dev, map); 377235783Skib 378235783Skib DRM_UNLOCK(dev); 379235783Skib 380235783Skib return 0; 381235783Skib} 382235783Skib 383235783Skib 384235783Skibstatic void drm_cleanup_buf_error(struct drm_device *dev, 385235783Skib drm_buf_entry_t *entry) 386235783Skib{ 387235783Skib int i; 388235783Skib 389235783Skib if (entry->seg_count) { 390235783Skib for (i = 0; i < entry->seg_count; i++) { 391235783Skib drm_pci_free(dev, entry->seglist[i]); 392235783Skib } 393235783Skib free(entry->seglist, DRM_MEM_SEGS); 394235783Skib 395235783Skib entry->seg_count = 0; 396235783Skib } 397235783Skib 398235783Skib if (entry->buf_count) { 399235783Skib for (i = 0; i < entry->buf_count; i++) { 400235783Skib free(entry->buflist[i].dev_private, DRM_MEM_BUFS); 401235783Skib } 402235783Skib free(entry->buflist, DRM_MEM_BUFS); 403235783Skib 404235783Skib entry->buf_count = 0; 405235783Skib } 406235783Skib} 407235783Skib 408235783Skibstatic int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) 409235783Skib{ 410235783Skib drm_device_dma_t *dma = dev->dma; 411235783Skib drm_buf_entry_t *entry; 412235783Skib /*drm_agp_mem_t *agp_entry; 413235783Skib int valid*/ 414235783Skib drm_buf_t *buf; 415235783Skib unsigned long offset; 416235783Skib unsigned long agp_offset; 417235783Skib int count; 418235783Skib int order; 419235783Skib int size; 420235783Skib int alignment; 421235783Skib int page_order; 422235783Skib int total; 423235783Skib int byte_count; 424235783Skib int i; 425235783Skib drm_buf_t **temp_buflist; 426235783Skib 427235783Skib count = request->count; 428235783Skib order = drm_order(request->size); 429235783Skib size = 1 << order; 430235783Skib 431235783Skib alignment = (request->flags & _DRM_PAGE_ALIGN) 432235783Skib ? round_page(size) : size; 433235783Skib page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; 434235783Skib total = PAGE_SIZE << page_order; 435235783Skib 436235783Skib byte_count = 0; 437235783Skib agp_offset = dev->agp->base + request->agp_start; 438235783Skib 439235783Skib DRM_DEBUG("count: %d\n", count); 440235783Skib DRM_DEBUG("order: %d\n", order); 441235783Skib DRM_DEBUG("size: %d\n", size); 442235783Skib DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset); 443235783Skib DRM_DEBUG("alignment: %d\n", alignment); 444235783Skib DRM_DEBUG("page_order: %d\n", page_order); 445235783Skib DRM_DEBUG("total: %d\n", total); 446235783Skib 447235783Skib /* Make sure buffers are located in AGP memory that we own */ 448235783Skib /* Breaks MGA due to drm_alloc_agp not setting up entries for the 449235783Skib * memory. Safe to ignore for now because these ioctls are still 450235783Skib * root-only. 451235783Skib */ 452235783Skib /*valid = 0; 453235783Skib for (agp_entry = dev->agp->memory; agp_entry; 454235783Skib agp_entry = agp_entry->next) { 455235783Skib if ((agp_offset >= agp_entry->bound) && 456235783Skib (agp_offset + total * count <= 457235783Skib agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { 458235783Skib valid = 1; 459235783Skib break; 460235783Skib } 461235783Skib } 462235783Skib if (!valid) { 463235783Skib DRM_DEBUG("zone invalid\n"); 464235783Skib return EINVAL; 465235783Skib }*/ 466235783Skib 467235783Skib entry = &dma->bufs[order]; 468235783Skib 469235783Skib entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, 470235783Skib M_NOWAIT | M_ZERO); 471235783Skib if (!entry->buflist) { 472235783Skib return ENOMEM; 473235783Skib } 474235783Skib 475235783Skib entry->buf_size = size; 476235783Skib entry->page_order = page_order; 477235783Skib 478235783Skib offset = 0; 479235783Skib 480235783Skib while (entry->buf_count < count) { 481235783Skib buf = &entry->buflist[entry->buf_count]; 482235783Skib buf->idx = dma->buf_count + entry->buf_count; 483235783Skib buf->total = alignment; 484235783Skib buf->order = order; 
485235783Skib buf->used = 0; 486235783Skib 487235783Skib buf->offset = (dma->byte_count + offset); 488235783Skib buf->bus_address = agp_offset + offset; 489235783Skib buf->address = (void *)(agp_offset + offset); 490235783Skib buf->next = NULL; 491235783Skib buf->pending = 0; 492235783Skib buf->file_priv = NULL; 493235783Skib 494235783Skib buf->dev_priv_size = dev->driver->buf_priv_size; 495235783Skib buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, 496235783Skib M_NOWAIT | M_ZERO); 497235783Skib if (buf->dev_private == NULL) { 498235783Skib /* Set count correctly so we free the proper amount. */ 499235783Skib entry->buf_count = count; 500235783Skib drm_cleanup_buf_error(dev, entry); 501235783Skib return ENOMEM; 502235783Skib } 503235783Skib 504235783Skib offset += alignment; 505235783Skib entry->buf_count++; 506235783Skib byte_count += PAGE_SIZE << page_order; 507235783Skib } 508235783Skib 509235783Skib DRM_DEBUG("byte_count: %d\n", byte_count); 510235783Skib 511235783Skib temp_buflist = realloc(dma->buflist, 512235783Skib (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), 513235783Skib DRM_MEM_BUFS, M_NOWAIT); 514235783Skib if (temp_buflist == NULL) { 515235783Skib /* Free the entry because it isn't valid */ 516235783Skib drm_cleanup_buf_error(dev, entry); 517235783Skib return ENOMEM; 518235783Skib } 519235783Skib dma->buflist = temp_buflist; 520235783Skib 521235783Skib for (i = 0; i < entry->buf_count; i++) { 522235783Skib dma->buflist[i + dma->buf_count] = &entry->buflist[i]; 523235783Skib } 524235783Skib 525235783Skib dma->buf_count += entry->buf_count; 526235783Skib dma->byte_count += byte_count; 527235783Skib 528235783Skib DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 529235783Skib DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); 530235783Skib 531235783Skib request->count = entry->buf_count; 532235783Skib request->size = size; 533235783Skib 534235783Skib dma->flags = _DRM_DMA_USE_AGP; 535235783Skib 536235783Skib return 0; 
537235783Skib} 538235783Skib 539235783Skibstatic int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) 540235783Skib{ 541235783Skib drm_device_dma_t *dma = dev->dma; 542235783Skib int count; 543235783Skib int order; 544235783Skib int size; 545235783Skib int total; 546235783Skib int page_order; 547235783Skib drm_buf_entry_t *entry; 548235783Skib drm_buf_t *buf; 549235783Skib int alignment; 550235783Skib unsigned long offset; 551235783Skib int i; 552235783Skib int byte_count; 553235783Skib int page_count; 554235783Skib unsigned long *temp_pagelist; 555235783Skib drm_buf_t **temp_buflist; 556235783Skib 557235783Skib count = request->count; 558235783Skib order = drm_order(request->size); 559235783Skib size = 1 << order; 560235783Skib 561235783Skib DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", 562235783Skib request->count, request->size, size, order); 563235783Skib 564235783Skib alignment = (request->flags & _DRM_PAGE_ALIGN) 565235783Skib ? round_page(size) : size; 566235783Skib page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; 567235783Skib total = PAGE_SIZE << page_order; 568235783Skib 569235783Skib entry = &dma->bufs[order]; 570235783Skib 571235783Skib entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, 572235783Skib M_NOWAIT | M_ZERO); 573235783Skib entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS, 574235783Skib M_NOWAIT | M_ZERO); 575235783Skib 576235783Skib /* Keep the original pagelist until we know all the allocations 577235783Skib * have succeeded 578235783Skib */ 579235783Skib temp_pagelist = malloc((dma->page_count + (count << page_order)) * 580235783Skib sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT); 581235783Skib 582235783Skib if (entry->buflist == NULL || entry->seglist == NULL || 583235783Skib temp_pagelist == NULL) { 584235783Skib free(temp_pagelist, DRM_MEM_PAGES); 585235783Skib free(entry->seglist, DRM_MEM_SEGS); 586235783Skib free(entry->buflist, DRM_MEM_BUFS); 587235783Skib return ENOMEM; 588235783Skib } 589235783Skib 590235783Skib memcpy(temp_pagelist, dma->pagelist, dma->page_count * 591235783Skib sizeof(*dma->pagelist)); 592235783Skib 593235783Skib DRM_DEBUG("pagelist: %d entries\n", 594235783Skib dma->page_count + (count << page_order)); 595235783Skib 596235783Skib entry->buf_size = size; 597235783Skib entry->page_order = page_order; 598235783Skib byte_count = 0; 599235783Skib page_count = 0; 600235783Skib 601235783Skib while (entry->buf_count < count) { 602235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 603235783Skib drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment, 604235783Skib 0xfffffffful); 605235783Skib DRM_SPINLOCK(&dev->dma_lock); 606235783Skib if (dmah == NULL) { 607235783Skib /* Set count correctly so we free the proper amount. 
*/ 608235783Skib entry->buf_count = count; 609235783Skib entry->seg_count = count; 610235783Skib drm_cleanup_buf_error(dev, entry); 611235783Skib free(temp_pagelist, DRM_MEM_PAGES); 612235783Skib return ENOMEM; 613235783Skib } 614235783Skib 615235783Skib entry->seglist[entry->seg_count++] = dmah; 616235783Skib for (i = 0; i < (1 << page_order); i++) { 617235783Skib DRM_DEBUG("page %d @ %p\n", 618235783Skib dma->page_count + page_count, 619235783Skib (char *)dmah->vaddr + PAGE_SIZE * i); 620235783Skib temp_pagelist[dma->page_count + page_count++] = 621235783Skib (long)dmah->vaddr + PAGE_SIZE * i; 622235783Skib } 623235783Skib for (offset = 0; 624235783Skib offset + size <= total && entry->buf_count < count; 625235783Skib offset += alignment, ++entry->buf_count) { 626235783Skib buf = &entry->buflist[entry->buf_count]; 627235783Skib buf->idx = dma->buf_count + entry->buf_count; 628235783Skib buf->total = alignment; 629235783Skib buf->order = order; 630235783Skib buf->used = 0; 631235783Skib buf->offset = (dma->byte_count + byte_count + offset); 632235783Skib buf->address = ((char *)dmah->vaddr + offset); 633235783Skib buf->bus_address = dmah->busaddr + offset; 634235783Skib buf->next = NULL; 635235783Skib buf->pending = 0; 636235783Skib buf->file_priv = NULL; 637235783Skib 638235783Skib buf->dev_priv_size = dev->driver->buf_priv_size; 639235783Skib buf->dev_private = malloc(buf->dev_priv_size, 640235783Skib DRM_MEM_BUFS, M_NOWAIT | M_ZERO); 641235783Skib if (buf->dev_private == NULL) { 642235783Skib /* Set count correctly so we free the proper amount. 
*/ 643235783Skib entry->buf_count = count; 644235783Skib entry->seg_count = count; 645235783Skib drm_cleanup_buf_error(dev, entry); 646235783Skib free(temp_pagelist, DRM_MEM_PAGES); 647235783Skib return ENOMEM; 648235783Skib } 649235783Skib 650235783Skib DRM_DEBUG("buffer %d @ %p\n", 651235783Skib entry->buf_count, buf->address); 652235783Skib } 653235783Skib byte_count += PAGE_SIZE << page_order; 654235783Skib } 655235783Skib 656235783Skib temp_buflist = realloc(dma->buflist, 657235783Skib (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), 658235783Skib DRM_MEM_BUFS, M_NOWAIT); 659235783Skib if (temp_buflist == NULL) { 660235783Skib /* Free the entry because it isn't valid */ 661235783Skib drm_cleanup_buf_error(dev, entry); 662235783Skib free(temp_pagelist, DRM_MEM_PAGES); 663235783Skib return ENOMEM; 664235783Skib } 665235783Skib dma->buflist = temp_buflist; 666235783Skib 667235783Skib for (i = 0; i < entry->buf_count; i++) { 668235783Skib dma->buflist[i + dma->buf_count] = &entry->buflist[i]; 669235783Skib } 670235783Skib 671235783Skib /* No allocations failed, so now we can replace the orginal pagelist 672235783Skib * with the new one. 
673235783Skib */ 674235783Skib free(dma->pagelist, DRM_MEM_PAGES); 675235783Skib dma->pagelist = temp_pagelist; 676235783Skib 677235783Skib dma->buf_count += entry->buf_count; 678235783Skib dma->seg_count += entry->seg_count; 679235783Skib dma->page_count += entry->seg_count << page_order; 680235783Skib dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); 681235783Skib 682235783Skib request->count = entry->buf_count; 683235783Skib request->size = size; 684235783Skib 685235783Skib return 0; 686235783Skib 687235783Skib} 688235783Skib 689235783Skibstatic int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) 690235783Skib{ 691235783Skib drm_device_dma_t *dma = dev->dma; 692235783Skib drm_buf_entry_t *entry; 693235783Skib drm_buf_t *buf; 694235783Skib unsigned long offset; 695235783Skib unsigned long agp_offset; 696235783Skib int count; 697235783Skib int order; 698235783Skib int size; 699235783Skib int alignment; 700235783Skib int page_order; 701235783Skib int total; 702235783Skib int byte_count; 703235783Skib int i; 704235783Skib drm_buf_t **temp_buflist; 705235783Skib 706235783Skib count = request->count; 707235783Skib order = drm_order(request->size); 708235783Skib size = 1 << order; 709235783Skib 710235783Skib alignment = (request->flags & _DRM_PAGE_ALIGN) 711235783Skib ? round_page(size) : size; 712235783Skib page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; 713235783Skib total = PAGE_SIZE << page_order; 714235783Skib 715235783Skib byte_count = 0; 716235783Skib agp_offset = request->agp_start; 717235783Skib 718235783Skib DRM_DEBUG("count: %d\n", count); 719235783Skib DRM_DEBUG("order: %d\n", order); 720235783Skib DRM_DEBUG("size: %d\n", size); 721235783Skib DRM_DEBUG("agp_offset: %ld\n", agp_offset); 722235783Skib DRM_DEBUG("alignment: %d\n", alignment); 723235783Skib DRM_DEBUG("page_order: %d\n", page_order); 724235783Skib DRM_DEBUG("total: %d\n", total); 725235783Skib 726235783Skib entry = &dma->bufs[order]; 727235783Skib 728235783Skib entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS, 729235783Skib M_NOWAIT | M_ZERO); 730235783Skib if (entry->buflist == NULL) 731235783Skib return ENOMEM; 732235783Skib 733235783Skib entry->buf_size = size; 734235783Skib entry->page_order = page_order; 735235783Skib 736235783Skib offset = 0; 737235783Skib 738235783Skib while (entry->buf_count < count) { 739235783Skib buf = &entry->buflist[entry->buf_count]; 740235783Skib buf->idx = dma->buf_count + entry->buf_count; 741235783Skib buf->total = alignment; 742235783Skib buf->order = order; 743235783Skib buf->used = 0; 744235783Skib 745235783Skib buf->offset = (dma->byte_count + offset); 746235783Skib buf->bus_address = agp_offset + offset; 747235783Skib buf->address = (void *)(agp_offset + offset + dev->sg->vaddr); 748235783Skib buf->next = NULL; 749235783Skib buf->pending = 0; 750235783Skib buf->file_priv = NULL; 751235783Skib 752235783Skib buf->dev_priv_size = dev->driver->buf_priv_size; 753235783Skib buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS, 754235783Skib M_NOWAIT | M_ZERO); 755235783Skib if (buf->dev_private == NULL) { 756235783Skib /* Set count correctly so we free the proper amount. 
*/ 757235783Skib entry->buf_count = count; 758235783Skib drm_cleanup_buf_error(dev, entry); 759235783Skib return ENOMEM; 760235783Skib } 761235783Skib 762235783Skib DRM_DEBUG("buffer %d @ %p\n", 763235783Skib entry->buf_count, buf->address); 764235783Skib 765235783Skib offset += alignment; 766235783Skib entry->buf_count++; 767235783Skib byte_count += PAGE_SIZE << page_order; 768235783Skib } 769235783Skib 770235783Skib DRM_DEBUG("byte_count: %d\n", byte_count); 771235783Skib 772235783Skib temp_buflist = realloc(dma->buflist, 773235783Skib (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), 774235783Skib DRM_MEM_BUFS, M_NOWAIT); 775235783Skib if (temp_buflist == NULL) { 776235783Skib /* Free the entry because it isn't valid */ 777235783Skib drm_cleanup_buf_error(dev, entry); 778235783Skib return ENOMEM; 779235783Skib } 780235783Skib dma->buflist = temp_buflist; 781235783Skib 782235783Skib for (i = 0; i < entry->buf_count; i++) { 783235783Skib dma->buflist[i + dma->buf_count] = &entry->buflist[i]; 784235783Skib } 785235783Skib 786235783Skib dma->buf_count += entry->buf_count; 787235783Skib dma->byte_count += byte_count; 788235783Skib 789235783Skib DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 790235783Skib DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); 791235783Skib 792235783Skib request->count = entry->buf_count; 793235783Skib request->size = size; 794235783Skib 795235783Skib dma->flags = _DRM_DMA_USE_SG; 796235783Skib 797235783Skib return 0; 798235783Skib} 799235783Skib 800235783Skibint drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) 801235783Skib{ 802235783Skib int order, ret; 803235783Skib 804235783Skib if (request->count < 0 || request->count > 4096) 805235783Skib return EINVAL; 806235783Skib 807235783Skib order = drm_order(request->size); 808235783Skib if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 809235783Skib return EINVAL; 810235783Skib 811235783Skib DRM_SPINLOCK(&dev->dma_lock); 812235783Skib 813235783Skib 
/* No more allocations after first buffer-using ioctl. */ 814235783Skib if (dev->buf_use != 0) { 815235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 816235783Skib return EBUSY; 817235783Skib } 818235783Skib /* No more than one allocation per order */ 819235783Skib if (dev->dma->bufs[order].buf_count != 0) { 820235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 821235783Skib return ENOMEM; 822235783Skib } 823235783Skib 824235783Skib ret = drm_do_addbufs_agp(dev, request); 825235783Skib 826235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 827235783Skib 828235783Skib return ret; 829235783Skib} 830235783Skib 831235783Skibint drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) 832235783Skib{ 833235783Skib int order, ret; 834235783Skib 835235783Skib if (!DRM_SUSER(DRM_CURPROC)) 836235783Skib return EACCES; 837235783Skib 838235783Skib if (request->count < 0 || request->count > 4096) 839235783Skib return EINVAL; 840235783Skib 841235783Skib order = drm_order(request->size); 842235783Skib if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 843235783Skib return EINVAL; 844235783Skib 845235783Skib DRM_SPINLOCK(&dev->dma_lock); 846235783Skib 847235783Skib /* No more allocations after first buffer-using ioctl. 
*/ 848235783Skib if (dev->buf_use != 0) { 849235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 850235783Skib return EBUSY; 851235783Skib } 852235783Skib /* No more than one allocation per order */ 853235783Skib if (dev->dma->bufs[order].buf_count != 0) { 854235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 855235783Skib return ENOMEM; 856235783Skib } 857235783Skib 858235783Skib ret = drm_do_addbufs_sg(dev, request); 859235783Skib 860235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 861235783Skib 862235783Skib return ret; 863235783Skib} 864235783Skib 865235783Skibint drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) 866235783Skib{ 867235783Skib int order, ret; 868235783Skib 869235783Skib if (!DRM_SUSER(DRM_CURPROC)) 870235783Skib return EACCES; 871235783Skib 872235783Skib if (request->count < 0 || request->count > 4096) 873235783Skib return EINVAL; 874235783Skib 875235783Skib order = drm_order(request->size); 876235783Skib if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 877235783Skib return EINVAL; 878235783Skib 879235783Skib DRM_SPINLOCK(&dev->dma_lock); 880235783Skib 881235783Skib /* No more allocations after first buffer-using ioctl. 
*/ 882235783Skib if (dev->buf_use != 0) { 883235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 884235783Skib return EBUSY; 885235783Skib } 886235783Skib /* No more than one allocation per order */ 887235783Skib if (dev->dma->bufs[order].buf_count != 0) { 888235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 889235783Skib return ENOMEM; 890235783Skib } 891235783Skib 892235783Skib ret = drm_do_addbufs_pci(dev, request); 893235783Skib 894235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 895235783Skib 896235783Skib return ret; 897235783Skib} 898235783Skib 899235783Skibint drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) 900235783Skib{ 901235783Skib struct drm_buf_desc *request = data; 902235783Skib int err; 903235783Skib 904235783Skib if (request->flags & _DRM_AGP_BUFFER) 905235783Skib err = drm_addbufs_agp(dev, request); 906235783Skib else if (request->flags & _DRM_SG_BUFFER) 907235783Skib err = drm_addbufs_sg(dev, request); 908235783Skib else 909235783Skib err = drm_addbufs_pci(dev, request); 910235783Skib 911235783Skib return err; 912235783Skib} 913235783Skib 914235783Skibint drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv) 915235783Skib{ 916235783Skib drm_device_dma_t *dma = dev->dma; 917235783Skib struct drm_buf_info *request = data; 918235783Skib int i; 919235783Skib int count; 920235783Skib int retcode = 0; 921235783Skib 922235783Skib DRM_SPINLOCK(&dev->dma_lock); 923235783Skib ++dev->buf_use; /* Can't allocate more after this call */ 924235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 925235783Skib 926235783Skib for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 927235783Skib if (dma->bufs[i].buf_count) 928235783Skib ++count; 929235783Skib } 930235783Skib 931235783Skib DRM_DEBUG("count = %d\n", count); 932235783Skib 933235783Skib if (request->count >= count) { 934235783Skib for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 935235783Skib if (dma->bufs[i].buf_count) { 936235783Skib struct drm_buf_desc from; 937235783Skib 
938235783Skib from.count = dma->bufs[i].buf_count; 939235783Skib from.size = dma->bufs[i].buf_size; 940235783Skib from.low_mark = dma->bufs[i].freelist.low_mark; 941235783Skib from.high_mark = dma->bufs[i].freelist.high_mark; 942235783Skib 943235783Skib if (DRM_COPY_TO_USER(&request->list[count], &from, 944235783Skib sizeof(struct drm_buf_desc)) != 0) { 945235783Skib retcode = EFAULT; 946235783Skib break; 947235783Skib } 948235783Skib 949235783Skib DRM_DEBUG("%d %d %d %d %d\n", 950235783Skib i, dma->bufs[i].buf_count, 951235783Skib dma->bufs[i].buf_size, 952235783Skib dma->bufs[i].freelist.low_mark, 953235783Skib dma->bufs[i].freelist.high_mark); 954235783Skib ++count; 955235783Skib } 956235783Skib } 957235783Skib } 958235783Skib request->count = count; 959235783Skib 960235783Skib return retcode; 961235783Skib} 962235783Skib 963235783Skibint drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) 964235783Skib{ 965235783Skib drm_device_dma_t *dma = dev->dma; 966235783Skib struct drm_buf_desc *request = data; 967235783Skib int order; 968235783Skib 969235783Skib DRM_DEBUG("%d, %d, %d\n", 970235783Skib request->size, request->low_mark, request->high_mark); 971235783Skib 972235783Skib 973235783Skib order = drm_order(request->size); 974235783Skib if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER || 975235783Skib request->low_mark < 0 || request->high_mark < 0) { 976235783Skib return EINVAL; 977235783Skib } 978235783Skib 979235783Skib DRM_SPINLOCK(&dev->dma_lock); 980235783Skib if (request->low_mark > dma->bufs[order].buf_count || 981235783Skib request->high_mark > dma->bufs[order].buf_count) { 982235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 983235783Skib return EINVAL; 984235783Skib } 985235783Skib 986235783Skib dma->bufs[order].freelist.low_mark = request->low_mark; 987235783Skib dma->bufs[order].freelist.high_mark = request->high_mark; 988235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 989235783Skib 990235783Skib return 0; 991235783Skib} 992235783Skib 
993235783Skibint drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv) 994235783Skib{ 995235783Skib drm_device_dma_t *dma = dev->dma; 996235783Skib struct drm_buf_free *request = data; 997235783Skib int i; 998235783Skib int idx; 999235783Skib drm_buf_t *buf; 1000235783Skib int retcode = 0; 1001235783Skib 1002235783Skib DRM_DEBUG("%d\n", request->count); 1003235783Skib 1004235783Skib DRM_SPINLOCK(&dev->dma_lock); 1005235783Skib for (i = 0; i < request->count; i++) { 1006235783Skib if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) { 1007235783Skib retcode = EFAULT; 1008235783Skib break; 1009235783Skib } 1010235783Skib if (idx < 0 || idx >= dma->buf_count) { 1011235783Skib DRM_ERROR("Index %d (of %d max)\n", 1012235783Skib idx, dma->buf_count - 1); 1013235783Skib retcode = EINVAL; 1014235783Skib break; 1015235783Skib } 1016235783Skib buf = dma->buflist[idx]; 1017235783Skib if (buf->file_priv != file_priv) { 1018235783Skib DRM_ERROR("Process %d freeing buffer not owned\n", 1019235783Skib DRM_CURRENTPID); 1020235783Skib retcode = EINVAL; 1021235783Skib break; 1022235783Skib } 1023235783Skib drm_free_buffer(dev, buf); 1024235783Skib } 1025235783Skib DRM_SPINUNLOCK(&dev->dma_lock); 1026235783Skib 1027235783Skib return retcode; 1028235783Skib} 1029235783Skib 1030235783Skibint drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) 1031235783Skib{ 1032235783Skib drm_device_dma_t *dma = dev->dma; 1033235783Skib int retcode = 0; 1034235783Skib const int zero = 0; 1035235783Skib vm_offset_t address; 1036235783Skib struct vmspace *vms; 1037235783Skib vm_ooffset_t foff; 1038235783Skib vm_size_t size; 1039235783Skib vm_offset_t vaddr; 1040235783Skib struct drm_buf_map *request = data; 1041235783Skib int i; 1042235783Skib 1043235783Skib vms = DRM_CURPROC->td_proc->p_vmspace; 1044235783Skib 1045235783Skib DRM_SPINLOCK(&dev->dma_lock); 1046235783Skib dev->buf_use++; /* Can't allocate more after this call */ 1047235783Skib 
DRM_SPINUNLOCK(&dev->dma_lock); 1048235783Skib 1049235783Skib if (request->count < dma->buf_count) 1050235783Skib goto done; 1051235783Skib 1052235783Skib if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || 1053235783Skib (drm_core_check_feature(dev, DRIVER_SG) && 1054235783Skib (dma->flags & _DRM_DMA_USE_SG))) { 1055235783Skib drm_local_map_t *map = dev->agp_buffer_map; 1056235783Skib 1057235783Skib if (map == NULL) { 1058235783Skib retcode = EINVAL; 1059235783Skib goto done; 1060235783Skib } 1061235783Skib size = round_page(map->size); 1062235783Skib foff = (unsigned long)map->handle; 1063235783Skib } else { 1064235783Skib size = round_page(dma->byte_count), 1065235783Skib foff = 0; 1066235783Skib } 1067235783Skib 1068235783Skib vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ); 1069235783Skib#if __FreeBSD_version >= 600023 1070235783Skib retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, 1071235783Skib VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE, 1072235783Skib dev->devnode, foff); 1073235783Skib#else 1074235783Skib retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE, 1075235783Skib VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, 1076235783Skib SLIST_FIRST(&dev->devnode->si_hlist), foff); 1077235783Skib#endif 1078235783Skib if (retcode) 1079235783Skib goto done; 1080235783Skib 1081235783Skib request->virtual = (void *)vaddr; 1082235783Skib 1083235783Skib for (i = 0; i < dma->buf_count; i++) { 1084235783Skib if (DRM_COPY_TO_USER(&request->list[i].idx, 1085235783Skib &dma->buflist[i]->idx, sizeof(request->list[0].idx))) { 1086235783Skib retcode = EFAULT; 1087235783Skib goto done; 1088235783Skib } 1089235783Skib if (DRM_COPY_TO_USER(&request->list[i].total, 1090235783Skib &dma->buflist[i]->total, sizeof(request->list[0].total))) { 1091235783Skib retcode = EFAULT; 1092235783Skib goto done; 1093235783Skib } 1094235783Skib if (DRM_COPY_TO_USER(&request->list[i].used, &zero, 1095235783Skib sizeof(zero))) { 
1096235783Skib retcode = EFAULT; 1097235783Skib goto done; 1098235783Skib } 1099235783Skib address = vaddr + dma->buflist[i]->offset; /* *** */ 1100235783Skib if (DRM_COPY_TO_USER(&request->list[i].address, &address, 1101235783Skib sizeof(address))) { 1102235783Skib retcode = EFAULT; 1103235783Skib goto done; 1104235783Skib } 1105235783Skib } 1106235783Skib 1107235783Skib done: 1108235783Skib request->count = dma->buf_count; 1109235783Skib 1110235783Skib DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); 1111235783Skib 1112235783Skib return retcode; 1113235783Skib} 1114235783Skib 1115235783Skib/* 1116235783Skib * Compute order. Can be made faster. 1117235783Skib */ 1118235783Skibint drm_order(unsigned long size) 1119235783Skib{ 1120235783Skib int order; 1121235783Skib 1122235783Skib if (size == 0) 1123235783Skib return 0; 1124235783Skib 1125235783Skib order = flsl(size) - 1; 1126235783Skib if (size & ~(1ul << order)) 1127235783Skib ++order; 1128235783Skib 1129235783Skib return order; 1130235783Skib} 1131