/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *pb = chan->pushbuf_bo;
        struct nouveau_gpuobj *pushbuf = NULL;
        int ret;

        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->vm_end, NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_AGP, &pushbuf);
                chan->pushbuf_base = pb->bo.offset;
        } else
        if (pb->bo.mem.mem_type == TTM_PL_TT) {
                ret = nouveau_gpuobj_gart_dma_new(chan, 0,
                                                  dev_priv->gart_info.aper_size,
                                                  NV_DMA_ACCESS_RO, &pushbuf,
                                                  NULL);
                chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
        } else
        if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_VIDMEM, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
        } else {
                /* NV04 cmdbuf hack, from the original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to cmdbuf in
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pci_resource_start(dev->pdev, 1),
                                             dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
        }

        /* Don't pass a NULL object on if creating the ctxdma failed. */
        if (ret) {
                NV_ERROR(dev, "Error creating pushbuf ctxdma: %d\n", ret);
                return ret;
        }

        ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
        if (ret) {
                NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
                if (pushbuf != dev_priv->gart_info.sg_ctxdma)
                        nouveau_gpuobj_del(dev, &pushbuf);
                return ret;
        }

        return 0;
}
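/*
 * Descriptive note on the ctxdma selection above: the DMA object is the
 * window PFIFO's pusher fetches commands through, so pushbuf_base must be
 * the buffer's offset within whatever that window maps.  On NV50+ the
 * window spans the channel's virtual address space and bo.offset is used
 * directly; pre-NV50 it spans the GART aperture (TT placements) or VRAM,
 * and on NV04 a PCI BAR into VRAM, with mm_node->start counted in pages
 * (hence the PAGE_SHIFT).
 */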
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
        struct nouveau_bo *pushbuf = NULL;
        int location, ret;

        if (nouveau_vram_pushbuf)
                location = TTM_PL_FLAG_VRAM;
        else
                location = TTM_PL_FLAG_TT;

        ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
                             true, &pushbuf);
        if (ret) {
                NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
                return NULL;
        }

        ret = nouveau_bo_pin(pushbuf, location);
        if (ret) {
                NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
                nouveau_bo_ref(NULL, &pushbuf);
                return NULL;
        }

        return pushbuf;
}
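/*
 * Hedged sketch, not part of the original file: how a push buffer
 * allocated above is consumed once nouveau_channel_alloc() below has
 * wired it up.  Kernel-side users go through the nouveau_dma.h helpers,
 * which maintain a software cursor and then kick the hardware PUT
 * register (chan->user_put) that the PFIFO pusher fetches up to:
 *
 *	ret = RING_SPACE(chan, 2);	reserve two words, may wait
 *	if (ret == 0) {
 *		BEGIN_RING(chan, subchan, mthd, 1);
 *		OUT_RING(chan, data);
 *		FIRE_RING(chan);	publish PUT to the GPU
 *	}
 *
 * subchan, mthd and data are placeholders for a bound subchannel, a
 * method offset and its argument.
 */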
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
                      struct drm_file *file_priv,
                      uint32_t vram_handle, uint32_t tt_handle)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_channel *chan;
        int channel, user;
        int ret;

        /*
         * Alright, here is the full story.
         * Nvidia cards have multiple hw fifo contexts (praise them for that,
         * no complicated crash-prone context switches).
         * We allocate a new context for each app and let it write to it
         * directly (woo, full userspace command submission!)
         * When there are no more contexts, you lost.
         */
        for (channel = 0; channel < pfifo->channels; channel++) {
                if (dev_priv->fifos[channel] == NULL)
                        break;
        }

        /* no more fifos.  you lost. */
        if (channel == pfifo->channels)
                return -EINVAL;

        dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
                                           GFP_KERNEL);
        if (!dev_priv->fifos[channel])
                return -ENOMEM;
        chan = dev_priv->fifos[channel];
        INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
        INIT_LIST_HEAD(&chan->fence.pending);
        chan->dev = dev;
        chan->id = channel;
        chan->file_priv = file_priv;
        chan->vram_handle = vram_handle;
        chan->gart_handle = tt_handle;

        NV_INFO(dev, "Allocating FIFO number %d\n", channel);

        /* Allocate DMA push buffer */
        chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
        if (!chan->pushbuf_bo) {
                ret = -ENOMEM;
                NV_ERROR(dev, "pushbuf %d\n", ret);
                nouveau_channel_free(chan);
                return ret;
        }

        nouveau_dma_pre_init(chan);

        /* Locate channel's user control regs */
        if (dev_priv->card_type < NV_40)
                user = NV03_USER(channel);
        else
        if (dev_priv->card_type < NV_50)
                user = NV40_USER(channel);
        else
                user = NV50_USER(channel);

        chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
                             PAGE_SIZE);
        if (!chan->user) {
                NV_ERROR(dev, "ioremap of regs failed.\n");
                nouveau_channel_free(chan);
                return -ENOMEM;
        }
        chan->user_put = 0x40;
        chan->user_get = 0x44;

        /* Allocate space for per-channel fixed notifier memory */
        ret = nouveau_notifier_init_channel(chan);
        if (ret) {
                NV_ERROR(dev, "ntfy %d\n", ret);
                nouveau_channel_free(chan);
                return ret;
        }

        /* Setup channel's default objects */
        ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
        if (ret) {
                NV_ERROR(dev, "gpuobj %d\n", ret);
                nouveau_channel_free(chan);
                return ret;
        }

        /* Create a dma object for the push buffer */
        ret = nouveau_channel_pushbuf_ctxdma_init(chan);
        if (ret) {
                NV_ERROR(dev, "pbctxdma %d\n", ret);
                nouveau_channel_free(chan);
                return ret;
        }

        /* disable the fifo caches */
        pfifo->reassign(dev, false);

        /* Create a graphics context for new channel */
        ret = pgraph->create_context(chan);
        if (ret) {
                nouveau_channel_free(chan);
                return ret;
        }

        /* Construct initial RAMFC for new channel */
        ret = pfifo->create_context(chan);
        if (ret) {
                nouveau_channel_free(chan);
                return ret;
        }

        pfifo->reassign(dev, true);

        ret = nouveau_dma_init(chan);
        if (!ret)
                ret = nouveau_fence_init(chan);
        if (ret) {
                nouveau_channel_free(chan);
                return ret;
        }

        nouveau_debugfs_channel_init(chan);

        NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
        *chan_ret = chan;
        return 0;
}
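/*
 * Hedged usage sketch, not from the original file: the CHANNEL_ALLOC
 * ioctl at the bottom of this file is the normal caller, but any
 * in-kernel user would pair the two entry points the same way.
 * NvDmaFB/NvDmaTT stand in for the caller's VRAM and GART ctxdma
 * handles:
 *
 *	struct nouveau_channel *chan;
 *	int ret;
 *
 *	ret = nouveau_channel_alloc(dev, &chan, file_priv,
 *				    NvDmaFB, NvDmaTT);
 *	if (ret)
 *		return ret;
 *	... submit work on chan ...
 *	nouveau_channel_free(chan);
 */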
/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        unsigned long flags;
        int ret;

        NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

        nouveau_debugfs_channel_fini(chan);

        /* Give outstanding push buffers a chance to complete */
        nouveau_fence_update(chan);
        if (chan->fence.sequence != chan->fence.sequence_ack) {
                struct nouveau_fence *fence = NULL;

                ret = nouveau_fence_new(chan, &fence, true);
                if (ret == 0) {
                        ret = nouveau_fence_wait(fence, NULL, false, false);
                        nouveau_fence_unref((void *)&fence);
                }

                if (ret)
                        NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
        }

        /* Ensure all outstanding fences are signaled.  They should be if the
         * above attempts at idling were OK, but if we failed this'll tell TTM
         * we're done with the buffers.
         */
        nouveau_fence_fini(chan);

        /* This will prevent pfifo from switching channels. */
        pfifo->reassign(dev, false);

        /* We want to give pgraph a chance to idle and get rid of all
         * potential errors.  We need to do this before the lock, otherwise
         * the irq handler is unable to process them.
         */
        if (pgraph->channel(dev) == chan)
                nouveau_wait_for_idle(dev);

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

        pgraph->fifo_access(dev, false);
        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);
        pgraph->destroy_context(chan);
        pgraph->fifo_access(dev, true);

        if (pfifo->channel_id(dev) == chan->id) {
                pfifo->disable(dev);
                pfifo->unload_context(dev);
                pfifo->enable(dev);
        }
        pfifo->destroy_context(chan);

        pfifo->reassign(dev, true);

        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Release the channel's resources */
        nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
        if (chan->pushbuf_bo) {
                nouveau_bo_unpin(chan->pushbuf_bo);
                nouveau_bo_ref(NULL, &chan->pushbuf_bo);
        }
        nouveau_gpuobj_channel_takedown(chan);
        nouveau_notifier_takedown_channel(chan);
        if (chan->user)
                iounmap(chan->user);

        dev_priv->fifos[chan->id] = NULL;
        kfree(chan);
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        int i;

        NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
        for (i = 0; i < engine->fifo.channels; i++) {
                struct nouveau_channel *chan = dev_priv->fifos[i];

                if (chan && chan->file_priv == file_priv)
                        nouveau_channel_free(chan);
        }
}

int
nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
                      int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;

        if (channel >= engine->fifo.channels)
                return 0;
        if (dev_priv->fifos[channel] == NULL)
                return 0;

        return (dev_priv->fifos[channel]->file_priv == file_priv);
}
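/*
 * nouveau_channel_owner() backs the NOUVEAU_GET_USER_CHANNEL_WITH_RETURN
 * macro used by the ioctls below (the macro itself lives in
 * nouveau_drv.h).  Roughly, and hedging on the exact error code and
 * message, it expands to something like:
 *
 *	if (!nouveau_channel_owner(dev, file_priv, id))
 *		return -EPERM;
 *	chan = dev_priv->fifos[id];
 */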
/***********************************
 * ioctls wrapping the functions
 ***********************************/

static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_channel_alloc *init = data;
        struct nouveau_channel *chan;
        int ret;

        if (dev_priv->engine.graph.accel_blocked)
                return -ENODEV;

        if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
                return -EINVAL;

        ret = nouveau_channel_alloc(dev, &chan, file_priv,
                                    init->fb_ctxdma_handle,
                                    init->tt_ctxdma_handle);
        if (ret)
                return ret;
        init->channel = chan->id;

        if (chan->dma.ib_max)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                                        NOUVEAU_GEM_DOMAIN_GART;
        else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
        else
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

        init->subchan[0].handle = NvM2MF;
        if (dev_priv->card_type < NV_50)
                init->subchan[0].grclass = 0x0039;
        else
                init->subchan[0].grclass = 0x5039;
        init->subchan[1].handle = NvSw;
        init->subchan[1].grclass = NV_SW;
        init->nr_subchan = 2;

        /* Named memory object area */
        ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
                                    &init->notifier_handle);
        if (ret) {
                nouveau_channel_free(chan);
                return ret;
        }

        return 0;
}

static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_nouveau_channel_free *cfree = data;
        struct nouveau_channel *chan;

        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

        nouveau_channel_free(chan);
        return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
        DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
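/*
 * For reference, a hedged sketch of how userspace drives the two channel
 * ioctls above through libdrm's generic drmCommandWriteRead() and
 * drmCommandWrite() wrappers; the request structs come from
 * nouveau_drm.h, vram_handle/gart_handle are caller-chosen ctxdma
 * handles, and error handling is elided:
 *
 *	struct drm_nouveau_channel_alloc req = { 0 };
 *	struct drm_nouveau_channel_free cfree;
 *
 *	req.fb_ctxdma_handle = vram_handle;
 *	req.tt_ctxdma_handle = gart_handle;
 *	drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
 *			    &req, sizeof(req));
 *	... submit on req.channel, map the push buffer, etc ...
 *	cfree.channel = req.channel;
 *	drmCommandWrite(fd, DRM_NOUVEAU_CHANNEL_FREE,
 *			&cfree, sizeof(cfree));
 */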