/*
 * Copyright (C) 2007 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * 26 */ 27 28#include "drmP.h" 29#include "drm.h" 30#include "nouveau_drv.h" 31 32int 33nouveau_notifier_init_channel(struct nouveau_channel *chan) 34{ 35 struct drm_device *dev = chan->dev; 36 struct nouveau_bo *ntfy = NULL; 37 uint32_t flags; 38 int ret; 39 40 if (nouveau_vram_notify) 41 flags = TTM_PL_FLAG_VRAM; 42 else 43 flags = TTM_PL_FLAG_TT; 44 45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 46 0, 0x0000, false, true, &ntfy); 47 if (ret) 48 return ret; 49 50 ret = nouveau_bo_pin(ntfy, flags); 51 if (ret) 52 goto out_err; 53 54 ret = nouveau_bo_map(ntfy); 55 if (ret) 56 goto out_err; 57 58 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); 59 if (ret) 60 goto out_err; 61 62 chan->notifier_bo = ntfy; 63out_err: 64 if (ret) 65 drm_gem_object_unreference_unlocked(ntfy->gem); 66 67 return ret; 68} 69 70void 71nouveau_notifier_takedown_channel(struct nouveau_channel *chan) 72{ 73 struct drm_device *dev = chan->dev; 74 75 if (!chan->notifier_bo) 76 return; 77 78 nouveau_bo_unmap(chan->notifier_bo); 79 mutex_lock(&dev->struct_mutex); 80 nouveau_bo_unpin(chan->notifier_bo); 81 mutex_unlock(&dev->struct_mutex); 82 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); 83 drm_mm_takedown(&chan->notifier_heap); 84} 85 86static void 87nouveau_notifier_gpuobj_dtor(struct drm_device *dev, 88 struct nouveau_gpuobj *gpuobj) 89{ 90 NV_DEBUG(dev, "\n"); 91 92 if (gpuobj->priv) 93 drm_mm_put_block(gpuobj->priv); 94} 95 96int 97nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, 98 int size, uint32_t *b_offset) 99{ 100 struct drm_device *dev = chan->dev; 101 struct drm_nouveau_private *dev_priv = dev->dev_private; 102 struct nouveau_gpuobj *nobj = NULL; 103 struct drm_mm_node *mem; 104 uint32_t offset; 105 int target, ret; 106 107 mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); 108 if (mem) 109 mem = drm_mm_get_block(mem, size, 0); 110 if (!mem) { 111 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); 112 
return -ENOMEM; 113 } 114 115 offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT; 116 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { 117 target = NV_DMA_TARGET_VIDMEM; 118 } else 119 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) { 120 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && 121 dev_priv->card_type < NV_50) { 122 ret = nouveau_sgdma_get_page(dev, offset, &offset); 123 if (ret) 124 return ret; 125 target = NV_DMA_TARGET_PCI; 126 } else { 127 target = NV_DMA_TARGET_AGP; 128 if (dev_priv->card_type >= NV_50) 129 offset += dev_priv->vm_gart_base; 130 } 131 } else { 132 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 133 chan->notifier_bo->bo.mem.mem_type); 134 return -EINVAL; 135 } 136 offset += mem->start; 137 138 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, 139 mem->size, NV_DMA_ACCESS_RW, target, 140 &nobj); 141 if (ret) { 142 drm_mm_put_block(mem); 143 NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret); 144 return ret; 145 } 146 nobj->dtor = nouveau_notifier_gpuobj_dtor; 147 nobj->priv = mem; 148 149 ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); 150 if (ret) { 151 nouveau_gpuobj_del(dev, &nobj); 152 drm_mm_put_block(mem); 153 NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); 154 return ret; 155 } 156 157 *b_offset = mem->start; 158 return 0; 159} 160 161int 162nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) 163{ 164 if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor) 165 return -EINVAL; 166 167 if (poffset) { 168 struct drm_mm_node *mem = nobj->priv; 169 170 if (*poffset >= mem->size) 171 return false; 172 173 *poffset += mem->start; 174 } 175 176 return 0; 177} 178 179int 180nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, 181 struct drm_file *file_priv) 182{ 183 struct drm_nouveau_notifierobj_alloc *na = data; 184 struct nouveau_channel *chan; 185 int ret; 186 187 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, 
file_priv, chan); 188 189 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); 190 if (ret) 191 return ret; 192 193 return 0; 194} 195