1/* $NetBSD: nouveau_nvkm_engine_fifo_gpfifogf100.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $ */ 2 3/* 4 * Copyright 2012 Red Hat Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_fifo_gpfifogf100.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $");

#include "changf100.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/cl906f.h>
#include <nvif/unpack.h>

/*
 * Look up the nvkm_event a client may wait on for the given NV906F
 * notifier type.  On success stores the event in *pevent and returns 0;
 * unknown types yield -EINVAL.
 */
int
gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
		     struct nvkm_event **pevent)
{
	switch (type) {
	case NV906F_V0_NTFY_NON_STALL_INTERRUPT:
		*pevent = &chan->fifo->uevent;
		return 0;
	case NV906F_V0_NTFY_KILLED:
		*pevent = &chan->fifo->kevent;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Byte offset within the channel's instance block of the two-word slot
 * (lo at +0x00, hi at +0x04 — written by engine_init/engine_fini below)
 * holding the given engine's context address.  Returns 0 for SW, which
 * has no context slot, and warns on engines this channel class does not
 * support.
 */
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_SW    : return 0;
	case NVKM_ENGINE_GR    : return 0x0210;
	case NVKM_ENGINE_CE0   : return 0x0230;
	case NVKM_ENGINE_CE1   : return 0x0240;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	default:
		WARN_ON(1);
		return 0;
	}
}

/*
 * Detach an engine context from the channel.
 *
 * First the channel is "kicked": its chid is written to 0x002634 and we
 * poll (up to 2s) until the register reads back the chid, serialised by
 * the subdev mutex.  A timeout is logged and recorded as -ETIMEDOUT.
 * If the kick timed out *and* we are suspending, bail out immediately;
 * otherwise the engine's context slot in the instance block is zeroed
 * regardless, and any kick error is still returned.
 */
static int
gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gpuobj *inst = chan->base.inst;
	int ret = 0;

	mutex_lock(&subdev->mutex);
	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		ret = -ETIMEDOUT;
	}
	mutex_unlock(&subdev->mutex);

	if (ret && suspend)
		return ret;

	if (offset) {
		/* Clear the engine context pointer in the instance block. */
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, 0x00000000);
		nvkm_wo32(inst, offset + 0x04, 0x00000000);
		nvkm_done(inst);
	}

	return ret;
}

/*
 * Attach an engine context: write the context's GPU virtual address
 * into the engine's slot in the instance block.  The low word is OR'd
 * with 4 (hardware-defined flag bits in the low nibble; exact meaning
 * not visible here).  Engines without a context slot are a no-op.
 */
static int
gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;

	if (offset) {
		u64 addr = chan->engn[engine->subdev.index].vma->addr;
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
		nvkm_done(inst);
	}

	return 0;
}

/*
 * Release the per-engine resources allocated by engine_ctor: the VMA
 * in the channel's address space and the bound context gpuobj.
 */
static void
gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}

/*
 * Prepare an engine context for this channel: bind the client's context
 * object into a gpuobj, carve out a VMA (12-bit/4KiB page shift) of the
 * same size in the channel's vmm, and map the context into it.  Engines
 * with no context slot (addr == 0) are skipped.  Partially acquired
 * resources are released by engine_dtor on the failure path.
 */
static int
gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	int engn = engine->subdev.index;
	int ret;

	if (!gf100_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
			   &chan->engn[engn].vma);
	if (ret)
		return ret;

	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
			       chan->engn[engn].vma, NULL, 0);
}

/*
 * Take the channel off the hardware: if it is on the runlist (and not
 * already killed), remove it, clear the enable bit in its 0x003004
 * control register and commit the updated runlist.  Pending engine
 * interrupt work is then processed (helper defined elsewhere in this
 * driver) before the channel's 0x003000 register is cleared.
 */
static void
gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 coff = chan->base.chid * 8;	/* 8 bytes of control regs per channel */

	if (!list_empty(&chan->head) && !chan->killed) {
		gf100_fifo_runlist_remove(fifo, chan);
		nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
		gf100_fifo_runlist_commit(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + coff, 0x00000000);
}

/*
 * Bring the channel up: point its 0x003000 control register at the
 * instance block (address in units of 4KiB pages, OR'd with 0xc0000000
 * flag bits), then, unless it is already listed or has been killed,
 * insert it into the runlist, enable it (0x003004 = 0x001f0001) and
 * commit the runlist.
 */
static void
gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 addr = chan->base.inst->addr >> 12;
	u32 coff = chan->base.chid * 8;

	nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);

	if (list_empty(&chan->head) && !chan->killed) {
		gf100_fifo_runlist_insert(fifo, chan);
		nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
		gf100_fifo_runlist_commit(fifo);
	}
}

/*
 * Return the containing gf100_fifo_chan so the object core frees the
 * whole allocation made in gf100_fifo_gpfifo_new().
 */
static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
	return gf100_fifo_chan(base);
}

/* Channel method table wired into the common fifo channel core. */
static const struct nvkm_fifo_chan_func
gf100_fifo_gpfifo_func = {
	.dtor = gf100_fifo_gpfifo_dtor,
	.init = gf100_fifo_gpfifo_init,
	.fini = gf100_fifo_gpfifo_fini,
	.ntfy = gf100_fifo_chan_ntfy,
	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
	.engine_init = gf100_fifo_gpfifo_engine_init,
	.engine_fini = gf100_fifo_gpfifo_engine_fini,
};

/*
 * Constructor for a FERMI_CHANNEL_GPFIFO object: unpack and validate
 * the client's v0 arguments (a vmm handle is mandatory), allocate and
 * initialise the channel via the common ctor, zero the channel's page
 * of user-visible control memory, and fill in the RAMFC state in the
 * instance block.  Returns 0 on success or a negative errno; on
 * failure after *pobject is set, teardown is the caller's/core's
 * responsibility via the dtor.
 */
static int
gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	union {
		struct fermi_channel_gpfifo_v0 v0;
	} *args = data;
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_object *parent = oclass->parent;
	struct gf100_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	int ret = -ENOSYS, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %"PRIx64" "
				   "ioffset %016"PRIx64" ilength %08x\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength);
		if (!args->v0.vmm)
			return -EINVAL;
	} else
		return ret;

	/* allocate channel */
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	INIT_LIST_HEAD(&chan->head);

	/* Engines a gf100 gpfifo channel may attach contexts for. */
	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
				  0x1000, 0x1000, true, args->v0.vmm, 0,
				  (1ULL << NVKM_ENGINE_CE0) |
				  (1ULL << NVKM_ENGINE_CE1) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MSPDEC) |
				  (1ULL << NVKM_ENGINE_MSPPP) |
				  (1ULL << NVKM_ENGINE_MSVLD) |
				  (1ULL << NVKM_ENGINE_SW),
				  1, fifo->user.bar->addr, 0x1000,
				  oclass, &chan->base);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	/* clear channel control registers */

	/* Each channel owns one 4KiB page of fifo->user.mem. */
	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	/* ilength encodes log2 of the GPFIFO entry count (8B entries). */
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	/* RAMFC */
	/* NOTE(review): the magic values below are hardware-defined
	 * RAMFC initialisation state; only the fields set from local
	 * variables (usermem, ioffset, ilength) are decoded here. */
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
					 (ilength << 16));
	nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(chan->base.inst);
	return 0;
}

/* Object class exposed to clients for FERMI_CHANNEL_GPFIFO (v0 only). */
const struct nvkm_fifo_chan_oclass
gf100_fifo_gpfifo_oclass = {
	.base.oclass = FERMI_CHANNEL_GPFIFO,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = gf100_fifo_gpfifo_new,
};