/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
211590Srgrimes * 221590Srgrimes * Authors: Ben Skeggs 231590Srgrimes */ 241590Srgrimes#include "priv.h" 251590Srgrimes#include "ram.h" 261590Srgrimes 271590Srgrimes#include <core/memory.h> 281590Srgrimes#include <core/option.h> 291590Srgrimes#include <subdev/bios.h> 301590Srgrimes#include <subdev/bios/M0203.h> 311590Srgrimes#include <engine/gr.h> 321590Srgrimes#include <engine/mpeg.h> 3387712Smarkm 3487712Smarkmvoid 3587712Smarkmnvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile) 3687712Smarkm{ 371590Srgrimes fb->func->tile.fini(fb, region, tile); 3887712Smarkm} 3969528Sasmodai 401590Srgrimesvoid 411590Srgrimesnvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size, 421590Srgrimes u32 pitch, u32 flags, struct nvkm_fb_tile *tile) 4374876Sdwmalone{ 4487712Smarkm fb->func->tile.init(fb, region, addr, size, pitch, flags, tile); 4587712Smarkm} 461590Srgrimes 471590Srgrimesvoid 481590Srgrimesnvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile) 491590Srgrimes{ 5087712Smarkm struct nvkm_device *device = fb->subdev.device; 5187712Smarkm if (fb->func->tile.prog) { 521590Srgrimes fb->func->tile.prog(fb, region, tile); 531590Srgrimes if (device->gr) 541590Srgrimes nvkm_engine_tile(&device->gr->engine, region); 55193488Sbrian if (device->mpeg) 561590Srgrimes nvkm_engine_tile(device->mpeg, region); 5717833Sadam } 5817833Sadam} 591590Srgrimes 601590Srgrimesstatic void 611590Srgrimesnvkm_fb_sysmem_flush_page_init(struct nvkm_device *device) 62201382Sed{ 631590Srgrimes struct nvkm_fb *fb = device->fb; 6417825Speter 651590Srgrimes if (fb->func->sysmem.flush_page_init) 6674876Sdwmalone fb->func->sysmem.flush_page_init(fb); 6774876Sdwmalone} 6874876Sdwmalone 6974876Sdwmaloneint 7074876Sdwmalonenvkm_fb_bios_memtype(struct nvkm_bios *bios) 7174876Sdwmalone{ 72137157Spaul struct nvkm_subdev *subdev = &bios->subdev; 7374876Sdwmalone struct nvkm_device *device = subdev->device; 7474876Sdwmalone const u8 ramcfg = 
(nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2; 7574876Sdwmalone struct nvbios_M0203E M0203E; 7674876Sdwmalone u8 ver, hdr; 7774876Sdwmalone 78139994Sdwmalone if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) { 7974876Sdwmalone switch (M0203E.type) { 8074876Sdwmalone case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2; 8174876Sdwmalone case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3; 8274876Sdwmalone case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3; 8374876Sdwmalone case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5; 8474876Sdwmalone case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X; 8574876Sdwmalone case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6; 8674876Sdwmalone case M0203E_TYPE_HBM2 : return NVKM_RAM_TYPE_HBM2; 8774876Sdwmalone default: 8874876Sdwmalone nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type); 8974876Sdwmalone return NVKM_RAM_TYPE_UNKNOWN; 9074876Sdwmalone } 9174876Sdwmalone } 9274876Sdwmalone 9374876Sdwmalone nvkm_warn(subdev, "M0203E not matched!\n"); 9474876Sdwmalone return NVKM_RAM_TYPE_UNKNOWN; 9574876Sdwmalone} 9674876Sdwmalone 9774876Sdwmalonestatic void 98137157Spaulnvkm_fb_intr(struct nvkm_subdev *subdev) 9974876Sdwmalone{ 10074876Sdwmalone struct nvkm_fb *fb = nvkm_fb(subdev); 10174876Sdwmalone if (fb->func->intr) 10274876Sdwmalone fb->func->intr(fb); 10374876Sdwmalone} 10474876Sdwmalone 10574876Sdwmalonestatic int 106139994Sdwmalonenvkm_fb_oneinit(struct nvkm_subdev *subdev) 10774876Sdwmalone{ 10874876Sdwmalone struct nvkm_fb *fb = nvkm_fb(subdev); 10974876Sdwmalone u32 tags = 0; 11074876Sdwmalone 11174876Sdwmalone if (fb->func->ram_new) { 11274876Sdwmalone int ret = fb->func->ram_new(fb, &fb->ram); 11374876Sdwmalone if (ret) { 11474876Sdwmalone nvkm_error(subdev, "vram setup failed, %d\n", ret); 11574876Sdwmalone return ret; 116251565Sjh } 117251565Sjh } 118251565Sjh 119251565Sjh if (fb->func->oneinit) { 120251565Sjh int ret = fb->func->oneinit(fb); 121251565Sjh if (ret) 122251565Sjh return ret; 123251565Sjh } 
124251565Sjh 125251565Sjh /* Initialise compression tag allocator. 126251565Sjh * 127251565Sjh * LTC oneinit() will override this on Fermi and newer. 128251565Sjh */ 129251565Sjh if (fb->func->tags) { 130 tags = fb->func->tags(fb); 131 nvkm_debug(subdev, "%d comptags\n", tags); 132 } 133 134 return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1); 135} 136 137int 138nvkm_fb_mem_unlock(struct nvkm_fb *fb) 139{ 140 struct nvkm_subdev *subdev = &fb->subdev; 141 int ret; 142 143 if (!fb->func->vpr.scrub_required) 144 return 0; 145 146 ret = nvkm_subdev_oneinit(subdev); 147 if (ret) 148 return ret; 149 150 if (!fb->func->vpr.scrub_required(fb)) { 151 nvkm_debug(subdev, "VPR not locked\n"); 152 return 0; 153 } 154 155 nvkm_debug(subdev, "VPR locked, running scrubber binary\n"); 156 157 if (!fb->vpr_scrubber.fw.img) { 158 nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n"); 159 return 0; 160 } 161 162 ret = fb->func->vpr.scrub(fb); 163 if (ret) { 164 nvkm_error(subdev, "VPR scrubber binary failed\n"); 165 return ret; 166 } 167 168 if (fb->func->vpr.scrub_required(fb)) { 169 nvkm_error(subdev, "VPR still locked after scrub!\n"); 170 return -EIO; 171 } 172 173 nvkm_debug(subdev, "VPR scrubber binary successful\n"); 174 return 0; 175} 176 177u64 178nvkm_fb_vidmem_size(struct nvkm_device *device) 179{ 180 struct nvkm_fb *fb = device->fb; 181 182 if (fb && fb->func->vidmem.size) 183 return fb->func->vidmem.size(fb); 184 185 WARN_ON(1); 186 return 0; 187} 188 189static int 190nvkm_fb_init(struct nvkm_subdev *subdev) 191{ 192 struct nvkm_fb *fb = nvkm_fb(subdev); 193 int ret, i; 194 195 if (fb->ram) { 196 ret = nvkm_ram_init(fb->ram); 197 if (ret) 198 return ret; 199 } 200 201 for (i = 0; i < fb->tile.regions; i++) 202 fb->func->tile.prog(fb, i, &fb->tile.region[i]); 203 204 nvkm_fb_sysmem_flush_page_init(subdev->device); 205 206 if (fb->func->init) 207 fb->func->init(fb); 208 209 if (fb->func->init_remapper) 210 fb->func->init_remapper(fb); 211 212 if (fb->func->init_page) { 
213 ret = fb->func->init_page(fb); 214 if (WARN_ON(ret)) 215 return ret; 216 } 217 218 if (fb->func->init_unkn) 219 fb->func->init_unkn(fb); 220 221 return 0; 222} 223 224static int 225nvkm_fb_preinit(struct nvkm_subdev *subdev) 226{ 227 nvkm_fb_sysmem_flush_page_init(subdev->device); 228 return 0; 229} 230 231static void * 232nvkm_fb_dtor(struct nvkm_subdev *subdev) 233{ 234 struct nvkm_fb *fb = nvkm_fb(subdev); 235 int i; 236 237 nvkm_memory_unref(&fb->mmu_wr); 238 nvkm_memory_unref(&fb->mmu_rd); 239 240 for (i = 0; i < fb->tile.regions; i++) 241 fb->func->tile.fini(fb, i, &fb->tile.region[i]); 242 243 nvkm_mm_fini(&fb->tags.mm); 244 mutex_destroy(&fb->tags.mutex); 245 246 nvkm_ram_del(&fb->ram); 247 248 nvkm_falcon_fw_dtor(&fb->vpr_scrubber); 249 250 if (fb->sysmem.flush_page) { 251 dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr, 252 PAGE_SIZE, DMA_BIDIRECTIONAL); 253 __free_page(fb->sysmem.flush_page); 254 } 255 256 if (fb->func->dtor) 257 return fb->func->dtor(fb); 258 259 return fb; 260} 261 262static const struct nvkm_subdev_func 263nvkm_fb = { 264 .dtor = nvkm_fb_dtor, 265 .preinit = nvkm_fb_preinit, 266 .oneinit = nvkm_fb_oneinit, 267 .init = nvkm_fb_init, 268 .intr = nvkm_fb_intr, 269}; 270 271int 272nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device, 273 enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb) 274{ 275 nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev); 276 fb->func = func; 277 fb->tile.regions = fb->func->tile.regions; 278 fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage); 279 mutex_init(&fb->tags.mutex); 280 281 if (func->sysmem.flush_page_init) { 282 fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 283 if (!fb->sysmem.flush_page) 284 return -ENOMEM; 285 286 fb->sysmem.flush_page_addr = dma_map_page(device->dev, fb->sysmem.flush_page, 287 0, PAGE_SIZE, DMA_BIDIRECTIONAL); 288 if (dma_mapping_error(device->dev, fb->sysmem.flush_page_addr)) 289 
return -EFAULT; 290 } 291 292 return 0; 293} 294 295int 296nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, 297 enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) 298{ 299 if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL))) 300 return -ENOMEM; 301 return nvkm_fb_ctor(func, device, type, inst, *pfb); 302} 303