/*
 * Copyright (c) Intel Corp. 2007.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 * This file is part of the Vermilion Range fb driver.
 * The Vermilion Range fb driver is free software;
 * you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * The Vermilion Range fb driver is distributed
 * in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors:
 *   Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *   Michel Dänzer <michel-at-tungstengraphics-dot-com>
 *   Alan Hourihane <alanh-at-tungstengraphics-dot-com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/mmzone.h>
#include <asm/uaccess.h>

/* #define VERMILION_DEBUG */

#include "vermilion.h"

#define MODULE_NAME "vmlfb"

/*
 * Scale a 16-bit fbdev color component value down to a hardware field
 * of _width bits, rounding to nearest.
 */
#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

/* Protects "subsys" and the two global mode lists below. */
static struct mutex vml_mutex;
/* vml_info instances that currently have no valid mode set. */
static struct list_head global_no_mode;
/* vml_info instances with a mode set through the current subsys. */
static struct list_head global_has_mode;
static struct fb_ops vmlfb_ops;
/* The currently registered clock subsystem, or NULL. */
static struct vml_sys *subsys = NULL;
static char *vml_default_mode = "1024x768@60";
/* Fallback timing used if vml_default_mode cannot be matched. */
static struct fb_videomode defaultmode = {
	NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
	0, FB_VMODE_NONINTERLACED
};

/* VRAM pool sizing: amount to try for, plus total/contiguous minimums. */
static u32 vml_mem_requested = (10 * 1024 * 1024);
static u32 vml_mem_contig = (4 * 1024 * 1024);
static u32 vml_mem_min = (4 * 1024 * 1024);

/* Pixel clocks (kHz) selectable when no subsys provides nearest_clock. */
static u32 vml_clocks[] = {
	6750,
	13500,
	27000,
	29700,
	37125,
	54000,
	59400,
	74250,
	120000,
	148500
};

static u32 vml_num_clocks = ARRAY_SIZE(vml_clocks);

/*
 * Allocate a contiguous vram area and make its linear kernel map
 * uncached.
 *
 * Tries orders from max_order down to min_order until an allocation
 * succeeds. On success fills in va->logical, va->phys, va->size and
 * va->order. Returns 0 or -ENOMEM.
 */

static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
				 unsigned min_order)
{
	gfp_t flags;
	unsigned long i;
	pgprot_t wc_pageprot;

	wc_pageprot = PAGE_KERNEL_NOCACHE;
	/* Pre-increment so the loop below can pre-decrement on first pass. */
	max_order++;
	do {
		/*
		 * Really try hard to get the needed memory.
		 * We need memory below the first 32MB, so we
		 * add the __GFP_DMA flag that guarantees that we are
		 * below the first 16MB.
		 */

		flags = __GFP_DMA | __GFP_HIGH;
		va->logical =
			__get_free_pages(flags, --max_order);
	} while (va->logical == 0 && max_order > min_order);

	if (!va->logical)
		return -ENOMEM;

	va->phys = virt_to_phys((void *)va->logical);
	va->size = PAGE_SIZE << max_order;
	va->order = max_order;

	/*
	 * It seems like __get_free_pages only ups the usage count
	 * of the first page. This doesn't work with nopage mapping, so
	 * up the usage count once more.
	 */

	memset((void *)va->logical, 0x00, va->size);
	for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
		get_page(virt_to_page(i));
	}

	/*
	 * Change caching policy of the linear kernel map to avoid
	 * mapping type conflicts with user-space mappings.
	 * The first global_flush_tlb() is really only there to do a global
	 * wbinvd().
	 */

	global_flush_tlb();
	change_page_attr(virt_to_page(va->logical), va->size >> PAGE_SHIFT,
			 wc_pageprot);
	global_flush_tlb();

	printk(KERN_DEBUG MODULE_NAME
	       ": Allocated %ld bytes vram area at 0x%08lx\n",
	       va->size, va->phys);

	return 0;
}

/*
 * Free a contiguous vram area and reset its linear kernel map
 * mapping type.
 */

static void vmlfb_free_vram_area(struct vram_area *va)
{
	unsigned long j;

	if (va->logical) {

		/*
		 * Reset the linear kernel map caching policy.
		 */

		change_page_attr(virt_to_page(va->logical),
				 va->size >> PAGE_SHIFT, PAGE_KERNEL);
		global_flush_tlb();

		/*
		 * Decrease the usage count on the pages we've used
		 * to compensate for upping when allocating.
		 */

		for (j = va->logical; j < va->logical + va->size;
		     j += PAGE_SIZE) {
			(void)put_page_testzero(virt_to_page(j));
		}

		printk(KERN_DEBUG MODULE_NAME
		       ": Freeing %ld bytes vram area at 0x%08lx\n",
		       va->size, va->phys);
		free_pages(va->logical, va->order);

		/* Mark the area as unused so a second free is a no-op. */
		va->logical = 0;
	}
}

/*
 * Free all allocated vram areas of a vml_info.
 */

static void vmlfb_free_vram(struct vml_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		vmlfb_free_vram_area(&vinfo->vram[i]);
	}
	vinfo->num_areas = 0;
}

/*
 * Allocate vram. Currently we try to allocate contiguous areas from the
 * __GFP_DMA zone and puzzle them together. A better approach would be to
 * allocate one contiguous area for scanout and use one-page allocations for
 * offscreen areas. This requires user-space and GPU virtual mappings.
 *
 * "requested" is the target size; the function succeeds only if the
 * physically contiguous region assembled around the first area reaches
 * both "min_total" and "min_contig". Returns 0 or -ENOMEM (after
 * freeing any partial allocations).
 */

static int vmlfb_alloc_vram(struct vml_info *vinfo,
			    size_t requested,
			    size_t min_total, size_t min_contig)
{
	int i, j;
	int order;
	int contiguous;
	int err;
	struct vram_area *va;
	struct vram_area *va2;

	vinfo->num_areas = 0;
	for (i = 0; i < VML_VRAM_AREAS; ++i) {
		va = &vinfo->vram[i];
		order = 0;

		/* Smallest order that covers the remaining request. */
		while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
			order++;

		err = vmlfb_alloc_vram_area(va, order, 0);

		if (err)
			break;

		if (i == 0) {
			vinfo->vram_start = va->phys;
			vinfo->vram_logical = (void __iomem *) va->logical;
			vinfo->vram_contig_size = va->size;
			vinfo->num_areas = 1;
		} else {
			contiguous = 0;

			/*
			 * Only keep the new area if it is physically
			 * adjacent to one already accepted.
			 */
			for (j = 0; j < i; ++j) {
				va2 = &vinfo->vram[j];
				if (va->phys + va->size == va2->phys ||
				    va2->phys + va2->size == va->phys) {
					contiguous = 1;
					break;
				}
			}

			if (contiguous) {
				vinfo->num_areas++;
				if (va->phys < vinfo->vram_start) {
					vinfo->vram_start = va->phys;
					vinfo->vram_logical =
						(void __iomem *)va->logical;
				}
				vinfo->vram_contig_size += va->size;
			} else {
				vmlfb_free_vram_area(va);
				break;
			}
		}

		if (requested < va->size)
			break;
		else
			requested -= va->size;
	}

	if (vinfo->vram_contig_size > min_total &&
	    vinfo->vram_contig_size > min_contig) {

		printk(KERN_DEBUG MODULE_NAME
		       ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
		       (unsigned long)vinfo->vram_contig_size,
		       (unsigned long)vinfo->vram_start);

		return 0;
	}

	printk(KERN_ERR MODULE_NAME
	       ": Could not allocate requested minimal amount of vram.\n");

	vmlfb_free_vram(vinfo);

	return -ENOMEM;
}

/*
 * Find the GPU to use with our display controller.
 */

static int vmlfb_get_gpu(struct vml_par *par)
{
	mutex_lock(&vml_mutex);

	/*
	 * NOTE(review): pci_get_device() takes a reference on the returned
	 * device which is never dropped with pci_dev_put() anywhere in this
	 * file — looks like a refcount leak; confirm against vml_par usage.
	 */
	par->gpu = pci_get_device(PCI_VENDOR_ID_INTEL, VML_DEVICE_GPU, NULL);

	if (!par->gpu) {
		mutex_unlock(&vml_mutex);
		return -ENODEV;
	}

	mutex_unlock(&vml_mutex);

	if (pci_enable_device(par->gpu) < 0)
		return -ENODEV;

	return 0;
}

/*
 * Find a contiguous vram area that contains a given offset from vram start.
 * Returns 0 if some area contains it, -EINVAL otherwise.
 */
static int vmlfb_vram_offset(struct vml_info *vinfo, unsigned long offset)
{
	unsigned long aoffset;
	unsigned i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		/*
		 * Unsigned arithmetic: if offset lies below this area's
		 * start, aoffset wraps to a huge value and fails the
		 * size test below.
		 */
		aoffset = offset - (vinfo->vram[i].phys - vinfo->vram_start);

		if (aoffset < vinfo->vram[i].size) {
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Remap the MMIO register spaces of the VDC and the GPU.
 * Returns 0 on success or a negative errno; on failure everything
 * claimed so far is released again (goto-based unwind).
 */

static int vmlfb_enable_mmio(struct vml_par *par)
{
	int err;

	par->vdc_mem_base = pci_resource_start(par->vdc, 0);
	par->vdc_mem_size = pci_resource_len(par->vdc, 0);
	if (!request_mem_region(par->vdc_mem_base, par->vdc_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not claim display controller MMIO.\n");
		return -EBUSY;
	}
	par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size);
	if (par->vdc_mem == NULL) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not map display controller MMIO.\n");
		err = -ENOMEM;
		goto out_err_0;
	}

	par->gpu_mem_base = pci_resource_start(par->gpu, 0);
	par->gpu_mem_size = pci_resource_len(par->gpu, 0);
	if (!request_mem_region(par->gpu_mem_base, par->gpu_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME ": Could not claim GPU MMIO.\n");
		err = -EBUSY;
		goto out_err_1;
	}
	par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size);
	if (par->gpu_mem == NULL) {
		printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
		err = -ENOMEM;

		goto out_err_2;
	}

	return 0;

out_err_2:
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
out_err_1:
	iounmap(par->vdc_mem);
out_err_0:
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
	return err;
}

/*
 * Unmap the VDC and GPU register spaces.
 */

static void vmlfb_disable_mmio(struct vml_par *par)
{
	iounmap(par->gpu_mem);
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
	iounmap(par->vdc_mem);
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
}

/*
 * Release and uninit the VDC and GPU.
 * Only acts when the shared vml_par refcount drops to zero.
 */

static void vmlfb_release_devices(struct vml_par *par)
{
	if (atomic_dec_and_test(&par->refcount)) {
		pci_set_drvdata(par->vdc, NULL);
		pci_disable_device(par->gpu);
		pci_disable_device(par->vdc);
	}
}

/*
 * Free up allocated resources for a device.
 */

static void __devexit vml_pci_remove(struct pci_dev *dev)
{
	struct fb_info *info;
	struct vml_info *vinfo;
	struct vml_par *par;

	info = pci_get_drvdata(dev);
	if (info) {
		vinfo = container_of(info, struct vml_info, info);
		par = vinfo->par;
		mutex_lock(&vml_mutex);
		unregister_framebuffer(info);
		fb_dealloc_cmap(&info->cmap);
		vmlfb_free_vram(vinfo);
		vmlfb_disable_mmio(par);
		vmlfb_release_devices(par);
		kfree(vinfo);
		kfree(par);
		mutex_unlock(&vml_mutex);
	}
}

/*
 * Fill in the preferred RGB field layout for the given bits_per_pixel:
 * ARGB1555 for 16 bpp and xRGB8888 (transp.length == 0, i.e. no alpha)
 * for 32 bpp. Other depths are left untouched.
 */
static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
{
	switch (var->bits_per_pixel) {
	case 16:
		var->blue.offset = 0;
		var->blue.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->red.offset = 10;
		var->red.length = 5;
		var->transp.offset = 15;
		var->transp.length = 1;
		break;
	case 32:
		var->blue.offset = 0;
		var->blue.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->red.offset = 16;
		var->red.length = 8;

		var->transp.offset = 24;
		var->transp.length = 0;
		break;
	default:
		break;
	}

	var->blue.msb_right = var->green.msb_right =
	    var->red.msb_right = var->transp.msb_right = 0;
}

/*
 * Device initialization.
 * We initialize one vml_par struct per device and one vml_info
 * struct per pipe. Currently we have only one pipe.
 */

static int __devinit vml_pci_probe(struct pci_dev *dev,
				   const struct pci_device_id *id)
{
	struct vml_info *vinfo;
	struct fb_info *info;
	struct vml_par *par;
	int err = 0;

	par = kzalloc(sizeof(*par), GFP_KERNEL);
	if (par == NULL)
		return -ENOMEM;

	vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
	if (vinfo == NULL) {
		err = -ENOMEM;
		goto out_err_0;
	}

	vinfo->par = par;
	par->vdc = dev;
	atomic_set(&par->refcount, 1);

	switch (id->device) {
	case VML_DEVICE_VDC:
		if ((err = vmlfb_get_gpu(par)))
			goto out_err_1;
		pci_set_drvdata(dev, &vinfo->info);
		break;
	default:
		err = -ENODEV;
		goto out_err_1;
		break;
	}

	info = &vinfo->info;
	info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK;

	err = vmlfb_enable_mmio(par);
	if (err)
		goto out_err_2;

	err = vmlfb_alloc_vram(vinfo, vml_mem_requested,
			       vml_mem_contig, vml_mem_min);
	if (err)
		goto out_err_3;

	strcpy(info->fix.id, "Vermilion Range");
	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->fix.smem_start = vinfo->vram_start;
	info->fix.smem_len = vinfo->vram_contig_size;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.ypanstep = 1;
	info->fix.xpanstep = 1;
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->screen_base = vinfo->vram_logical;
	info->pseudo_palette = vinfo->pseudo_palette;
	info->par = par;
	info->fbops = &vmlfb_ops;
	info->device = &dev->dev;
	INIT_LIST_HEAD(&vinfo->head);
	/* Pipe starts disabled; first set_par will program and enable it. */
	vinfo->pipe_disabled = 1;
	vinfo->cur_blank_mode = FB_BLANK_UNBLANK;

	info->var.grayscale = 0;
	info->var.bits_per_pixel = 16;
	vmlfb_set_pref_pixel_format(&info->var);

	if (!fb_find_mode
	    (&info->var, info, vml_default_mode, NULL, 0, &defaultmode, 16)) {
		printk(KERN_ERR MODULE_NAME ": Could not find initial mode\n");
	}

	if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
		err = -ENOMEM;
		goto out_err_4;
	}

	err = register_framebuffer(info);
	if (err) {
		printk(KERN_ERR MODULE_NAME ": Register framebuffer error.\n");
		goto out_err_5;
	}

	printk("Initialized vmlfb\n");

	return 0;

out_err_5:
	fb_dealloc_cmap(&info->cmap);
out_err_4:
	vmlfb_free_vram(vinfo);
out_err_3:
	vmlfb_disable_mmio(par);
out_err_2:
	vmlfb_release_devices(par);
out_err_1:
	kfree(vinfo);
out_err_0:
	kfree(par);
	return err;
}

static int vmlfb_open(struct fb_info *info, int user)
{
	/*
	 * Save registers here?
	 */
	return 0;
}

static int vmlfb_release(struct fb_info *info, int user)
{
	/*
	 * Restore registers here.
	 */

	return 0;
}

/*
 * Return the entry of vml_clocks[] closest (by absolute difference,
 * in kHz) to the requested clock.
 */
static int vml_nearest_clock(int clock)
{

	int i;
	int cur_index;
	int cur_diff;
	int diff;

	cur_index = 0;
	cur_diff = clock - vml_clocks[0];
	cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
	for (i = 1; i < vml_num_clocks; ++i) {
		diff = clock - vml_clocks[i];
		diff = (diff < 0) ?
			-diff : diff;
		if (diff < cur_diff) {
			cur_index = i;
			cur_diff = diff;
		}
	}
	return vml_clocks[cur_index];
}

/*
 * Validate and normalize a requested mode. Works on a local copy "v"
 * and only writes it back to *var when everything checks out.
 * Caller must hold vml_mutex (reads the global "subsys").
 */
static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
				  struct vml_info *vinfo)
{
	u32 pitch;
	u64 mem;
	int nearest_clock;
	int clock;
	int clock_diff;
	struct fb_var_screeninfo v;

	v = *var;
	clock = PICOS2KHZ(var->pixclock);

	if (subsys && subsys->nearest_clock) {
		nearest_clock = subsys->nearest_clock(subsys, clock);
	} else {
		nearest_clock = vml_nearest_clock(clock);
	}

	/*
	 * Accept a 20% diff.
	 */

	clock_diff = nearest_clock - clock;
	clock_diff = (clock_diff < 0) ? -clock_diff : clock_diff;
	if (clock_diff > clock / 5) {
		return -EINVAL;
	}

	v.pixclock = KHZ2PICOS(nearest_clock);

	if (var->xres > VML_MAX_XRES || var->yres > VML_MAX_YRES) {
		printk(KERN_DEBUG MODULE_NAME ": Resolution failure.\n");
		return -EINVAL;
	}
	if (var->xres_virtual > VML_MAX_XRES_VIRTUAL) {
		printk(KERN_DEBUG MODULE_NAME
		       ": Virtual resolution failure.\n");
		return -EINVAL;
	}
	/* Round any depth up to the two supported ones: 16 or 32 bpp. */
	switch (v.bits_per_pixel) {
	case 0 ... 16:
		v.bits_per_pixel = 16;
		break;
	case 17 ...
	     32:
		v.bits_per_pixel = 32;
		break;
	default:
		printk(KERN_DEBUG MODULE_NAME ": Invalid bpp: %d.\n",
		       var->bits_per_pixel);
		return -EINVAL;
	}

	/*
	 * Pitch is 64-byte aligned.
	 * NOTE(review): this uses the caller's raw var->bits_per_pixel,
	 * not the rounded v.bits_per_pixel — for e.g. bpp==8 the memory
	 * check would be too small; confirm intended.
	 */
	pitch = __ALIGN_MASK((var->xres * var->bits_per_pixel) >> 3, 0x3F);
	mem = pitch * var->yres_virtual;
	if (mem > vinfo->vram_contig_size) {
		return -ENOMEM;
	}

	/* Force the preferred layout unless the request matches exactly. */
	switch (v.bits_per_pixel) {
	case 16:
		if (var->blue.offset != 0 ||
		    var->blue.length != 5 ||
		    var->green.offset != 5 ||
		    var->green.length != 5 ||
		    var->red.offset != 10 ||
		    var->red.length != 5 ||
		    var->transp.offset != 15 || var->transp.length != 1) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	case 32:
		if (var->blue.offset != 0 ||
		    var->blue.length != 8 ||
		    var->green.offset != 8 ||
		    var->green.length != 8 ||
		    var->red.offset != 16 ||
		    var->red.length != 8 ||
		    (var->transp.length != 0 && var->transp.length != 8) ||
		    (var->transp.length == 8 && var->transp.offset != 24)) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	default:
		return -EINVAL;
	}

	*var = v;

	return 0;
}

static int vmlfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	ret = vmlfb_check_var_locked(var, vinfo);
	mutex_unlock(&vml_mutex);

	return ret;
}

static void vml_wait_vblank(struct vml_info *vinfo)
{
	/* Wait for vblank.
	   For now, just wait for a 50 Hz cycle (20 ms). */
	mdelay(20);
}

/*
 * Shut down the display pipe: disable the MDVO pad, then the display
 * plane, then (after a vblank) the pipe itself. Sets pipe_disabled.
 * NOTE(review): the RCOMPSTAT poll below has no timeout — it busy-waits
 * forever if the hardware never reports VML_MDVO_VDC_I_RCOMP.
 */
static void vmlfb_disable_pipe(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	/* Disable the MDVO pad */
	VML_WRITE32(par, VML_RCOMPSTAT, 0);
	while (!(VML_READ32(par, VML_RCOMPSTAT) & VML_MDVO_VDC_I_RCOMP)) ;

	/* Disable display planes */
	VML_WRITE32(par, VML_DSPCCNTR,
		    VML_READ32(par, VML_DSPCCNTR) & ~VML_GFX_ENABLE);
	/* Read back to post the write before waiting. */
	(void)VML_READ32(par, VML_DSPCCNTR);
	/* Wait for vblank for the disable to take effect */
	vml_wait_vblank(vinfo);

	/* Next, disable display pipes */
	VML_WRITE32(par, VML_PIPEACONF, 0);
	(void)VML_READ32(par, VML_PIPEACONF);

	vinfo->pipe_disabled = 1;
}

#ifdef VERMILION_DEBUG
/*
 * Dump all modesetting registers to the kernel log (debug builds only).
 */
static void vml_dump_regs(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
	printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A     : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A     : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE  : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSIZE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS     : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCPOS));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPARB      : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPARB));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCADDR));
	printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A   : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_BCLRPAT_A));
	printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A  : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_CANVSCLR_A));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEASRC));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF   : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEACONF));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR    : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCCNTR));
	printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT   : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_RCOMPSTAT));
	printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
}
#endif

/*
 * Program the hardware with the mode in info->var: compute timings,
 * set the pixel clock via the subsys, write the pipe/plane registers
 * and re-enable the pipe. Caller must hold vml_mutex.
 * Returns 0, or -EINVAL for unsupported bpp / missing subsys->set_clock.
 */
static int vmlfb_set_par_locked(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;
	struct fb_info *info = &vinfo->info;
	struct fb_var_screeninfo *var = &info->var;
	u32 htotal, hactive, hblank_start, hblank_end, hsync_start, hsync_end;
	u32 vtotal, vactive, vblank_start, vblank_end, vsync_start, vsync_end;
	u32 dspcntr;
	int clock;

	vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
	/* Scanline pitch, 64-byte aligned. */
	vinfo->stride =
	    __ALIGN_MASK(var->xres_virtual * vinfo->bytes_per_pixel, 0x3F);
	info->fix.line_length = vinfo->stride;

	/* Without a clock subsystem we cannot program the hardware. */
	if (!subsys)
		return 0;

	htotal =
	    var->xres + var->right_margin + var->hsync_len + var->left_margin;
	hactive = var->xres;
	hblank_start = var->xres;
	hblank_end = htotal;
	hsync_start = hactive + var->right_margin;
	hsync_end = hsync_start + var->hsync_len;

	vtotal =
	    var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
	vactive = var->yres;
	vblank_start = var->yres;
	vblank_end = vtotal;
	vsync_start = vactive + var->lower_margin;
	vsync_end = vsync_start + var->vsync_len;

	dspcntr =
	    VML_GFX_ENABLE | VML_GFX_GAMMABYPASS;
	clock = PICOS2KHZ(var->pixclock);

	if (subsys->nearest_clock) {
		clock = subsys->nearest_clock(subsys, clock);
	} else {
		clock = vml_nearest_clock(clock);
	}
	printk(KERN_DEBUG MODULE_NAME
	       ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock / htotal,
	       ((clock / htotal) * 1000) / vtotal);

	switch (var->bits_per_pixel) {
	case 16:
		dspcntr |= VML_GFX_ARGB1555;
		break;
	case 32:
		/* transp.length == 8 selects premultiplied ARGB8888. */
		if (var->transp.length == 8)
			dspcntr |= VML_GFX_ARGB8888 | VML_GFX_ALPHAMULT;
		else
			dspcntr |= VML_GFX_RGB0888;
		break;
	default:
		return -EINVAL;
	}

	vmlfb_disable_pipe(vinfo);
	mb();

	if (subsys->set_clock)
		subsys->set_clock(subsys, clock);
	else
		return -EINVAL;

	/* Timing registers hold (end - 1) << 16 | (start - 1). */
	VML_WRITE32(par, VML_HTOTAL_A, ((htotal - 1) << 16) | (hactive - 1));
	VML_WRITE32(par, VML_HBLANK_A,
		    ((hblank_end - 1) << 16) | (hblank_start - 1));
	VML_WRITE32(par, VML_HSYNC_A,
		    ((hsync_end - 1) << 16) | (hsync_start - 1));
	VML_WRITE32(par, VML_VTOTAL_A, ((vtotal - 1) << 16) | (vactive - 1));
	VML_WRITE32(par, VML_VBLANK_A,
		    ((vblank_end - 1) << 16) | (vblank_start - 1));
	VML_WRITE32(par, VML_VSYNC_A,
		    ((vsync_end - 1) << 16) | (vsync_start - 1));
	VML_WRITE32(par, VML_DSPCSTRIDE, vinfo->stride);
	VML_WRITE32(par, VML_DSPCSIZE,
		    ((var->yres - 1) << 16) | (var->xres - 1));
	VML_WRITE32(par, VML_DSPCPOS, 0x00000000);
	VML_WRITE32(par, VML_DSPARB, VML_FIFO_DEFAULT);
	VML_WRITE32(par, VML_BCLRPAT_A, 0x00000000);
	VML_WRITE32(par, VML_CANVSCLR_A, 0x00000000);
	VML_WRITE32(par, VML_PIPEASRC,
		    ((var->xres - 1) << 16) | (var->yres - 1));

	/* Enable pipe, then plane, then scanout base — in that order. */
	wmb();
	VML_WRITE32(par, VML_PIPEACONF, VML_PIPE_ENABLE);
	wmb();
	VML_WRITE32(par, VML_DSPCCNTR, dspcntr);
	wmb();
	VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
		    var->yoffset * vinfo->stride +
		    var->xoffset * vinfo->bytes_per_pixel);
	/* Finally re-enable the MDVO pad and wait for it to come up. */
	VML_WRITE32(par, VML_RCOMPSTAT, VML_MDVO_PAD_ENABLE);

	while (!(VML_READ32(par, VML_RCOMPSTAT) &
		 (VML_MDVO_VDC_I_RCOMP | VML_MDVO_PAD_ENABLE))) ;

	vinfo->pipe_disabled = 0;
#ifdef VERMILION_DEBUG
	vml_dump_regs(vinfo);
#endif

	return 0;
}

static int vmlfb_set_par(struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	/* Re-file this device on the list matching subsys availability. */
	list_del(&vinfo->head);
	list_add(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
	ret = vmlfb_set_par_locked(vinfo);

	mutex_unlock(&vml_mutex);
	return ret;
}

/*
 * Apply vinfo->cur_blank_mode: unblank/normal re-enable the pipe if
 * needed and toggle the forced-border bit; the suspend/powerdown modes
 * just disable the pipe. Caller must hold vml_mutex.
 */
static int vmlfb_blank_locked(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;
	u32 cur = VML_READ32(par, VML_PIPEACONF);

	switch (vinfo->cur_blank_mode) {
	case FB_BLANK_UNBLANK:
		if (vinfo->pipe_disabled) {
			vmlfb_set_par_locked(vinfo);
		}
		VML_WRITE32(par, VML_PIPEACONF, cur & ~VML_PIPE_FORCE_BORDER);
		(void)VML_READ32(par, VML_PIPEACONF);
		break;
	case FB_BLANK_NORMAL:
		if (vinfo->pipe_disabled) {
			vmlfb_set_par_locked(vinfo);
		}
		VML_WRITE32(par, VML_PIPEACONF, cur | VML_PIPE_FORCE_BORDER);
		(void)VML_READ32(par, VML_PIPEACONF);
		break;
	case FB_BLANK_VSYNC_SUSPEND:
	case FB_BLANK_HSYNC_SUSPEND:
		if (!vinfo->pipe_disabled) {
			vmlfb_disable_pipe(vinfo);
		}
		break;
	case FB_BLANK_POWERDOWN:
		if (!vinfo->pipe_disabled) {
			vmlfb_disable_pipe(vinfo);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int vmlfb_blank(int blank_mode, struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	vinfo->cur_blank_mode = blank_mode;
	ret = vmlfb_blank_locked(vinfo);
	mutex_unlock(&vml_mutex);
	return ret;
}

/*
 * Pan the display by reprogramming the scanout base address.
 */
static int vmlfb_pan_display(struct fb_var_screeninfo *var,
			     struct fb_info
			     *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	struct vml_par *par = vinfo->par;

	mutex_lock(&vml_mutex);
	VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
		    var->yoffset * vinfo->stride +
		    var->xoffset * vinfo->bytes_per_pixel);
	(void)VML_READ32(par, VML_DSPCADDR);
	mutex_unlock(&vml_mutex);

	return 0;
}

/*
 * Store a truecolor palette entry in the 16-slot pseudo palette used by
 * the cfb_* drawing helpers. Only FB_VISUAL_TRUECOLOR is supported.
 */
static int vmlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			   u_int transp, struct fb_info *info)
{
	u32 v;

	if (regno >= 16)
		return -EINVAL;

	if (info->var.grayscale) {
		/* Luma weights approximating 0.30 R + 0.59 G + 0.11 B. */
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
	}

	if (info->fix.visual != FB_VISUAL_TRUECOLOR)
		return -EINVAL;

	red = VML_TOHW(red, info->var.red.length);
	blue = VML_TOHW(blue, info->var.blue.length);
	green = VML_TOHW(green, info->var.green.length);
	transp = VML_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	switch (info->var.bits_per_pixel) {
	case 16:
		((u32 *) info->pseudo_palette)[regno] = v;
		break;
	case 24:
	case 32:
		((u32 *) info->pseudo_palette)[regno] = v;
		break;
	}
	return 0;
}

/*
 * Map vram into user space. The requested range must lie entirely
 * within one contiguous vram area, and the mapping is made uncached
 * (PCD set, PWT cleared) to match the kernel linear map.
 */
static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret;

	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	if (offset + size > vinfo->vram_contig_size)
		return -EINVAL;
	ret = vmlfb_vram_offset(vinfo, offset);
	if (ret)
		return -EINVAL;
	offset += vinfo->vram_start;
	pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
	pgprot_val(vma->vm_page_prot) &=
	    ~_PAGE_PWT;
	vma->vm_flags |= VM_RESERVED | VM_IO;
	if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

static int vmlfb_sync(struct fb_info *info)
{
	/* No accelerator, so nothing to wait for. */
	return 0;
}

static int vmlfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	return -EINVAL;	/* just to force soft_cursor() call */
}

static struct fb_ops vmlfb_ops = {
	.owner = THIS_MODULE,
	.fb_open = vmlfb_open,
	.fb_release = vmlfb_release,
	.fb_check_var = vmlfb_check_var,
	.fb_set_par = vmlfb_set_par,
	.fb_blank = vmlfb_blank,
	.fb_pan_display = vmlfb_pan_display,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_cursor = vmlfb_cursor,
	.fb_sync = vmlfb_sync,
	.fb_mmap = vmlfb_mmap,
	.fb_setcolreg = vmlfb_setcolreg
};

/* We bind only to the display controller; the GPU is looked up at probe. */
static struct pci_device_id vml_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)},
	{0}
};

static struct pci_driver vmlfb_pci_driver = {
	.name = "vmlfb",
	.id_table = vml_ids,
	.probe = vml_pci_probe,
	.remove = __devexit_p(vml_pci_remove)
};

static void __exit vmlfb_cleanup(void)
{
	pci_unregister_driver(&vmlfb_pci_driver);
}

static int __init vmlfb_init(void)
{

#ifndef MODULE
	char *option = NULL;

	/*
	 * NOTE(review): fb_get_options() is only used here as an
	 * enabled/disabled check; the returned option string is never
	 * parsed — confirm that is intentional.
	 */
	if (fb_get_options(MODULE_NAME, &option))
		return -ENODEV;
#endif

	printk(KERN_DEBUG MODULE_NAME ": initializing\n");
	mutex_init(&vml_mutex);
	INIT_LIST_HEAD(&global_no_mode);
	INIT_LIST_HEAD(&global_has_mode);

	return pci_register_driver(&vmlfb_pci_driver);
}

/*
 * Register a clock subsystem, replacing any previous one, and try to
 * bring every mode-less device up with it. Exported for the subsys
 * driver modules.
 */
int vmlfb_register_subsys(struct vml_sys *sys)
{
	struct vml_info *entry;
	struct list_head *list;
	u32 save_activate;

	mutex_lock(&vml_mutex);
	if (subsys != NULL) {

		subsys->restore(subsys);
	}
	subsys = sys;
	subsys->save(subsys);

	/*
	 * We need to restart list traversal for each item, since we
	 * release the list mutex in the loop.
	 */

	list = global_no_mode.next;
	while (list != &global_no_mode) {
		list_del_init(list);
		entry = list_entry(list, struct vml_info, head);

		/*
		 * First, try the current mode which might not be
		 * completely validated with respect to the pixel clock.
		 */

		if (!vmlfb_check_var_locked(&entry->info.var, entry)) {
			vmlfb_set_par_locked(entry);
			list_add_tail(list, &global_has_mode);
		} else {

			/*
			 * Didn't work. Try to find another mode,
			 * that matches this subsys.
			 */

			mutex_unlock(&vml_mutex);
			save_activate = entry->info.var.activate;
			entry->info.var.bits_per_pixel = 16;
			vmlfb_set_pref_pixel_format(&entry->info.var);
			if (fb_find_mode(&entry->info.var,
					 &entry->info,
					 vml_default_mode, NULL, 0, NULL, 16)) {
				entry->info.var.activate |=
				    FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
				/* fb_set_var re-files entry on a list. */
				fb_set_var(&entry->info, &entry->info.var);
			} else {
				printk(KERN_ERR MODULE_NAME
				       ": Sorry. no mode found for this subsys.\n");
			}
			entry->info.var.activate = save_activate;
			mutex_lock(&vml_mutex);
		}
		vmlfb_blank_locked(entry);
		list = global_no_mode.next;
	}
	mutex_unlock(&vml_mutex);

	printk(KERN_DEBUG MODULE_NAME ": Registered %s subsystem.\n",
	       subsys->name ?
	       subsys->name : "unknown");
	return 0;
}

EXPORT_SYMBOL_GPL(vmlfb_register_subsys);

/*
 * Unregister a clock subsystem: restore its saved state, disable every
 * active pipe and move all devices back to the no-mode list.
 */
void vmlfb_unregister_subsys(struct vml_sys *sys)
{
	struct vml_info *entry, *next;

	mutex_lock(&vml_mutex);
	if (subsys != sys) {
		mutex_unlock(&vml_mutex);
		return;
	}
	subsys->restore(subsys);
	subsys = NULL;
	list_for_each_entry_safe(entry, next, &global_has_mode, head) {
		printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n");
		vmlfb_disable_pipe(entry);
		list_del(&entry->head);
		list_add_tail(&entry->head, &global_no_mode);
	}
	mutex_unlock(&vml_mutex);
}

EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys);

module_init(vmlfb_init);
module_exit(vmlfb_cleanup);

MODULE_AUTHOR("Tungsten Graphics");
MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");