Deleted Added
full compact
intel_display.c (255013) intel_display.c (277487)
1/*
2 * Copyright �� 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the

--- 11 unchanged lines hidden (view full) ---

20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include <sys/cdefs.h>
1/*
2 * Copyright �� 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the

--- 11 unchanged lines hidden (view full) ---

20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/intel_display.c 255013 2013-08-28 23:59:38Z jkim $");
28__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/intel_display.c 277487 2015-01-21 16:10:37Z kib $");
29
30#include <dev/drm2/drmP.h>
31#include <dev/drm2/drm.h>
32#include <dev/drm2/i915/i915_drm.h>
33#include <dev/drm2/i915/i915_drv.h>
34#include <dev/drm2/i915/intel_drv.h>
35#include <dev/drm2/drm_edid.h>
36#include <dev/drm2/drm_dp_helper.h>
37#include <dev/drm2/drm_crtc_helper.h>
29
30#include <dev/drm2/drmP.h>
31#include <dev/drm2/drm.h>
32#include <dev/drm2/i915/i915_drm.h>
33#include <dev/drm2/i915/i915_drv.h>
34#include <dev/drm2/i915/intel_drv.h>
35#include <dev/drm2/drm_edid.h>
36#include <dev/drm2/drm_dp_helper.h>
37#include <dev/drm2/drm_crtc_helper.h>
38#include <sys/kdb.h>
39#include <sys/limits.h>
40
41#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
42
43bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
38#include <sys/limits.h>
39
40#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
41
42bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
44static void intel_update_watermarks(struct drm_device *dev);
45static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47
48typedef struct {
49 /* given values */
50 int n;
51 int m1, m2;
52 int p1, p2;

--- 299 unchanged lines hidden (view full) ---

352 .m2 = { .min = 5, .max = 9 },
353 .p = { .min = 10, .max = 20 },
354 .p1 = { .min = 1, .max = 2},
355 .p2 = { .dot_limit = 0,
356 .p2_slow = 10, .p2_fast = 10 },
357 .find_pll = intel_find_pll_ironlake_dp,
358};
359
43static void intel_increase_pllclock(struct drm_crtc *crtc);
44static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
45
46typedef struct {
47 /* given values */
48 int n;
49 int m1, m2;
50 int p1, p2;

--- 299 unchanged lines hidden (view full) ---

350 .m2 = { .min = 5, .max = 9 },
351 .p = { .min = 10, .max = 20 },
352 .p1 = { .min = 1, .max = 2},
353 .p2 = { .dot_limit = 0,
354 .p2_slow = 10, .p2_fast = 10 },
355 .find_pll = intel_find_pll_ironlake_dp,
356};
357
358u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
359{
360 u32 val = 0;
361
362 mtx_lock(&dev_priv->dpio_lock);
363 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
364 DRM_ERROR("DPIO idle wait timed out\n");
365 goto out_unlock;
366 }
367
368 I915_WRITE(DPIO_REG, reg);
369 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
370 DPIO_BYTE);
371 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
372 DRM_ERROR("DPIO read wait timed out\n");
373 goto out_unlock;
374 }
375 val = I915_READ(DPIO_DATA);
376
377out_unlock:
378 mtx_unlock(&dev_priv->dpio_lock);
379 return val;
380}
381
382#if 0
383static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
384 u32 val)
385{
386
387 mtx_lock(&dev_priv->dpio_lock);
388 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
389 DRM_ERROR("DPIO idle wait timed out\n");
390 goto out_unlock;
391 }
392
393 I915_WRITE(DPIO_DATA, val);
394 I915_WRITE(DPIO_REG, reg);
395 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
396 DPIO_BYTE);
397 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
398 DRM_ERROR("DPIO write wait timed out\n");
399
400out_unlock:
401 mtx_unlock(&dev_priv->dpio_lock);
402}
403#endif
404
405static void vlv_init_dpio(struct drm_device *dev)
406{
407 struct drm_i915_private *dev_priv = dev->dev_private;
408
409 /* Reset the DPIO config */
410 I915_WRITE(DPIO_CTL, 0);
411 POSTING_READ(DPIO_CTL);
412 I915_WRITE(DPIO_CTL, 1);
413 POSTING_READ(DPIO_CTL);
414}
415
416static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
417{
418 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
419 return 1;
420}
421
422static const struct dmi_system_id intel_dual_link_lvds[] = {
423 {
424 .callback = intel_dual_link_lvds_callback,
425 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
426 .matches = {
427 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
428 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
429 },
430 },
431 { } /* terminating entry */
432};
433
434static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
435 unsigned int reg)
436{
437 unsigned int val;
438
439 /* use the module option value if specified */
440 if (i915_lvds_channel_mode > 0)
441 return i915_lvds_channel_mode == 2;
442
443 if (dmi_check_system(intel_dual_link_lvds))
444 return true;
445
446 if (dev_priv->lvds_val)
447 val = dev_priv->lvds_val;
448 else {
449 /* BIOS should set the proper LVDS register value at boot, but
450 * in reality, it doesn't set the value when the lid is closed;
451 * we need to check "the value to be set" in VBT when LVDS
452 * register is uninitialized.
453 */
454 val = I915_READ(reg);
455 if (!(val & ~LVDS_DETECTED))
456 val = dev_priv->bios_lvds_val;
457 dev_priv->lvds_val = val;
458 }
459 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
460}
461
360static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
361 int refclk)
362{
363 struct drm_device *dev = crtc->dev;
364 struct drm_i915_private *dev_priv = dev->dev_private;
365 const intel_limit_t *limit;
366
367 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
462static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
463 int refclk)
464{
465 struct drm_device *dev = crtc->dev;
466 struct drm_i915_private *dev_priv = dev->dev_private;
467 const intel_limit_t *limit;
468
469 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
368 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
369 LVDS_CLKB_POWER_UP) {
470 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
370 /* LVDS dual channel */
371 if (refclk == 100000)
372 limit = &intel_limits_ironlake_dual_lvds_100m;
373 else
374 limit = &intel_limits_ironlake_dual_lvds;
375 } else {
376 if (refclk == 100000)
377 limit = &intel_limits_ironlake_single_lvds_100m;

--- 11 unchanged lines hidden (view full) ---

389
390static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
391{
392 struct drm_device *dev = crtc->dev;
393 struct drm_i915_private *dev_priv = dev->dev_private;
394 const intel_limit_t *limit;
395
396 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
471 /* LVDS dual channel */
472 if (refclk == 100000)
473 limit = &intel_limits_ironlake_dual_lvds_100m;
474 else
475 limit = &intel_limits_ironlake_dual_lvds;
476 } else {
477 if (refclk == 100000)
478 limit = &intel_limits_ironlake_single_lvds_100m;

--- 11 unchanged lines hidden (view full) ---

490
491static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
492{
493 struct drm_device *dev = crtc->dev;
494 struct drm_i915_private *dev_priv = dev->dev_private;
495 const intel_limit_t *limit;
496
497 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
397 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
398 LVDS_CLKB_POWER_UP)
498 if (is_dual_link_lvds(dev_priv, LVDS))
399 /* LVDS with dual channel */
400 limit = &intel_limits_g4x_dual_channel_lvds;
401 else
402 /* LVDS with dual channel */
403 limit = &intel_limits_g4x_single_channel_lvds;
404 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
405 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
406 limit = &intel_limits_g4x_hdmi;

--- 121 unchanged lines hidden (view full) ---

528 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
529 (I915_READ(LVDS)) != 0) {
530 /*
531 * For LVDS, if the panel is on, just rely on its current
532 * settings for dual-channel. We haven't figured out how to
533 * reliably set up different single/dual channel state, if we
534 * even can.
535 */
499 /* LVDS with dual channel */
500 limit = &intel_limits_g4x_dual_channel_lvds;
501 else
502 /* LVDS with dual channel */
503 limit = &intel_limits_g4x_single_channel_lvds;
504 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
505 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
506 limit = &intel_limits_g4x_hdmi;

--- 121 unchanged lines hidden (view full) ---

628 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
629 (I915_READ(LVDS)) != 0) {
630 /*
631 * For LVDS, if the panel is on, just rely on its current
632 * settings for dual-channel. We haven't figured out how to
633 * reliably set up different single/dual channel state, if we
634 * even can.
635 */
536 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
537 LVDS_CLKB_POWER_UP)
636 if (is_dual_link_lvds(dev_priv, LVDS))
538 clock.p2 = limit->p2.p2_fast;
539 else
540 clock.p2 = limit->p2.p2_slow;
541 } else {
542 if (target < limit->p2.dot_limit)
543 clock.p2 = limit->p2.p2_slow;
544 else
545 clock.p2 = limit->p2.p2_fast;

--- 152 unchanged lines hidden (view full) ---

698 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
699 clock.p = (clock.p1 * clock.p2);
700 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
701 clock.vco = 0;
702 memcpy(best_clock, &clock, sizeof(intel_clock_t));
703 return true;
704}
705
637 clock.p2 = limit->p2.p2_fast;
638 else
639 clock.p2 = limit->p2.p2_slow;
640 } else {
641 if (target < limit->p2.dot_limit)
642 clock.p2 = limit->p2.p2_slow;
643 else
644 clock.p2 = limit->p2.p2_fast;

--- 152 unchanged lines hidden (view full) ---

797 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
798 clock.p = (clock.p1 * clock.p2);
799 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
800 clock.vco = 0;
801 memcpy(best_clock, &clock, sizeof(intel_clock_t));
802 return true;
803}
804
805static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
806{
807 struct drm_i915_private *dev_priv = dev->dev_private;
808 u32 frame, frame_reg = PIPEFRAME(pipe);
809
810 frame = I915_READ(frame_reg);
811
812 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
813 DRM_DEBUG_KMS("vblank wait timed out\n");
814}
815
706/**
707 * intel_wait_for_vblank - wait for vblank on a given pipe
708 * @dev: drm device
709 * @pipe: pipe to wait for
710 *
711 * Wait for vblank to occur on a given pipe. Needed for various bits of
712 * mode setting code.
713 */
714void intel_wait_for_vblank(struct drm_device *dev, int pipe)
715{
716 struct drm_i915_private *dev_priv = dev->dev_private;
717 int pipestat_reg = PIPESTAT(pipe);
718
816/**
817 * intel_wait_for_vblank - wait for vblank on a given pipe
818 * @dev: drm device
819 * @pipe: pipe to wait for
820 *
821 * Wait for vblank to occur on a given pipe. Needed for various bits of
822 * mode setting code.
823 */
824void intel_wait_for_vblank(struct drm_device *dev, int pipe)
825{
826 struct drm_i915_private *dev_priv = dev->dev_private;
827 int pipestat_reg = PIPESTAT(pipe);
828
829 if (INTEL_INFO(dev)->gen >= 5) {
830 ironlake_wait_for_vblank(dev, pipe);
831 return;
832 }
833
719 /* Clear existing vblank status. Note this will clear any other
720 * sticky status fields as well.
721 *
722 * This races with i915_driver_irq_handler() with the result
723 * that either function could miss a vblank event. Here it is not
724 * fatal, as we will either wait upon the next vblank interrupt or
725 * timeout. Generally speaking intel_wait_for_vblank() is only
726 * called during modeset at which time the GPU should be idle and

--- 37 unchanged lines hidden (view full) ---

764 int reg = PIPECONF(pipe);
765
766 /* Wait for the Pipe State to go off */
767 if (_intel_wait_for(dev,
768 (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
769 1, "915pip"))
770 DRM_DEBUG_KMS("pipe_off wait timed out\n");
771 } else {
834 /* Clear existing vblank status. Note this will clear any other
835 * sticky status fields as well.
836 *
837 * This races with i915_driver_irq_handler() with the result
838 * that either function could miss a vblank event. Here it is not
839 * fatal, as we will either wait upon the next vblank interrupt or
840 * timeout. Generally speaking intel_wait_for_vblank() is only
841 * called during modeset at which time the GPU should be idle and

--- 37 unchanged lines hidden (view full) ---

879 int reg = PIPECONF(pipe);
880
881 /* Wait for the Pipe State to go off */
882 if (_intel_wait_for(dev,
883 (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
884 1, "915pip"))
885 DRM_DEBUG_KMS("pipe_off wait timed out\n");
886 } else {
772 u32 last_line;
887 u32 last_line, line_mask;
773 int reg = PIPEDSL(pipe);
774 unsigned long timeout = jiffies + msecs_to_jiffies(100);
775
888 int reg = PIPEDSL(pipe);
889 unsigned long timeout = jiffies + msecs_to_jiffies(100);
890
891 if (IS_GEN2(dev))
892 line_mask = DSL_LINEMASK_GEN2;
893 else
894 line_mask = DSL_LINEMASK_GEN3;
895
776 /* Wait for the display line to settle */
777 do {
896 /* Wait for the display line to settle */
897 do {
778 last_line = I915_READ(reg) & DSL_LINEMASK;
898 last_line = I915_READ(reg) & line_mask;
779 DELAY(5000);
899 DELAY(5000);
780 } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
900 } while (((I915_READ(reg) & line_mask) != last_line) &&
781 time_after(timeout, jiffies));
782 if (time_after(jiffies, timeout))
783 DRM_DEBUG_KMS("pipe_off wait timed out\n");
784 }
785}
786
787static const char *state_string(bool enabled)
788{

--- 15 unchanged lines hidden (view full) ---

804 printf("PLL state assertion failure (expected %s, current %s)\n",
805 state_string(state), state_string(cur_state));
806}
807#define assert_pll_enabled(d, p) assert_pll(d, p, true)
808#define assert_pll_disabled(d, p) assert_pll(d, p, false)
809
810/* For ILK+ */
811static void assert_pch_pll(struct drm_i915_private *dev_priv,
901 time_after(timeout, jiffies));
902 if (time_after(jiffies, timeout))
903 DRM_DEBUG_KMS("pipe_off wait timed out\n");
904 }
905}
906
907static const char *state_string(bool enabled)
908{

--- 15 unchanged lines hidden (view full) ---

924 printf("PLL state assertion failure (expected %s, current %s)\n",
925 state_string(state), state_string(cur_state));
926}
927#define assert_pll_enabled(d, p) assert_pll(d, p, true)
928#define assert_pll_disabled(d, p) assert_pll(d, p, false)
929
930/* For ILK+ */
931static void assert_pch_pll(struct drm_i915_private *dev_priv,
812 enum pipe pipe, bool state)
932 struct intel_crtc *intel_crtc, bool state)
813{
814 int reg;
815 u32 val;
816 bool cur_state;
817
933{
934 int reg;
935 u32 val;
936 bool cur_state;
937
938 if (HAS_PCH_LPT(dev_priv->dev)) {
939 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
940 return;
941 }
942
943 if (!intel_crtc->pch_pll) {
944 printf("asserting PCH PLL enabled with no PLL\n");
945 return;
946 }
947
818 if (HAS_PCH_CPT(dev_priv->dev)) {
819 u32 pch_dpll;
820
821 pch_dpll = I915_READ(PCH_DPLL_SEL);
822
823 /* Make sure the selected PLL is enabled to the transcoder */
948 if (HAS_PCH_CPT(dev_priv->dev)) {
949 u32 pch_dpll;
950
951 pch_dpll = I915_READ(PCH_DPLL_SEL);
952
953 /* Make sure the selected PLL is enabled to the transcoder */
824 KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
825 ("transcoder %d PLL not enabled\n", pipe));
826
827 /* Convert the transcoder pipe number to a pll pipe number */
828 pipe = (pch_dpll >> (4 * pipe)) & 1;
954 KASSERT(((pch_dpll >> (4 * intel_crtc->pipe)) & 8) != 0,
955 ("transcoder %d PLL not enabled\n", intel_crtc->pipe));
829 }
830
956 }
957
831 reg = PCH_DPLL(pipe);
958 reg = intel_crtc->pch_pll->pll_reg;
832 val = I915_READ(reg);
833 cur_state = !!(val & DPLL_VCO_ENABLE);
834 if (cur_state != state)
835 printf("PCH PLL state assertion failure (expected %s, current %s)\n",
836 state_string(state), state_string(cur_state));
837}
838#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
839#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
840
841static void assert_fdi_tx(struct drm_i915_private *dev_priv,
842 enum pipe pipe, bool state)
843{
844 int reg;
845 u32 val;
846 bool cur_state;
847
959 val = I915_READ(reg);
960 cur_state = !!(val & DPLL_VCO_ENABLE);
961 if (cur_state != state)
962 printf("PCH PLL state assertion failure (expected %s, current %s)\n",
963 state_string(state), state_string(cur_state));
964}
965#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
966#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
967
968static void assert_fdi_tx(struct drm_i915_private *dev_priv,
969 enum pipe pipe, bool state)
970{
971 int reg;
972 u32 val;
973 bool cur_state;
974
848 reg = FDI_TX_CTL(pipe);
849 val = I915_READ(reg);
850 cur_state = !!(val & FDI_TX_ENABLE);
975 if (IS_HASWELL(dev_priv->dev)) {
976 /* On Haswell, DDI is used instead of FDI_TX_CTL */
977 reg = DDI_FUNC_CTL(pipe);
978 val = I915_READ(reg);
979 cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
980 } else {
981 reg = FDI_TX_CTL(pipe);
982 val = I915_READ(reg);
983 cur_state = !!(val & FDI_TX_ENABLE);
984 }
851 if (cur_state != state)
852 printf("FDI TX state assertion failure (expected %s, current %s)\n",
853 state_string(state), state_string(cur_state));
854}
855#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
856#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
857
858static void assert_fdi_rx(struct drm_i915_private *dev_priv,
859 enum pipe pipe, bool state)
860{
861 int reg;
862 u32 val;
863 bool cur_state;
864
985 if (cur_state != state)
986 printf("FDI TX state assertion failure (expected %s, current %s)\n",
987 state_string(state), state_string(cur_state));
988}
989#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
990#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
991
992static void assert_fdi_rx(struct drm_i915_private *dev_priv,
993 enum pipe pipe, bool state)
994{
995 int reg;
996 u32 val;
997 bool cur_state;
998
865 reg = FDI_RX_CTL(pipe);
866 val = I915_READ(reg);
867 cur_state = !!(val & FDI_RX_ENABLE);
999 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1000 DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
1001 return;
1002 } else {
1003 reg = FDI_RX_CTL(pipe);
1004 val = I915_READ(reg);
1005 cur_state = !!(val & FDI_RX_ENABLE);
1006 }
868 if (cur_state != state)
869 printf("FDI RX state assertion failure (expected %s, current %s)\n",
870 state_string(state), state_string(cur_state));
871}
872#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
873#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
874
875static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
876 enum pipe pipe)
877{
878 int reg;
879 u32 val;
880
881 /* ILK FDI PLL is always enabled */
882 if (dev_priv->info->gen == 5)
883 return;
884
1007 if (cur_state != state)
1008 printf("FDI RX state assertion failure (expected %s, current %s)\n",
1009 state_string(state), state_string(cur_state));
1010}
1011#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1012#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1013
1014static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1015 enum pipe pipe)
1016{
1017 int reg;
1018 u32 val;
1019
1020 /* ILK FDI PLL is always enabled */
1021 if (dev_priv->info->gen == 5)
1022 return;
1023
1024 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1025 if (IS_HASWELL(dev_priv->dev))
1026 return;
1027
885 reg = FDI_TX_CTL(pipe);
886 val = I915_READ(reg);
887 if (!(val & FDI_TX_PLL_ENABLE))
888 printf("FDI TX PLL assertion failure, should be active but is disabled\n");
889}
890
891static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
892 enum pipe pipe)
893{
894 int reg;
895 u32 val;
896
1028 reg = FDI_TX_CTL(pipe);
1029 val = I915_READ(reg);
1030 if (!(val & FDI_TX_PLL_ENABLE))
1031 printf("FDI TX PLL assertion failure, should be active but is disabled\n");
1032}
1033
1034static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1035 enum pipe pipe)
1036{
1037 int reg;
1038 u32 val;
1039
1040 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1041 DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1042 return;
1043 }
897 reg = FDI_RX_CTL(pipe);
898 val = I915_READ(reg);
899 if (!(val & FDI_RX_PLL_ENABLE))
900 printf("FDI RX PLL assertion failure, should be active but is disabled\n");
901}
902
903static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
904 enum pipe pipe)

--- 90 unchanged lines hidden (view full) ---

995 }
996}
997
998static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
999{
1000 u32 val;
1001 bool enabled;
1002
1044 reg = FDI_RX_CTL(pipe);
1045 val = I915_READ(reg);
1046 if (!(val & FDI_RX_PLL_ENABLE))
1047 printf("FDI RX PLL assertion failure, should be active but is disabled\n");
1048}
1049
1050static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1051 enum pipe pipe)

--- 90 unchanged lines hidden (view full) ---

1142 }
1143}
1144
1145static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1146{
1147 u32 val;
1148 bool enabled;
1149
1150 if (HAS_PCH_LPT(dev_priv->dev)) {
1151 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1152 return;
1153 }
1154
1003 val = I915_READ(PCH_DREF_CONTROL);
1004 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1005 DREF_SUPERSPREAD_SOURCE_MASK));
1006 if (!enabled)
1007 printf("PCH refclk assertion failure, should be active but is disabled\n");
1008}
1009
1010static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,

--- 183 unchanged lines hidden (view full) ---

1194
1195 reg = DPLL(pipe);
1196 val = I915_READ(reg);
1197 val &= ~DPLL_VCO_ENABLE;
1198 I915_WRITE(reg, val);
1199 POSTING_READ(reg);
1200}
1201
1155 val = I915_READ(PCH_DREF_CONTROL);
1156 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1157 DREF_SUPERSPREAD_SOURCE_MASK));
1158 if (!enabled)
1159 printf("PCH refclk assertion failure, should be active but is disabled\n");
1160}
1161
1162static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,

--- 183 unchanged lines hidden (view full) ---

1346
1347 reg = DPLL(pipe);
1348 val = I915_READ(reg);
1349 val &= ~DPLL_VCO_ENABLE;
1350 I915_WRITE(reg, val);
1351 POSTING_READ(reg);
1352}
1353
1354/* SBI access */
1355static void
1356intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
1357{
1358
1359 mtx_lock(&dev_priv->dpio_lock);
1360 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1361 100)) {
1362 DRM_ERROR("timeout waiting for SBI to become ready\n");
1363 goto out_unlock;
1364 }
1365
1366 I915_WRITE(SBI_ADDR,
1367 (reg << 16));
1368 I915_WRITE(SBI_DATA,
1369 value);
1370 I915_WRITE(SBI_CTL_STAT,
1371 SBI_BUSY |
1372 SBI_CTL_OP_CRWR);
1373
1374 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1375 100)) {
1376 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1377 goto out_unlock;
1378 }
1379
1380out_unlock:
1381 mtx_unlock(&dev_priv->dpio_lock);
1382}
1383
1384static u32
1385intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1386{
1387 u32 value;
1388
1389 value = 0;
1390 mtx_lock(&dev_priv->dpio_lock);
1391 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1392 100)) {
1393 DRM_ERROR("timeout waiting for SBI to become ready\n");
1394 goto out_unlock;
1395 }
1396
1397 I915_WRITE(SBI_ADDR,
1398 (reg << 16));
1399 I915_WRITE(SBI_CTL_STAT,
1400 SBI_BUSY |
1401 SBI_CTL_OP_CRRD);
1402
1403 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1404 100)) {
1405 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1406 goto out_unlock;
1407 }
1408
1409 value = I915_READ(SBI_DATA);
1410
1411out_unlock:
1412 mtx_unlock(&dev_priv->dpio_lock);
1413 return value;
1414}
1415
1202/**
1203 * intel_enable_pch_pll - enable PCH PLL
1204 * @dev_priv: i915 private structure
1205 * @pipe: pipe PLL to enable
1206 *
1207 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1208 * drives the transcoder clock.
1209 */
1416/**
1417 * intel_enable_pch_pll - enable PCH PLL
1418 * @dev_priv: i915 private structure
1419 * @pipe: pipe PLL to enable
1420 *
1421 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1422 * drives the transcoder clock.
1423 */
1210static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1211 enum pipe pipe)
1424static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
1212{
1425{
1426 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1427 struct intel_pch_pll *pll;
1213 int reg;
1214 u32 val;
1215
1428 int reg;
1429 u32 val;
1430
1216 if (pipe > 1)
1431 /* PCH PLLs only available on ILK, SNB and IVB */
1432 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1433 pll = intel_crtc->pch_pll;
1434 if (pll == NULL)
1217 return;
1218
1435 return;
1436
1219 /* PCH only available on ILK+ */
1220 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1437 if (pll->refcount == 0) {
1438 DRM_DEBUG_KMS("pll->refcount == 0\n");
1439 return;
1440 }
1221
1441
1442 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1443 pll->pll_reg, pll->active, pll->on,
1444 intel_crtc->base.base.id);
1445
1222 /* PCH refclock must be enabled first */
1223 assert_pch_refclk_enabled(dev_priv);
1224
1446 /* PCH refclock must be enabled first */
1447 assert_pch_refclk_enabled(dev_priv);
1448
1225 reg = PCH_DPLL(pipe);
1449 if (pll->active++ && pll->on) {
1450 assert_pch_pll_enabled(dev_priv, intel_crtc);
1451 return;
1452 }
1453
1454 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1455
1456 reg = pll->pll_reg;
1226 val = I915_READ(reg);
1227 val |= DPLL_VCO_ENABLE;
1228 I915_WRITE(reg, val);
1229 POSTING_READ(reg);
1230 DELAY(200);
1457 val = I915_READ(reg);
1458 val |= DPLL_VCO_ENABLE;
1459 I915_WRITE(reg, val);
1460 POSTING_READ(reg);
1461 DELAY(200);
1462
1463 pll->on = true;
1231}
1232
1464}
1465
1233static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1234 enum pipe pipe)
1466static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1235{
1467{
1468 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1469 struct intel_pch_pll *pll = intel_crtc->pch_pll;
1236 int reg;
1470 int reg;
1237 u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
1238 pll_sel = TRANSC_DPLL_ENABLE;
1471 u32 val;
1239
1472
1240 if (pipe > 1)
1241 return;
1242
1243 /* PCH only available on ILK+ */
1244 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1473 /* PCH only available on ILK+ */
1474 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1475 if (pll == NULL)
1476 return;
1245
1477
1246 /* Make sure transcoder isn't still depending on us */
1247 assert_transcoder_disabled(dev_priv, pipe);
1478 if (pll->refcount == 0) {
1479 DRM_DEBUG_KMS("pll->refcount == 0\n");
1480 return;
1481 }
1248
1482
1249 if (pipe == 0)
1250 pll_sel |= TRANSC_DPLLA_SEL;
1251 else if (pipe == 1)
1252 pll_sel |= TRANSC_DPLLB_SEL;
1483 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1484 pll->pll_reg, pll->active, pll->on,
1485 intel_crtc->base.base.id);
1253
1486
1487 if (pll->active == 0) {
1488 DRM_DEBUG_KMS("pll->active == 0\n");
1489 assert_pch_pll_disabled(dev_priv, intel_crtc);
1490 return;
1491 }
1254
1492
1255 if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
1493 if (--pll->active) {
1494 assert_pch_pll_enabled(dev_priv, intel_crtc);
1256 return;
1495 return;
1496 }
1257
1497
1258 reg = PCH_DPLL(pipe);
1498 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1499
1500 /* Make sure transcoder isn't still depending on us */
1501 assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1502
1503 reg = pll->pll_reg;
1259 val = I915_READ(reg);
1260 val &= ~DPLL_VCO_ENABLE;
1261 I915_WRITE(reg, val);
1262 POSTING_READ(reg);
1263 DELAY(200);
1504 val = I915_READ(reg);
1505 val &= ~DPLL_VCO_ENABLE;
1506 I915_WRITE(reg, val);
1507 POSTING_READ(reg);
1508 DELAY(200);
1509
1510 pll->on = false;
1264}
1265
1266static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1267 enum pipe pipe)
1268{
1269 int reg;
1270 u32 val, pipeconf_val;
1271 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1272
1273 /* PCH only available on ILK+ */
1274 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1275
1276 /* Make sure PCH DPLL is enabled */
1511}
1512
1513static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1514 enum pipe pipe)
1515{
1516 int reg;
1517 u32 val, pipeconf_val;
1518 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1519
1520 /* PCH only available on ILK+ */
1521 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1522
1523 /* Make sure PCH DPLL is enabled */
1277 assert_pch_pll_enabled(dev_priv, pipe);
1524 assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
1278
1279 /* FDI must be feeding us bits for PCH ports */
1280 assert_fdi_tx_enabled(dev_priv, pipe);
1281 assert_fdi_rx_enabled(dev_priv, pipe);
1282
1525
1526 /* FDI must be feeding us bits for PCH ports */
1527 assert_fdi_tx_enabled(dev_priv, pipe);
1528 assert_fdi_rx_enabled(dev_priv, pipe);
1529
1283
1530 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1531 DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1532 return;
1533 }
1284 reg = TRANSCONF(pipe);
1285 val = I915_READ(reg);
1286 pipeconf_val = I915_READ(PIPECONF(pipe));
1534 reg = TRANSCONF(pipe);
1535 val = I915_READ(reg);
1536 pipeconf_val = I915_READ(PIPECONF(pipe));
1287
1288 if (HAS_PCH_IBX(dev_priv->dev)) {
1289 /*
1290 * make the BPC in transcoder be consistent with
1291 * that in pipeconf reg.
1292 */
1293 val &= ~PIPE_BPC_MASK;
1294 val |= pipeconf_val & PIPE_BPC_MASK;
1295 }

--- 119 unchanged lines hidden (view full) ---

1415 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1416 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1417}
1418
1419/*
1420 * Plane regs are double buffered, going from enabled->disabled needs a
1421 * trigger in order to latch. The display address reg provides this.
1422 */
1537 if (HAS_PCH_IBX(dev_priv->dev)) {
1538 /*
1539 * make the BPC in transcoder be consistent with
1540 * that in pipeconf reg.
1541 */
1542 val &= ~PIPE_BPC_MASK;
1543 val |= pipeconf_val & PIPE_BPC_MASK;
1544 }

--- 119 unchanged lines hidden (view full) ---

1664 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1665 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1666}
1667
1668/*
1669 * Plane regs are double buffered, going from enabled->disabled needs a
1670 * trigger in order to latch. The display address reg provides this.
1671 */
1423static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1672void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1424 enum plane plane)
1425{
1426 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1427 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1428}
1429
1430/**
1431 * intel_enable_plane - enable a display plane on a given pipe

--- 94 unchanged lines hidden (view full) ---

1526 DELAY(100);
1527 }
1528
1529 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1530 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1531 disable_pch_hdmi(dev_priv, pipe, HDMID);
1532}
1533
1673 enum plane plane)
1674{
1675 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1676 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1677}
1678
1679/**
1680 * intel_enable_plane - enable a display plane on a given pipe

--- 94 unchanged lines hidden (view full) ---

1775 DELAY(100);
1776 }
1777
1778 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1779 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1780 disable_pch_hdmi(dev_priv, pipe, HDMID);
1781}
1782
1534static void i8xx_disable_fbc(struct drm_device *dev)
1535{
1536 struct drm_i915_private *dev_priv = dev->dev_private;
1537 u32 fbc_ctl;
1538
1539 /* Disable compression */
1540 fbc_ctl = I915_READ(FBC_CONTROL);
1541 if ((fbc_ctl & FBC_CTL_EN) == 0)
1542 return;
1543
1544 fbc_ctl &= ~FBC_CTL_EN;
1545 I915_WRITE(FBC_CONTROL, fbc_ctl);
1546
1547 /* Wait for compressing bit to clear */
1548 if (_intel_wait_for(dev,
1549 (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
1550 1, "915fbd")) {
1551 DRM_DEBUG_KMS("FBC idle timed out\n");
1552 return;
1553 }
1554
1555 DRM_DEBUG_KMS("disabled FBC\n");
1556}
1557
/*
 * Program and enable frame-buffer compression on 8xx-class hardware for
 * the scanout attached to @crtc.
 *
 * @crtc:     the CRTC whose framebuffer should be compressed
 * @interval: periodic-recompression interval, written into FBC_CONTROL
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/*
	 * The compressed-buffer pitch is bounded both by the stolen-memory
	 * CFB size and by the actual framebuffer pitch.
	 */
	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	/* Y offset of the fence (scanout panning). */
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/*
	 * NOTE(review): the 0x2fff mask (not 0x3fff) matches the upstream
	 * Linux code this was ported from — verify against the PRM before
	 * "fixing" it.
	 */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
1600
1601static bool i8xx_fbc_enabled(struct drm_device *dev)
1602{
1603 struct drm_i915_private *dev_priv = dev->dev_private;
1604
1605 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1606}
1607
/*
 * Program and enable frame-buffer compression on G4x-class hardware for
 * the scanout attached to @crtc.
 *
 * @crtc:     the CRTC whose framebuffer should be compressed
 * @interval: recompression timer count for DPFC_RECOMP_CTL
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Plane select, self-refresh enable, and the fence backing the fb. */
	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	/* Recompression stall watermark and timer interval. */
	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1634
1635static void g4x_disable_fbc(struct drm_device *dev)
1636{
1637 struct drm_i915_private *dev_priv = dev->dev_private;
1638 u32 dpfc_ctl;
1639
1640 /* Disable compression */
1641 dpfc_ctl = I915_READ(DPFC_CONTROL);
1642 if (dpfc_ctl & DPFC_CTL_EN) {
1643 dpfc_ctl &= ~DPFC_CTL_EN;
1644 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1645
1646 DRM_DEBUG_KMS("disabled FBC\n");
1647 }
1648}
1649
1650static bool g4x_fbc_enabled(struct drm_device *dev)
1651{
1652 struct drm_i915_private *dev_priv = dev->dev_private;
1653
1654 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1655}
1656
/*
 * Make the blitter notify FBC of its writes on Sandy Bridge.
 *
 * GEN6_BLITTER_ECOSKPD is a masked/locked register: the notify bit must
 * first be unlocked by writing it in the high (lock) half, then set in
 * the low half, then re-locked — hence the three writes below.  The
 * whole sequence runs under forcewake so the GT is awake for it.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	/* Unlock the FBC-notify bit... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...set it... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...and lock it again. */
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1676
/*
 * Program and enable frame-buffer compression on Ironlake-class (and
 * Sandy Bridge) hardware for the scanout attached to @crtc.
 *
 * @crtc:     the CRTC whose framebuffer should be compressed
 * @interval: recompression timer count for ILK_DPFC_RECOMP_CTL
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Preserve only the reserved bits; rebuild the rest from scratch. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	/* Render-target base for FBC: the framebuffer's GTT offset. */
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* Sandy Bridge additionally needs the CPU fence programmed
		 * and the blitter told to notify FBC of its writes. */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1714
1715static void ironlake_disable_fbc(struct drm_device *dev)
1716{
1717 struct drm_i915_private *dev_priv = dev->dev_private;
1718 u32 dpfc_ctl;
1719
1720 /* Disable compression */
1721 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1722 if (dpfc_ctl & DPFC_CTL_EN) {
1723 dpfc_ctl &= ~DPFC_CTL_EN;
1724 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1725
1726 DRM_DEBUG_KMS("disabled FBC\n");
1727 }
1728}
1729
1730static bool ironlake_fbc_enabled(struct drm_device *dev)
1731{
1732 struct drm_i915_private *dev_priv = dev->dev_private;
1733
1734 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1735}
1736
1737bool intel_fbc_enabled(struct drm_device *dev)
1738{
1739 struct drm_i915_private *dev_priv = dev->dev_private;
1740
1741 if (!dev_priv->display.fbc_enabled)
1742 return false;
1743
1744 return dev_priv->display.fbc_enabled(dev);
1745}
1746
/*
 * Deferred FBC-enable task (runs from the driver taskqueue).
 *
 * @arg:     the struct intel_fbc_work scheduled by intel_enable_fbc()
 * @pending: taskqueue pending count (unused)
 *
 * Under the device lock, enables FBC only if this work item is still the
 * one recorded in dev_priv->fbc_work (i.e. it was not superseded or
 * cancelled) and the CRTC still scans out the same framebuffer.  The
 * work structure is freed here in every case.
 */
static void intel_fbc_work_fn(void *arg, int pending)
{
	struct intel_fbc_work *work = arg;
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_LOCK(dev);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Record what is now compressed so intel_update_fbc()
			 * can detect a no-op update later. */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	DRM_UNLOCK(dev);

	free(work, DRM_MEM_KMS);
}
1773
/*
 * Cancel a pending deferred FBC enable, if any.
 *
 * If the timeout task had not started yet, taskqueue_cancel_timeout()
 * returns 0 and we free the work here; otherwise the task is (or was)
 * running and intel_fbc_work_fn() frees it.  Clearing
 * dev_priv->fbc_work makes a concurrently-running task see it has been
 * disowned and do nothing.
 */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	u_int pending;

	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
	    &pending) == 0)
		/* tasklet was killed before being run, clean up */
		free(dev_priv->fbc_work, DRM_MEM_KMS);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
1799
/*
 * Schedule a deferred FBC enable for @crtc.
 *
 * @crtc:     CRTC whose scanout should be compressed
 * @interval: recompression interval forwarded to the per-chip enable hook
 *
 * Any previously-pending enable is cancelled first; the actual hardware
 * programming happens ~50ms later in intel_fbc_work_fn() (see the long
 * comment below for why the delay exists).
 */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Hardware without an enable hook has no FBC at all. */
	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	/* M_WAITOK: allocation cannot fail, it sleeps instead. */
	work = malloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
	    work);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
	    msecs_to_jiffies(50));
}
1836
1837void intel_disable_fbc(struct drm_device *dev)
1838{
1839 struct drm_i915_private *dev_priv = dev->dev_private;
1840
1841 intel_cancel_fbc_work(dev_priv);
1842
1843 if (!dev_priv->display.disable_fbc)
1844 return;
1845
1846 dev_priv->display.disable_fbc(dev);
1847 dev_priv->cfb_plane = -1;
1848}
1849
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* i915_enable_fbc < 0 means "per-chip default": off through gen6. */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* Worst case: no compression, so the CFB must fit the whole fb. */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	/* FBC requires an X-tiled, fenced scanout buffer. */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (kdb_active)
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
2013
2014int
2015intel_pin_and_fence_fb_obj(struct drm_device *dev,
2016 struct drm_i915_gem_object *obj,
2017 struct intel_ring_buffer *pipelined)
2018{
2019 struct drm_i915_private *dev_priv = dev->dev_private;
2020 u32 alignment;
2021 int ret;

--- 25 unchanged lines hidden (view full) ---

2047 if (ret)
2048 goto err_interruptible;
2049
2050 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2051 * fence, whereas 965+ only requires a fence if using
2052 * framebuffer compression. For simplicity, we always install
2053 * a fence as the cost is not that onerous.
2054 */
1783int
1784intel_pin_and_fence_fb_obj(struct drm_device *dev,
1785 struct drm_i915_gem_object *obj,
1786 struct intel_ring_buffer *pipelined)
1787{
1788 struct drm_i915_private *dev_priv = dev->dev_private;
1789 u32 alignment;
1790 int ret;

--- 25 unchanged lines hidden (view full) ---

1816 if (ret)
1817 goto err_interruptible;
1818
1819 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1820 * fence, whereas 965+ only requires a fence if using
1821 * framebuffer compression. For simplicity, we always install
1822 * a fence as the cost is not that onerous.
1823 */
2055 if (obj->tiling_mode != I915_TILING_NONE) {
2056 ret = i915_gem_object_get_fence(obj, pipelined);
2057 if (ret)
2058 goto err_unpin;
1824 ret = i915_gem_object_get_fence(obj);
1825 if (ret)
1826 goto err_unpin;
2059
1827
2060 i915_gem_object_pin_fence(obj);
2061 }
1828 i915_gem_object_pin_fence(obj);
2062
2063 dev_priv->mm.interruptible = true;
2064 return 0;
2065
2066err_unpin:
1829
1830 dev_priv->mm.interruptible = true;
1831 return 0;
1832
1833err_unpin:
2067 i915_gem_object_unpin(obj);
1834 i915_gem_object_unpin_from_display_plane(obj);
2068err_interruptible:
2069 dev_priv->mm.interruptible = true;
2070 return ret;
2071}
2072
2073void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2074{
2075 i915_gem_object_unpin_fence(obj);
1835err_interruptible:
1836 dev_priv->mm.interruptible = true;
1837 return ret;
1838}
1839
1840void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1841{
1842 i915_gem_object_unpin_fence(obj);
2076 i915_gem_object_unpin(obj);
1843 i915_gem_object_unpin_from_display_plane(obj);
2077}
2078
2079static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2080 int x, int y)
2081{
2082 struct drm_device *dev = crtc->dev;
2083 struct drm_i915_private *dev_priv = dev->dev_private;
2084 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

--- 49 unchanged lines hidden (view full) ---

2134
2135 Start = obj->gtt_offset;
2136 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2137
2138 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2139 Start, Offset, x, y, fb->pitches[0]);
2140 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2141 if (INTEL_INFO(dev)->gen >= 4) {
1844}
1845
1846static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1847 int x, int y)
1848{
1849 struct drm_device *dev = crtc->dev;
1850 struct drm_i915_private *dev_priv = dev->dev_private;
1851 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

--- 49 unchanged lines hidden (view full) ---

1901
1902 Start = obj->gtt_offset;
1903 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
1904
1905 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1906 Start, Offset, x, y, fb->pitches[0]);
1907 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1908 if (INTEL_INFO(dev)->gen >= 4) {
2142 I915_WRITE(DSPSURF(plane), Start);
1909 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
2143 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2144 I915_WRITE(DSPADDR(plane), Offset);
2145 } else
2146 I915_WRITE(DSPADDR(plane), Start + Offset);
2147 POSTING_READ(reg);
2148
2149 return (0);
2150}

--- 68 unchanged lines hidden (view full) ---

2219 I915_WRITE(reg, dspcntr);
2220
2221 Start = obj->gtt_offset;
2222 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2223
2224 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2225 Start, Offset, x, y, fb->pitches[0]);
2226 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1910 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1911 I915_WRITE(DSPADDR(plane), Offset);
1912 } else
1913 I915_WRITE(DSPADDR(plane), Start + Offset);
1914 POSTING_READ(reg);
1915
1916 return (0);
1917}

--- 68 unchanged lines hidden (view full) ---

1986 I915_WRITE(reg, dspcntr);
1987
1988 Start = obj->gtt_offset;
1989 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
1990
1991 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1992 Start, Offset, x, y, fb->pitches[0]);
1993 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2227 I915_WRITE(DSPSURF(plane), Start);
1994 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
2228 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2229 I915_WRITE(DSPADDR(plane), Offset);
2230 POSTING_READ(reg);
2231
2232 return 0;
2233}
2234
2235/* Assume fb object is pinned & idle & fenced and just update base pointers */
2236static int
2237intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2238 int x, int y, enum mode_set_atomic state)
2239{
2240 struct drm_device *dev = crtc->dev;
2241 struct drm_i915_private *dev_priv = dev->dev_private;
1995 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1996 I915_WRITE(DSPADDR(plane), Offset);
1997 POSTING_READ(reg);
1998
1999 return 0;
2000}
2001
2002/* Assume fb object is pinned & idle & fenced and just update base pointers */
2003static int
2004intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2005 int x, int y, enum mode_set_atomic state)
2006{
2007 struct drm_device *dev = crtc->dev;
2008 struct drm_i915_private *dev_priv = dev->dev_private;
2242 int ret;
2243
2009
2244 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2245 if (ret)
2246 return ret;
2247
2248 intel_update_fbc(dev);
2010 if (dev_priv->display.disable_fbc)
2011 dev_priv->display.disable_fbc(dev);
2249 intel_increase_pllclock(crtc);
2250
2012 intel_increase_pllclock(crtc);
2013
2251 return 0;
2014 return dev_priv->display.update_plane(crtc, fb, x, y);
2252}
2253
2254static int
2255intel_finish_fb(struct drm_framebuffer *old_fb)
2256{
2257 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2258 struct drm_device *dev = obj->base.dev;
2259 struct drm_i915_private *dev_priv = dev->dev_private;

--- 22 unchanged lines hidden (view full) ---

2282 return ret;
2283}
2284
2285static int
2286intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2287 struct drm_framebuffer *old_fb)
2288{
2289 struct drm_device *dev = crtc->dev;
2015}
2016
2017static int
2018intel_finish_fb(struct drm_framebuffer *old_fb)
2019{
2020 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2021 struct drm_device *dev = obj->base.dev;
2022 struct drm_i915_private *dev_priv = dev->dev_private;

--- 22 unchanged lines hidden (view full) ---

2045 return ret;
2046}
2047
2048static int
2049intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2050 struct drm_framebuffer *old_fb)
2051{
2052 struct drm_device *dev = crtc->dev;
2053 struct drm_i915_private *dev_priv = dev->dev_private;
2290#if 0
2291 struct drm_i915_master_private *master_priv;
2054#if 0
2055 struct drm_i915_master_private *master_priv;
2292#else
2293 drm_i915_private_t *dev_priv = dev->dev_private;
2294#endif
2295 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2296 int ret;
2297
2298 /* no fb bound */
2299 if (!crtc->fb) {
2300 DRM_ERROR("No FB bound\n");
2301 return 0;
2302 }
2303
2056#endif
2057 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2058 int ret;
2059
2060 /* no fb bound */
2061 if (!crtc->fb) {
2062 DRM_ERROR("No FB bound\n");
2063 return 0;
2064 }
2065
2304 switch (intel_crtc->plane) {
2305 case 0:
2306 case 1:
2307 break;
2308 case 2:
2309 if (IS_IVYBRIDGE(dev))
2310 break;
2311 /* fall through otherwise */
2312 default:
2313 DRM_ERROR("no plane for crtc\n");
2066 if(intel_crtc->plane > dev_priv->num_pipe) {
2067 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2068 intel_crtc->plane,
2069 dev_priv->num_pipe);
2314 return -EINVAL;
2315 }
2316
2317 DRM_LOCK(dev);
2318 ret = intel_pin_and_fence_fb_obj(dev,
2319 to_intel_framebuffer(crtc->fb)->obj,
2320 NULL);
2321 if (ret != 0) {
2322 DRM_UNLOCK(dev);
2323 DRM_ERROR("pin & fence failed\n");
2324 return ret;
2325 }
2326
2327 if (old_fb)
2328 intel_finish_fb(old_fb);
2329
2070 return -EINVAL;
2071 }
2072
2073 DRM_LOCK(dev);
2074 ret = intel_pin_and_fence_fb_obj(dev,
2075 to_intel_framebuffer(crtc->fb)->obj,
2076 NULL);
2077 if (ret != 0) {
2078 DRM_UNLOCK(dev);
2079 DRM_ERROR("pin & fence failed\n");
2080 return ret;
2081 }
2082
2083 if (old_fb)
2084 intel_finish_fb(old_fb);
2085
2330 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2331 LEAVE_ATOMIC_MODE_SET);
2086 ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
2332 if (ret) {
2333 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2334 DRM_UNLOCK(dev);
2335 DRM_ERROR("failed to update base address\n");
2336 return ret;
2337 }
2338
2339 if (old_fb) {
2340 intel_wait_for_vblank(dev, intel_crtc->pipe);
2341 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2342 }
2343
2087 if (ret) {
2088 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2089 DRM_UNLOCK(dev);
2090 DRM_ERROR("failed to update base address\n");
2091 return ret;
2092 }
2093
2094 if (old_fb) {
2095 intel_wait_for_vblank(dev, intel_crtc->pipe);
2096 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2097 }
2098
2099 intel_update_fbc(dev);
2344 DRM_UNLOCK(dev);
2345
2346#if 0
2347 if (!dev->primary->master)
2348 return 0;
2349
2350 master_priv = dev->primary->master->driver_priv;
2351 if (!master_priv->sarea_priv)

--- 219 unchanged lines hidden (view full) ---

2571
2572/* The FDI link training functions for SNB/Cougarpoint. */
2573static void gen6_fdi_link_train(struct drm_crtc *crtc)
2574{
2575 struct drm_device *dev = crtc->dev;
2576 struct drm_i915_private *dev_priv = dev->dev_private;
2577 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2578 int pipe = intel_crtc->pipe;
2100 DRM_UNLOCK(dev);
2101
2102#if 0
2103 if (!dev->primary->master)
2104 return 0;
2105
2106 master_priv = dev->primary->master->driver_priv;
2107 if (!master_priv->sarea_priv)

--- 219 unchanged lines hidden (view full) ---

2327
2328/* The FDI link training functions for SNB/Cougarpoint. */
2329static void gen6_fdi_link_train(struct drm_crtc *crtc)
2330{
2331 struct drm_device *dev = crtc->dev;
2332 struct drm_i915_private *dev_priv = dev->dev_private;
2333 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2334 int pipe = intel_crtc->pipe;
2579 u32 reg, temp, i;
2335 u32 reg, temp, i, retry;
2580
2581 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2582 for train result */
2583 reg = FDI_RX_IMR(pipe);
2584 temp = I915_READ(reg);
2585 temp &= ~FDI_RX_SYMBOL_LOCK;
2586 temp &= ~FDI_RX_BIT_LOCK;
2587 I915_WRITE(reg, temp);

--- 35 unchanged lines hidden (view full) ---

2623 temp = I915_READ(reg);
2624 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2625 temp |= snb_b_fdi_train_param[i];
2626 I915_WRITE(reg, temp);
2627
2628 POSTING_READ(reg);
2629 DELAY(500);
2630
2336
2337 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2338 for train result */
2339 reg = FDI_RX_IMR(pipe);
2340 temp = I915_READ(reg);
2341 temp &= ~FDI_RX_SYMBOL_LOCK;
2342 temp &= ~FDI_RX_BIT_LOCK;
2343 I915_WRITE(reg, temp);

--- 35 unchanged lines hidden (view full) ---

2379 temp = I915_READ(reg);
2380 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2381 temp |= snb_b_fdi_train_param[i];
2382 I915_WRITE(reg, temp);
2383
2384 POSTING_READ(reg);
2385 DELAY(500);
2386
2631 reg = FDI_RX_IIR(pipe);
2632 temp = I915_READ(reg);
2633 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2387 for (retry = 0; retry < 5; retry++) {
2388 reg = FDI_RX_IIR(pipe);
2389 temp = I915_READ(reg);
2390 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2634
2391
2635 if (temp & FDI_RX_BIT_LOCK) {
2636 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2637 DRM_DEBUG_KMS("FDI train 1 done.\n");
2638 break;
2392 if (temp & FDI_RX_BIT_LOCK) {
2393 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2394 DRM_DEBUG_KMS("FDI train 1 done.\n");
2395 break;
2396 }
2397 DELAY(50);
2639 }
2398 }
2399 if (retry < 5)
2400 break;
2640 }
2641 if (i == 4)
2642 DRM_ERROR("FDI train 1 fail!\n");
2643
2644 /* Train 2 */
2645 reg = FDI_TX_CTL(pipe);
2646 temp = I915_READ(reg);
2647 temp &= ~FDI_LINK_TRAIN_NONE;

--- 24 unchanged lines hidden (view full) ---

2672 temp = I915_READ(reg);
2673 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2674 temp |= snb_b_fdi_train_param[i];
2675 I915_WRITE(reg, temp);
2676
2677 POSTING_READ(reg);
2678 DELAY(500);
2679
2401 }
2402 if (i == 4)
2403 DRM_ERROR("FDI train 1 fail!\n");
2404
2405 /* Train 2 */
2406 reg = FDI_TX_CTL(pipe);
2407 temp = I915_READ(reg);
2408 temp &= ~FDI_LINK_TRAIN_NONE;

--- 24 unchanged lines hidden (view full) ---

2433 temp = I915_READ(reg);
2434 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2435 temp |= snb_b_fdi_train_param[i];
2436 I915_WRITE(reg, temp);
2437
2438 POSTING_READ(reg);
2439 DELAY(500);
2440
2680 reg = FDI_RX_IIR(pipe);
2681 temp = I915_READ(reg);
2682 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2441 for (retry = 0; retry < 5; retry++) {
2442 reg = FDI_RX_IIR(pipe);
2443 temp = I915_READ(reg);
2444 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2683
2445
2684 if (temp & FDI_RX_SYMBOL_LOCK) {
2685 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2686 DRM_DEBUG_KMS("FDI train 2 done.\n");
2687 break;
2446 if (temp & FDI_RX_SYMBOL_LOCK) {
2447 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2448 DRM_DEBUG_KMS("FDI train 2 done.\n");
2449 break;
2450 }
2451 DELAY(50);
2688 }
2452 }
2453 if (retry < 5)
2454 break;
2689 }
2690 if (i == 4)
2691 DRM_ERROR("FDI train 2 fail!\n");
2692
2693 DRM_DEBUG_KMS("FDI train done.\n");
2694}
2695
2696/* Manual link training for Ivy Bridge A0 parts */

--- 132 unchanged lines hidden (view full) ---

2829
2830 /* Switch from Rawclk to PCDclk */
2831 temp = I915_READ(reg);
2832 I915_WRITE(reg, temp | FDI_PCDCLK);
2833
2834 POSTING_READ(reg);
2835 DELAY(200);
2836
2455 }
2456 if (i == 4)
2457 DRM_ERROR("FDI train 2 fail!\n");
2458
2459 DRM_DEBUG_KMS("FDI train done.\n");
2460}
2461
2462/* Manual link training for Ivy Bridge A0 parts */

--- 132 unchanged lines hidden (view full) ---

2595
2596 /* Switch from Rawclk to PCDclk */
2597 temp = I915_READ(reg);
2598 I915_WRITE(reg, temp | FDI_PCDCLK);
2599
2600 POSTING_READ(reg);
2601 DELAY(200);
2602
2837 /* Enable CPU FDI TX PLL, always on for Ironlake */
2838 reg = FDI_TX_CTL(pipe);
2839 temp = I915_READ(reg);
2840 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2841 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2603 /* On Haswell, the PLL configuration for ports and pipes is handled
2604 * separately, as part of DDI setup */
2605 if (!IS_HASWELL(dev)) {
2606 /* Enable CPU FDI TX PLL, always on for Ironlake */
2607 reg = FDI_TX_CTL(pipe);
2608 temp = I915_READ(reg);
2609 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2610 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2842
2611
2843 POSTING_READ(reg);
2844 DELAY(100);
2845 }
2612 POSTING_READ(reg);
2613 DELAY(100);
2614 }
2615 }
2846}
2847
2848static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2849{
2850 struct drm_i915_private *dev_priv = dev->dev_private;
2851 u32 flags = I915_READ(SOUTH_CHICKEN1);
2852
2853 flags &= ~(FDI_PHASE_SYNC_EN(pipe));

--- 56 unchanged lines hidden (view full) ---

2910 temp &= ~(0x07 << 16);
2911 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2912 I915_WRITE(reg, temp);
2913
2914 POSTING_READ(reg);
2915 DELAY(100);
2916}
2917
2616}
2617
2618static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2619{
2620 struct drm_i915_private *dev_priv = dev->dev_private;
2621 u32 flags = I915_READ(SOUTH_CHICKEN1);
2622
2623 flags &= ~(FDI_PHASE_SYNC_EN(pipe));

--- 56 unchanged lines hidden (view full) ---

2680 temp &= ~(0x07 << 16);
2681 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2682 I915_WRITE(reg, temp);
2683
2684 POSTING_READ(reg);
2685 DELAY(100);
2686}
2687
2918/*
2919 * When we disable a pipe, we need to clear any pending scanline wait events
2920 * to avoid hanging the ring, which we assume we are waiting on.
2921 */
2922static void intel_clear_scanline_wait(struct drm_device *dev)
2923{
2924 struct drm_i915_private *dev_priv = dev->dev_private;
2925 struct intel_ring_buffer *ring;
2926 u32 tmp;
2927
2928 if (IS_GEN2(dev))
2929 /* Can't break the hang on i8xx */
2930 return;
2931
2932 ring = LP_RING(dev_priv);
2933 tmp = I915_READ_CTL(ring);
2934 if (tmp & RING_WAIT)
2935 I915_WRITE_CTL(ring, tmp);
2936}
2937
2938static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2939{
2688static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2689{
2940 struct drm_i915_gem_object *obj;
2941 struct drm_i915_private *dev_priv;
2942 struct drm_device *dev;
2690 struct drm_device *dev = crtc->dev;
2943
2944 if (crtc->fb == NULL)
2945 return;
2946
2691
2692 if (crtc->fb == NULL)
2693 return;
2694
2947 obj = to_intel_framebuffer(crtc->fb)->obj;
2948 dev = crtc->dev;
2949 dev_priv = dev->dev_private;
2950 mtx_lock(&dev->event_lock);
2951 while (atomic_load_acq_int(&obj->pending_flip) != 0)
2952 msleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
2953 mtx_unlock(&dev->event_lock);
2695 DRM_LOCK(dev);
2696 intel_finish_fb(crtc->fb);
2697 DRM_UNLOCK(dev);
2954}
2955
2956static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2957{
2958 struct drm_device *dev = crtc->dev;
2959 struct drm_mode_config *mode_config = &dev->mode_config;
2960 struct intel_encoder *encoder;
2961
2962 /*
2963 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2964 * must be driven by its own crtc; no sharing is possible.
2965 */
2966 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2967 if (encoder->base.crtc != crtc)
2968 continue;
2969
2698}
2699
2700static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2701{
2702 struct drm_device *dev = crtc->dev;
2703 struct drm_mode_config *mode_config = &dev->mode_config;
2704 struct intel_encoder *encoder;
2705
2706 /*
2707 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2708 * must be driven by its own crtc; no sharing is possible.
2709 */
2710 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2711 if (encoder->base.crtc != crtc)
2712 continue;
2713
2714 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2715 * CPU handles all others */
2716 if (IS_HASWELL(dev)) {
2717 /* It is still unclear how this will work on PPT, so throw up a warning */
2718 if (!HAS_PCH_LPT(dev))
2719 DRM_DEBUG_KMS("Haswell: PPT\n");
2720
2721 if (encoder->type == DRM_MODE_ENCODER_DAC) {
2722 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2723 return true;
2724 } else {
2725 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2726 encoder->type);
2727 return false;
2728 }
2729 }
2730
2970 switch (encoder->type) {
2971 case INTEL_OUTPUT_EDP:
2972 if (!intel_encoder_is_pch_edp(&encoder->base))
2973 return false;
2974 continue;
2975 }
2976 }
2977
2978 return true;
2979}
2980
2731 switch (encoder->type) {
2732 case INTEL_OUTPUT_EDP:
2733 if (!intel_encoder_is_pch_edp(&encoder->base))
2734 return false;
2735 continue;
2736 }
2737 }
2738
2739 return true;
2740}
2741
2742/* Program iCLKIP clock to the desired frequency */
2743static void lpt_program_iclkip(struct drm_crtc *crtc)
2744{
2745 struct drm_device *dev = crtc->dev;
2746 struct drm_i915_private *dev_priv = dev->dev_private;
2747 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2748 u32 temp;
2749
2750 /* It is necessary to ungate the pixclk gate prior to programming
2751 * the divisors, and gate it back when it is done.
2752 */
2753 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2754
2755 /* Disable SSCCTL */
2756 intel_sbi_write(dev_priv, SBI_SSCCTL6,
2757 intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2758 SBI_SSCCTL_DISABLE);
2759
2760 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2761 if (crtc->mode.clock == 20000) {
2762 auxdiv = 1;
2763 divsel = 0x41;
2764 phaseinc = 0x20;
2765 } else {
2766 /* The iCLK virtual clock root frequency is in MHz,
2767 * but the crtc->mode.clock in in KHz. To get the divisors,
2768 * it is necessary to divide one by another, so we
2769 * convert the virtual clock precision to KHz here for higher
2770 * precision.
2771 */
2772 u32 iclk_virtual_root_freq = 172800 * 1000;
2773 u32 iclk_pi_range = 64;
2774 u32 desired_divisor, msb_divisor_value, pi_value;
2775
2776 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2777 msb_divisor_value = desired_divisor / iclk_pi_range;
2778 pi_value = desired_divisor % iclk_pi_range;
2779
2780 auxdiv = 0;
2781 divsel = msb_divisor_value - 2;
2782 phaseinc = pi_value;
2783 }
2784
2785 /* This should not happen with any sane values */
2786 if ((SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2787 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK))
2788 DRM_DEBUG_KMS("DIVSEL_MASK");
2789 if ((SBI_SSCDIVINTPHASE_DIR(phasedir) &
2790 ~SBI_SSCDIVINTPHASE_INCVAL_MASK))
2791 DRM_DEBUG_KMS("INCVAL_MASK");
2792
2793 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2794 crtc->mode.clock,
2795 auxdiv,
2796 divsel,
2797 phasedir,
2798 phaseinc);
2799
2800 /* Program SSCDIVINTPHASE6 */
2801 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2802 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2803 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2804 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2805 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2806 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2807 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2808
2809 intel_sbi_write(dev_priv,
2810 SBI_SSCDIVINTPHASE6,
2811 temp);
2812
2813 /* Program SSCAUXDIV */
2814 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2815 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2816 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2817 intel_sbi_write(dev_priv,
2818 SBI_SSCAUXDIV6,
2819 temp);
2820
2821
2822 /* Enable modulator and associated divider */
2823 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2824 temp &= ~SBI_SSCCTL_DISABLE;
2825 intel_sbi_write(dev_priv,
2826 SBI_SSCCTL6,
2827 temp);
2828
2829 /* Wait for initialization time */
2830 DELAY(24);
2831
2832 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2833}
2834
2981/*
2982 * Enable PCH resources required for PCH ports:
2983 * - PCH PLLs
2984 * - FDI training & RX/TX
2985 * - update transcoder timings
2986 * - DP transcoding bits
2987 * - transcoder
2988 */
2989static void ironlake_pch_enable(struct drm_crtc *crtc)
2990{
2991 struct drm_device *dev = crtc->dev;
2992 struct drm_i915_private *dev_priv = dev->dev_private;
2993 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2994 int pipe = intel_crtc->pipe;
2835/*
2836 * Enable PCH resources required for PCH ports:
2837 * - PCH PLLs
2838 * - FDI training & RX/TX
2839 * - update transcoder timings
2840 * - DP transcoding bits
2841 * - transcoder
2842 */
2843static void ironlake_pch_enable(struct drm_crtc *crtc)
2844{
2845 struct drm_device *dev = crtc->dev;
2846 struct drm_i915_private *dev_priv = dev->dev_private;
2847 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2848 int pipe = intel_crtc->pipe;
2995 u32 reg, temp, transc_sel;
2849 u32 reg, temp;
2996
2850
2851 assert_transcoder_disabled(dev_priv, pipe);
2852
2997 /* For PCH output, training FDI link */
2998 dev_priv->display.fdi_link_train(crtc);
2999
2853 /* For PCH output, training FDI link */
2854 dev_priv->display.fdi_link_train(crtc);
2855
3000 intel_enable_pch_pll(dev_priv, pipe);
2856 intel_enable_pch_pll(intel_crtc);
3001
2857
3002 if (HAS_PCH_CPT(dev)) {
3003 transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
3004 TRANSC_DPLLB_SEL;
2858 if (HAS_PCH_LPT(dev)) {
2859 DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2860 lpt_program_iclkip(crtc);
2861 } else if (HAS_PCH_CPT(dev)) {
2862 u32 sel;
3005
2863
3006 /* Be sure PCH DPLL SEL is set */
3007 temp = I915_READ(PCH_DPLL_SEL);
2864 temp = I915_READ(PCH_DPLL_SEL);
3008 if (pipe == 0) {
3009 temp &= ~(TRANSA_DPLLB_SEL);
3010 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3011 } else if (pipe == 1) {
3012 temp &= ~(TRANSB_DPLLB_SEL);
3013 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3014 } else if (pipe == 2) {
3015 temp &= ~(TRANSC_DPLLB_SEL);
3016 temp |= (TRANSC_DPLL_ENABLE | transc_sel);
2865 switch (pipe) {
2866 default:
2867 case 0:
2868 temp |= TRANSA_DPLL_ENABLE;
2869 sel = TRANSA_DPLLB_SEL;
2870 break;
2871 case 1:
2872 temp |= TRANSB_DPLL_ENABLE;
2873 sel = TRANSB_DPLLB_SEL;
2874 break;
2875 case 2:
2876 temp |= TRANSC_DPLL_ENABLE;
2877 sel = TRANSC_DPLLB_SEL;
2878 break;
3017 }
2879 }
2880 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2881 temp |= sel;
2882 else
2883 temp &= ~sel;
3018 I915_WRITE(PCH_DPLL_SEL, temp);
3019 }
3020
3021 /* set transcoder timing, panel must allow it */
3022 assert_panel_unlocked(dev_priv, pipe);
3023 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3024 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3025 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
3026
3027 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3028 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3029 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
3030 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
3031
2884 I915_WRITE(PCH_DPLL_SEL, temp);
2885 }
2886
2887 /* set transcoder timing, panel must allow it */
2888 assert_panel_unlocked(dev_priv, pipe);
2889 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2890 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2891 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
2892
2893 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2894 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2895 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2896 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
2897
3032 intel_fdi_normal_train(crtc);
2898 if (!IS_HASWELL(dev))
2899 intel_fdi_normal_train(crtc);
3033
3034 /* For PCH DP, enable TRANS_DP_CTL */
3035 if (HAS_PCH_CPT(dev) &&
3036 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3037 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3038 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3039 reg = TRANS_DP_CTL(pipe);
3040 temp = I915_READ(reg);

--- 26 unchanged lines hidden (view full) ---

3067 }
3068
3069 I915_WRITE(reg, temp);
3070 }
3071
3072 intel_enable_transcoder(dev_priv, pipe);
3073}
3074
2900
2901 /* For PCH DP, enable TRANS_DP_CTL */
2902 if (HAS_PCH_CPT(dev) &&
2903 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2904 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2905 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2906 reg = TRANS_DP_CTL(pipe);
2907 temp = I915_READ(reg);

--- 26 unchanged lines hidden (view full) ---

2934 }
2935
2936 I915_WRITE(reg, temp);
2937 }
2938
2939 intel_enable_transcoder(dev_priv, pipe);
2940}
2941
2942static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
2943{
2944 struct intel_pch_pll *pll = intel_crtc->pch_pll;
2945
2946 if (pll == NULL)
2947 return;
2948
2949 if (pll->refcount == 0) {
2950 printf("bad PCH PLL refcount\n");
2951 return;
2952 }
2953
2954 --pll->refcount;
2955 intel_crtc->pch_pll = NULL;
2956}
2957
2958static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
2959{
2960 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
2961 struct intel_pch_pll *pll;
2962 int i;
2963
2964 pll = intel_crtc->pch_pll;
2965 if (pll) {
2966 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
2967 intel_crtc->base.base.id, pll->pll_reg);
2968 goto prepare;
2969 }
2970
2971 if (HAS_PCH_IBX(dev_priv->dev)) {
2972 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
2973 i = intel_crtc->pipe;
2974 pll = &dev_priv->pch_plls[i];
2975
2976 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
2977 intel_crtc->base.base.id, pll->pll_reg);
2978
2979 goto found;
2980 }
2981
2982 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2983 pll = &dev_priv->pch_plls[i];
2984
2985 /* Only want to check enabled timings first */
2986 if (pll->refcount == 0)
2987 continue;
2988
2989 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
2990 fp == I915_READ(pll->fp0_reg)) {
2991 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
2992 intel_crtc->base.base.id,
2993 pll->pll_reg, pll->refcount, pll->active);
2994
2995 goto found;
2996 }
2997 }
2998
2999 /* Ok no matching timings, maybe there's a free one? */
3000 for (i = 0; i < dev_priv->num_pch_pll; i++) { /* XXXKIB: HACK */
3001 pll = &dev_priv->pch_plls[i];
3002 if (pll->refcount == 0) {
3003 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3004 intel_crtc->base.base.id, pll->pll_reg);
3005 goto found;
3006 }
3007 }
3008
3009 return NULL;
3010
3011found:
3012 intel_crtc->pch_pll = pll;
3013 pll->refcount++;
3014 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3015prepare: /* separate function? */
3016 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
3017
3018 /* Wait for the clocks to stabilize before rewriting the regs */
3019 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3020 POSTING_READ(pll->pll_reg);
3021 DELAY(150);
3022
3023 I915_WRITE(pll->fp0_reg, fp);
3024 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3025 pll->on = false;
3026 return pll;
3027}
3028
3075void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3076{
3077 struct drm_i915_private *dev_priv = dev->dev_private;
3078 int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3079 u32 temp;
3080
3081 temp = I915_READ(dslreg);
3082 DELAY(500);

--- 125 unchanged lines hidden (view full) ---

3208 break;
3209 default:
3210 KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */
3211 }
3212 I915_WRITE(PCH_DPLL_SEL, temp);
3213 }
3214
3215 /* disable PCH DPLL */
3029void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3030{
3031 struct drm_i915_private *dev_priv = dev->dev_private;
3032 int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3033 u32 temp;
3034
3035 temp = I915_READ(dslreg);
3036 DELAY(500);

--- 125 unchanged lines hidden (view full) ---

3162 break;
3163 default:
3164 KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */
3165 }
3166 I915_WRITE(PCH_DPLL_SEL, temp);
3167 }
3168
3169 /* disable PCH DPLL */
3216 if (!intel_crtc->no_pll)
3217 intel_disable_pch_pll(dev_priv, pipe);
3170 intel_disable_pch_pll(intel_crtc);
3218
3219 /* Switch from PCDclk to Rawclk */
3220 reg = FDI_RX_CTL(pipe);
3221 temp = I915_READ(reg);
3222 I915_WRITE(reg, temp & ~FDI_PCDCLK);
3223
3224 /* Disable CPU FDI TX PLL */
3225 reg = FDI_TX_CTL(pipe);

--- 11 unchanged lines hidden (view full) ---

3237 POSTING_READ(reg);
3238 DELAY(100);
3239
3240 intel_crtc->active = false;
3241 intel_update_watermarks(dev);
3242
3243 DRM_LOCK(dev);
3244 intel_update_fbc(dev);
3171
3172 /* Switch from PCDclk to Rawclk */
3173 reg = FDI_RX_CTL(pipe);
3174 temp = I915_READ(reg);
3175 I915_WRITE(reg, temp & ~FDI_PCDCLK);
3176
3177 /* Disable CPU FDI TX PLL */
3178 reg = FDI_TX_CTL(pipe);

--- 11 unchanged lines hidden (view full) ---

3190 POSTING_READ(reg);
3191 DELAY(100);
3192
3193 intel_crtc->active = false;
3194 intel_update_watermarks(dev);
3195
3196 DRM_LOCK(dev);
3197 intel_update_fbc(dev);
3245 intel_clear_scanline_wait(dev);
3246 DRM_UNLOCK(dev);
3247}
3248
3249static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3250{
3251 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3252 int pipe = intel_crtc->pipe;
3253 int plane = intel_crtc->plane;

--- 11 unchanged lines hidden (view full) ---

3265
3266 case DRM_MODE_DPMS_OFF:
3267 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3268 ironlake_crtc_disable(crtc);
3269 break;
3270 }
3271}
3272
3198 DRM_UNLOCK(dev);
3199}
3200
3201static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3202{
3203 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3204 int pipe = intel_crtc->pipe;
3205 int plane = intel_crtc->plane;

--- 11 unchanged lines hidden (view full) ---

3217
3218 case DRM_MODE_DPMS_OFF:
3219 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3220 ironlake_crtc_disable(crtc);
3221 break;
3222 }
3223}
3224
3225static void ironlake_crtc_off(struct drm_crtc *crtc)
3226{
3227 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3228 intel_put_pch_pll(intel_crtc);
3229}
3230
3273static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3274{
3275 if (!enable && intel_crtc->overlay) {
3276 struct drm_device *dev = intel_crtc->base.dev;
3277 struct drm_i915_private *dev_priv = dev->dev_private;
3278
3279 DRM_LOCK(dev);
3280 dev_priv->mm.interruptible = false;

--- 55 unchanged lines hidden (view full) ---

3336
3337 intel_disable_plane(dev_priv, plane, pipe);
3338 intel_disable_pipe(dev_priv, pipe);
3339 intel_disable_pll(dev_priv, pipe);
3340
3341 intel_crtc->active = false;
3342 intel_update_fbc(dev);
3343 intel_update_watermarks(dev);
3231static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3232{
3233 if (!enable && intel_crtc->overlay) {
3234 struct drm_device *dev = intel_crtc->base.dev;
3235 struct drm_i915_private *dev_priv = dev->dev_private;
3236
3237 DRM_LOCK(dev);
3238 dev_priv->mm.interruptible = false;

--- 55 unchanged lines hidden (view full) ---

3294
3295 intel_disable_plane(dev_priv, plane, pipe);
3296 intel_disable_pipe(dev_priv, pipe);
3297 intel_disable_pll(dev_priv, pipe);
3298
3299 intel_crtc->active = false;
3300 intel_update_fbc(dev);
3301 intel_update_watermarks(dev);
3344 intel_clear_scanline_wait(dev);
3345}
3346
3347static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3348{
3349 /* XXX: When our outputs are all unaware of DPMS modes other than off
3350 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3351 */
3352 switch (mode) {
3353 case DRM_MODE_DPMS_ON:
3354 case DRM_MODE_DPMS_STANDBY:
3355 case DRM_MODE_DPMS_SUSPEND:
3356 i9xx_crtc_enable(crtc);
3357 break;
3358 case DRM_MODE_DPMS_OFF:
3359 i9xx_crtc_disable(crtc);
3360 break;
3361 }
3362}
3363
3302}
3303
3304static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3305{
3306 /* XXX: When our outputs are all unaware of DPMS modes other than off
3307 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3308 */
3309 switch (mode) {
3310 case DRM_MODE_DPMS_ON:
3311 case DRM_MODE_DPMS_STANDBY:
3312 case DRM_MODE_DPMS_SUSPEND:
3313 i9xx_crtc_enable(crtc);
3314 break;
3315 case DRM_MODE_DPMS_OFF:
3316 i9xx_crtc_disable(crtc);
3317 break;
3318 }
3319}
3320
3321static void i9xx_crtc_off(struct drm_crtc *crtc)
3322{
3323}
3324
3364/**
3365 * Sets the power management mode of the pipe and plane.
3366 */
3367static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3368{
3369 struct drm_device *dev = crtc->dev;
3370 struct drm_i915_private *dev_priv = dev->dev_private;
3371#if 0

--- 48 unchanged lines hidden (view full) ---

3420 break;
3421 }
3422}
3423
3424static void intel_crtc_disable(struct drm_crtc *crtc)
3425{
3426 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3427 struct drm_device *dev = crtc->dev;
3325/**
3326 * Sets the power management mode of the pipe and plane.
3327 */
3328static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3329{
3330 struct drm_device *dev = crtc->dev;
3331 struct drm_i915_private *dev_priv = dev->dev_private;
3332#if 0

--- 48 unchanged lines hidden (view full) ---

3381 break;
3382 }
3383}
3384
3385static void intel_crtc_disable(struct drm_crtc *crtc)
3386{
3387 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3388 struct drm_device *dev = crtc->dev;
3389 struct drm_i915_private *dev_priv = dev->dev_private;
3428
3390
3429 /* Flush any pending WAITs before we disable the pipe. Note that
3430 * we need to drop the struct_mutex in order to acquire it again
3431 * during the lowlevel dpms routines around a couple of the
3432 * operations. It does not look trivial nor desirable to move
3433 * that locking higher. So instead we leave a window for the
3434 * submission of further commands on the fb before we can actually
3435 * disable it. This race with userspace exists anyway, and we can
3436 * only rely on the pipe being disabled by userspace after it
3437 * receives the hotplug notification and has flushed any pending
3438 * batches.
3439 */
3440 if (crtc->fb) {
3441 DRM_LOCK(dev);
3442 intel_finish_fb(crtc->fb);
3443 DRM_UNLOCK(dev);
3444 }
3445
3446 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3391 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3447 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3392 dev_priv->display.off(crtc);
3393
3394 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3448 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3449
3450 if (crtc->fb) {
3451 DRM_LOCK(dev);
3452 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3453 DRM_UNLOCK(dev);
3454 }
3455}

--- 32 unchanged lines hidden (view full) ---

3488 /* lvds has its own version of prepare see intel_lvds_prepare */
3489 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3490}
3491
3492void intel_encoder_commit(struct drm_encoder *encoder)
3493{
3494 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3495 struct drm_device *dev = encoder->dev;
3395 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3396
3397 if (crtc->fb) {
3398 DRM_LOCK(dev);
3399 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3400 DRM_UNLOCK(dev);
3401 }
3402}

--- 32 unchanged lines hidden (view full) ---

3435 /* lvds has its own version of prepare see intel_lvds_prepare */
3436 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3437}
3438
3439void intel_encoder_commit(struct drm_encoder *encoder)
3440{
3441 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3442 struct drm_device *dev = encoder->dev;
3496 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3497 struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3443 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
3498
3499 /* lvds has its own version of commit see intel_lvds_commit */
3500 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3501
3502 if (HAS_PCH_CPT(dev))
3503 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3504}
3505

--- 21 unchanged lines hidden (view full) ---

3527 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3528 * timings, so we need to be careful not to clobber these.*/
3529 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3530 drm_mode_set_crtcinfo(adjusted_mode, 0);
3531
3532 return true;
3533}
3534
3444
3445 /* lvds has its own version of commit see intel_lvds_commit */
3446 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3447
3448 if (HAS_PCH_CPT(dev))
3449 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3450}
3451

--- 21 unchanged lines hidden (view full) ---

3473 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3474 * timings, so we need to be careful not to clobber these.*/
3475 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3476 drm_mode_set_crtcinfo(adjusted_mode, 0);
3477
3478 return true;
3479}
3480
3481static int valleyview_get_display_clock_speed(struct drm_device *dev)
3482{
3483 return 400000; /* FIXME */
3484}
3485
3535static int i945_get_display_clock_speed(struct drm_device *dev)
3536{
3537 return 400000;
3538}
3539
3540static int i915_get_display_clock_speed(struct drm_device *dev)
3541{
3542 return 333000;

--- 81 unchanged lines hidden (view full) ---

3624 m_n->gmch_n = link_clock * nlanes * 8;
3625 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3626
3627 m_n->link_m = pixel_clock;
3628 m_n->link_n = link_clock;
3629 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3630}
3631
3486static int i945_get_display_clock_speed(struct drm_device *dev)
3487{
3488 return 400000;
3489}
3490
3491static int i915_get_display_clock_speed(struct drm_device *dev)
3492{
3493 return 333000;

--- 81 unchanged lines hidden (view full) ---

3575 m_n->gmch_n = link_clock * nlanes * 8;
3576 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3577
3578 m_n->link_m = pixel_clock;
3579 m_n->link_n = link_clock;
3580 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3581}
3582
3632
3633struct intel_watermark_params {
3634 unsigned long fifo_size;
3635 unsigned long max_wm;
3636 unsigned long default_wm;
3637 unsigned long guard_size;
3638 unsigned long cacheline_size;
3639};
3640
3641/* Pineview has different values for various configs */
3642static const struct intel_watermark_params pineview_display_wm = {
3643 PINEVIEW_DISPLAY_FIFO,
3644 PINEVIEW_MAX_WM,
3645 PINEVIEW_DFT_WM,
3646 PINEVIEW_GUARD_WM,
3647 PINEVIEW_FIFO_LINE_SIZE
3648};
3649static const struct intel_watermark_params pineview_display_hplloff_wm = {
3650 PINEVIEW_DISPLAY_FIFO,
3651 PINEVIEW_MAX_WM,
3652 PINEVIEW_DFT_HPLLOFF_WM,
3653 PINEVIEW_GUARD_WM,
3654 PINEVIEW_FIFO_LINE_SIZE
3655};
3656static const struct intel_watermark_params pineview_cursor_wm = {
3657 PINEVIEW_CURSOR_FIFO,
3658 PINEVIEW_CURSOR_MAX_WM,
3659 PINEVIEW_CURSOR_DFT_WM,
3660 PINEVIEW_CURSOR_GUARD_WM,
3661 PINEVIEW_FIFO_LINE_SIZE,
3662};
3663static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3664 PINEVIEW_CURSOR_FIFO,
3665 PINEVIEW_CURSOR_MAX_WM,
3666 PINEVIEW_CURSOR_DFT_WM,
3667 PINEVIEW_CURSOR_GUARD_WM,
3668 PINEVIEW_FIFO_LINE_SIZE
3669};
3670static const struct intel_watermark_params g4x_wm_info = {
3671 G4X_FIFO_SIZE,
3672 G4X_MAX_WM,
3673 G4X_MAX_WM,
3674 2,
3675 G4X_FIFO_LINE_SIZE,
3676};
3677static const struct intel_watermark_params g4x_cursor_wm_info = {
3678 I965_CURSOR_FIFO,
3679 I965_CURSOR_MAX_WM,
3680 I965_CURSOR_DFT_WM,
3681 2,
3682 G4X_FIFO_LINE_SIZE,
3683};
3684static const struct intel_watermark_params i965_cursor_wm_info = {
3685 I965_CURSOR_FIFO,
3686 I965_CURSOR_MAX_WM,
3687 I965_CURSOR_DFT_WM,
3688 2,
3689 I915_FIFO_LINE_SIZE,
3690};
3691static const struct intel_watermark_params i945_wm_info = {
3692 I945_FIFO_SIZE,
3693 I915_MAX_WM,
3694 1,
3695 2,
3696 I915_FIFO_LINE_SIZE
3697};
3698static const struct intel_watermark_params i915_wm_info = {
3699 I915_FIFO_SIZE,
3700 I915_MAX_WM,
3701 1,
3702 2,
3703 I915_FIFO_LINE_SIZE
3704};
3705static const struct intel_watermark_params i855_wm_info = {
3706 I855GM_FIFO_SIZE,
3707 I915_MAX_WM,
3708 1,
3709 2,
3710 I830_FIFO_LINE_SIZE
3711};
3712static const struct intel_watermark_params i830_wm_info = {
3713 I830_FIFO_SIZE,
3714 I915_MAX_WM,
3715 1,
3716 2,
3717 I830_FIFO_LINE_SIZE
3718};
3719
3720static const struct intel_watermark_params ironlake_display_wm_info = {
3721 ILK_DISPLAY_FIFO,
3722 ILK_DISPLAY_MAXWM,
3723 ILK_DISPLAY_DFTWM,
3724 2,
3725 ILK_FIFO_LINE_SIZE
3726};
3727static const struct intel_watermark_params ironlake_cursor_wm_info = {
3728 ILK_CURSOR_FIFO,
3729 ILK_CURSOR_MAXWM,
3730 ILK_CURSOR_DFTWM,
3731 2,
3732 ILK_FIFO_LINE_SIZE
3733};
3734static const struct intel_watermark_params ironlake_display_srwm_info = {
3735 ILK_DISPLAY_SR_FIFO,
3736 ILK_DISPLAY_MAX_SRWM,
3737 ILK_DISPLAY_DFT_SRWM,
3738 2,
3739 ILK_FIFO_LINE_SIZE
3740};
3741static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3742 ILK_CURSOR_SR_FIFO,
3743 ILK_CURSOR_MAX_SRWM,
3744 ILK_CURSOR_DFT_SRWM,
3745 2,
3746 ILK_FIFO_LINE_SIZE
3747};
3748
3749static const struct intel_watermark_params sandybridge_display_wm_info = {
3750 SNB_DISPLAY_FIFO,
3751 SNB_DISPLAY_MAXWM,
3752 SNB_DISPLAY_DFTWM,
3753 2,
3754 SNB_FIFO_LINE_SIZE
3755};
3756static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3757 SNB_CURSOR_FIFO,
3758 SNB_CURSOR_MAXWM,
3759 SNB_CURSOR_DFTWM,
3760 2,
3761 SNB_FIFO_LINE_SIZE
3762};
3763static const struct intel_watermark_params sandybridge_display_srwm_info = {
3764 SNB_DISPLAY_SR_FIFO,
3765 SNB_DISPLAY_MAX_SRWM,
3766 SNB_DISPLAY_DFT_SRWM,
3767 2,
3768 SNB_FIFO_LINE_SIZE
3769};
3770static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3771 SNB_CURSOR_SR_FIFO,
3772 SNB_CURSOR_MAX_SRWM,
3773 SNB_CURSOR_DFT_SRWM,
3774 2,
3775 SNB_FIFO_LINE_SIZE
3776};
3777
3778
3779/**
3780 * intel_calculate_wm - calculate watermark level
3781 * @clock_in_khz: pixel clock
3782 * @wm: chip FIFO params
3783 * @pixel_size: display pixel size
3784 * @latency_ns: memory latency for the platform
3785 *
3786 * Calculate the watermark level (the level at which the display plane will
3787 * start fetching from memory again). Each chip has a different display
3788 * FIFO size and allocation, so the caller needs to figure that out and pass
3789 * in the correct intel_watermark_params structure.
3790 *
3791 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3792 * on the pixel size. When it reaches the watermark level, it'll start
3793 * fetching FIFO line sized based chunks from memory until the FIFO fills
3794 * past the watermark point. If the FIFO drains completely, a FIFO underrun
3795 * will occur, and a display engine hang could result.
3796 */
3797static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3798 const struct intel_watermark_params *wm,
3799 int fifo_size,
3800 int pixel_size,
3801 unsigned long latency_ns)
3802{
3803 long entries_required, wm_size;
3804
3805 /*
3806 * Note: we need to make sure we don't overflow for various clock &
3807 * latency values.
3808 * clocks go from a few thousand to several hundred thousand.
3809 * latency is usually a few thousand
3810 */
3811 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3812 1000;
3813 entries_required = howmany(entries_required, wm->cacheline_size);
3814
3815 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3816
3817 wm_size = fifo_size - (entries_required + wm->guard_size);
3818
3819 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3820
3821 /* Don't promote wm_size to unsigned... */
3822 if (wm_size > (long)wm->max_wm)
3823 wm_size = wm->max_wm;
3824 if (wm_size <= 0)
3825 wm_size = wm->default_wm;
3826 return wm_size;
3827}
3828
3829struct cxsr_latency {
3830 int is_desktop;
3831 int is_ddr3;
3832 unsigned long fsb_freq;
3833 unsigned long mem_freq;
3834 unsigned long display_sr;
3835 unsigned long display_hpll_disable;
3836 unsigned long cursor_sr;
3837 unsigned long cursor_hpll_disable;
3838};
3839
/*
 * CxSR latency table, indexed by {desktop?, ddr3?, fsb, mem} via
 * intel_get_cxsr_latency().  Rows are grouped: desktop DDR2/DDR3 by
 * descending FSB, then mobile.  "SC" = single channel.
 */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	/* NOTE(review): 34106 (vs 4103+30000=34103) matches upstream Linux */
	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3877
3878static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3879 int is_ddr3,
3880 int fsb,
3881 int mem)
3882{
3883 const struct cxsr_latency *latency;
3884 int i;
3885
3886 if (fsb == 0 || mem == 0)
3887 return NULL;
3888
3889 for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
3890 latency = &cxsr_latency_table[i];
3891 if (is_desktop == latency->is_desktop &&
3892 is_ddr3 == latency->is_ddr3 &&
3893 fsb == latency->fsb_freq && mem == latency->mem_freq)
3894 return latency;
3895 }
3896
3897 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3898
3899 return NULL;
3900}
3901
3902static void pineview_disable_cxsr(struct drm_device *dev)
3903{
3904 struct drm_i915_private *dev_priv = dev->dev_private;
3905
3906 /* deactivate cxsr */
3907 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3908}
3909
3910/*
3911 * Latency for FIFO fetches is dependent on several factors:
3912 * - memory configuration (speed, channels)
3913 * - chipset
3914 * - current MCH state
3915 * It can be fairly high in some situations, so here we assume a fairly
3916 * pessimal value. It's a tradeoff between extra memory fetches (if we
3917 * set this value too high, the FIFO will fetch frequently to stay full)
3918 * and power consumption (set it too low to save power and we might see
3919 * FIFO underruns and display "flicker").
3920 *
3921 * A value of 5us seems to be a good balance; safe for very low end
3922 * platforms but not overly aggressive on lower latency configs.
3923 */
3924static const int latency_ns = 5000;
3925
3926static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3927{
3928 struct drm_i915_private *dev_priv = dev->dev_private;
3929 uint32_t dsparb = I915_READ(DSPARB);
3930 int size;
3931
3932 size = dsparb & 0x7f;
3933 if (plane)
3934 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3935
3936 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3937 plane ? "B" : "A", size);
3938
3939 return size;
3940}
3941
3942static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3943{
3944 struct drm_i915_private *dev_priv = dev->dev_private;
3945 uint32_t dsparb = I915_READ(DSPARB);
3946 int size;
3947
3948 size = dsparb & 0x1ff;
3949 if (plane)
3950 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3951 size >>= 1; /* Convert to cachelines */
3952
3953 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3954 plane ? "B" : "A", size);
3955
3956 return size;
3957}
3958
3959static int i845_get_fifo_size(struct drm_device *dev, int plane)
3960{
3961 struct drm_i915_private *dev_priv = dev->dev_private;
3962 uint32_t dsparb = I915_READ(DSPARB);
3963 int size;
3964
3965 size = dsparb & 0x7f;
3966 size >>= 2; /* Convert to cachelines */
3967
3968 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3969 plane ? "B" : "A",
3970 size);
3971
3972 return size;
3973}
3974
3975static int i830_get_fifo_size(struct drm_device *dev, int plane)
3976{
3977 struct drm_i915_private *dev_priv = dev->dev_private;
3978 uint32_t dsparb = I915_READ(DSPARB);
3979 int size;
3980
3981 size = dsparb & 0x7f;
3982 size >>= 1; /* Convert to cachelines */
3983
3984 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3985 plane ? "B" : "A", size);
3986
3987 return size;
3988}
3989
3990static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3991{
3992 struct drm_crtc *crtc, *enabled = NULL;
3993
3994 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3995 if (crtc->enabled && crtc->fb) {
3996 if (enabled)
3997 return NULL;
3998 enabled = crtc;
3999 }
4000 }
4001
4002 return enabled;
4003}
4004
/*
 * pineview_update_wm - program Pineview self-refresh (CxSR) watermarks.
 *
 * CxSR is only used with exactly one active pipe; when zero or several
 * pipes are enabled, or the memory configuration is not in the latency
 * table, self-refresh is disabled instead.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/*
		 * NOTE(review): the display FIFO size is used here rather
		 * than pineview_cursor_wm.fifo_size.  This matches upstream
		 * Linux; confirm it is intentional before "fixing" it.
		 */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr -- only after all watermarks are in place */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
4073
4074static bool g4x_compute_wm0(struct drm_device *dev,
4075 int plane,
4076 const struct intel_watermark_params *display,
4077 int display_latency_ns,
4078 const struct intel_watermark_params *cursor,
4079 int cursor_latency_ns,
4080 int *plane_wm,
4081 int *cursor_wm)
4082{
4083 struct drm_crtc *crtc;
4084 int htotal, hdisplay, clock, pixel_size;
4085 int line_time_us, line_count;
4086 int entries, tlb_miss;
4087
4088 crtc = intel_get_crtc_for_plane(dev, plane);
4089 if (crtc->fb == NULL || !crtc->enabled) {
4090 *cursor_wm = cursor->guard_size;
4091 *plane_wm = display->guard_size;
4092 return false;
4093 }
4094
4095 htotal = crtc->mode.htotal;
4096 hdisplay = crtc->mode.hdisplay;
4097 clock = crtc->mode.clock;
4098 pixel_size = crtc->fb->bits_per_pixel / 8;
4099
4100 /* Use the small buffer method to calculate plane watermark */
4101 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4102 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4103 if (tlb_miss > 0)
4104 entries += tlb_miss;
4105 entries = howmany(entries, display->cacheline_size);
4106 *plane_wm = entries + display->guard_size;
4107 if (*plane_wm > (int)display->max_wm)
4108 *plane_wm = display->max_wm;
4109
4110 /* Use the large buffer method to calculate cursor watermark */
4111 line_time_us = ((htotal * 1000) / clock);
4112 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4113 entries = line_count * 64 * pixel_size;
4114 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4115 if (tlb_miss > 0)
4116 entries += tlb_miss;
4117 entries = howmany(entries, cursor->cacheline_size);
4118 *cursor_wm = entries + cursor->guard_size;
4119 if (*cursor_wm > (int)cursor->max_wm)
4120 *cursor_wm = (int)cursor->max_wm;
4121
4122 return true;
4123}
4124
4125/*
4126 * Check the wm result.
4127 *
4128 * If any calculated watermark values is larger than the maximum value that
4129 * can be programmed into the associated watermark register, that watermark
4130 * must be disabled.
4131 */
4132static bool g4x_check_srwm(struct drm_device *dev,
4133 int display_wm, int cursor_wm,
4134 const struct intel_watermark_params *display,
4135 const struct intel_watermark_params *cursor)
4136{
4137 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4138 display_wm, cursor_wm);
4139
4140 if (display_wm > display->max_wm) {
4141 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4142 display_wm, display->max_wm);
4143 return false;
4144 }
4145
4146 if (cursor_wm > cursor->max_wm) {
4147 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4148 cursor_wm, cursor->max_wm);
4149 return false;
4150 }
4151
4152 if (!(display_wm || cursor_wm)) {
4153 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4154 return false;
4155 }
4156
4157 return true;
4158}
4159
4160static bool g4x_compute_srwm(struct drm_device *dev,
4161 int plane,
4162 int latency_ns,
4163 const struct intel_watermark_params *display,
4164 const struct intel_watermark_params *cursor,
4165 int *display_wm, int *cursor_wm)
4166{
4167 struct drm_crtc *crtc;
4168 int hdisplay, htotal, pixel_size, clock;
4169 unsigned long line_time_us;
4170 int line_count, line_size;
4171 int small, large;
4172 int entries;
4173
4174 if (!latency_ns) {
4175 *display_wm = *cursor_wm = 0;
4176 return false;
4177 }
4178
4179 crtc = intel_get_crtc_for_plane(dev, plane);
4180 hdisplay = crtc->mode.hdisplay;
4181 htotal = crtc->mode.htotal;
4182 clock = crtc->mode.clock;
4183 pixel_size = crtc->fb->bits_per_pixel / 8;
4184
4185 line_time_us = (htotal * 1000) / clock;
4186 line_count = (latency_ns / line_time_us + 1000) / 1000;
4187 line_size = hdisplay * pixel_size;
4188
4189 /* Use the minimum of the small and large buffer method for primary */
4190 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4191 large = line_count * line_size;
4192
4193 entries = howmany(min(small, large), display->cacheline_size);
4194 *display_wm = entries + display->guard_size;
4195
4196 /* calculate the self-refresh watermark for display cursor */
4197 entries = line_count * pixel_size * 64;
4198 entries = howmany(entries, cursor->cacheline_size);
4199 *cursor_wm = entries + cursor->guard_size;
4200
4201 return g4x_check_srwm(dev,
4202 *display_wm, *cursor_wm,
4203 display, cursor);
4204}
4205
/* True when exactly one bit is set in the enabled-pipes bitmask. */
#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
4207
/*
 * g4x_update_wm - compute and program all g4x watermark registers.
 *
 * WM0 is computed per plane (A and B); the self-refresh watermarks are
 * only enabled when exactly one plane is active and the SR computation
 * succeeds, otherwise FW_BLC_SELF_EN is cleared.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	/* self-refresh fetches have much higher latency than WM0 */
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4258
/*
 * i965_update_wm - program watermarks on 965-class hardware.
 *
 * The per-plane watermarks are fixed at 8; only the self-refresh (SR)
 * watermarks are computed, and only when exactly one CRTC is enabled.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* fallback SR watermark */
	int cursor_sr = 16;	/* fallback cursor SR watermark */

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = howmany(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;	/* register field is 9 bits */
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* same calculation for the (64-wide) cursor fetches */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4322
/*
 * i9xx_update_wm - compute and program watermarks for i8xx/i9xx parts.
 *
 * Plane A/B watermarks are always computed; the self-refresh watermark
 * is only computed for single-plane configurations on parts with
 * FW_BLC, and self-refresh is disabled around the register updates to
 * play safe.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	/* Pick the watermark parameters matching this generation */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		/* 'enabled' stays set only for single-plane configs */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = howmany(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh only for single-plane configurations */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4433
4434static void i830_update_wm(struct drm_device *dev)
4435{
4436 struct drm_i915_private *dev_priv = dev->dev_private;
4437 struct drm_crtc *crtc;
4438 uint32_t fwater_lo;
4439 int planea_wm;
4440
4441 crtc = single_enabled_crtc(dev);
4442 if (crtc == NULL)
4443 return;
4444
4445 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4446 dev_priv->display.get_fifo_size(dev, 0),
4447 crtc->fb->bits_per_pixel / 8,
4448 latency_ns);
4449 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4450 fwater_lo |= (3<<8) | planea_wm;
4451
4452 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4453
4454 I915_WRITE(FW_BLC, fwater_lo);
4455}
4456
/*
 * Ironlake LP0 watermark latencies, in nanoseconds; passed to
 * g4x_compute_wm0() as the *_latency_ns arguments.
 */
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
4459
4460/*
4461 * Check the wm result.
4462 *
4463 * If any calculated watermark values is larger than the maximum value that
4464 * can be programmed into the associated watermark register, that watermark
4465 * must be disabled.
4466 */
4467static bool ironlake_check_srwm(struct drm_device *dev, int level,
4468 int fbc_wm, int display_wm, int cursor_wm,
4469 const struct intel_watermark_params *display,
4470 const struct intel_watermark_params *cursor)
4471{
4472 struct drm_i915_private *dev_priv = dev->dev_private;
4473
4474 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4475 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4476
4477 if (fbc_wm > SNB_FBC_MAX_SRWM) {
4478 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4479 fbc_wm, SNB_FBC_MAX_SRWM, level);
4480
4481 /* fbc has it's own way to disable FBC WM */
4482 I915_WRITE(DISP_ARB_CTL,
4483 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4484 return false;
4485 }
4486
4487 if (display_wm > display->max_wm) {
4488 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4489 display_wm, SNB_DISPLAY_MAX_SRWM, level);
4490 return false;
4491 }
4492
4493 if (cursor_wm > cursor->max_wm) {
4494 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4495 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4496 return false;
4497 }
4498
4499 if (!(fbc_wm || display_wm || cursor_wm)) {
4500 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4501 return false;
4502 }
4503
4504 return true;
4505}
4506
4507/*
4508 * Compute watermark values of WM[1-3],
4509 */
4510static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4511 int latency_ns,
4512 const struct intel_watermark_params *display,
4513 const struct intel_watermark_params *cursor,
4514 int *fbc_wm, int *display_wm, int *cursor_wm)
4515{
4516 struct drm_crtc *crtc;
4517 unsigned long line_time_us;
4518 int hdisplay, htotal, pixel_size, clock;
4519 int line_count, line_size;
4520 int small, large;
4521 int entries;
4522
4523 if (!latency_ns) {
4524 *fbc_wm = *display_wm = *cursor_wm = 0;
4525 return false;
4526 }
4527
4528 crtc = intel_get_crtc_for_plane(dev, plane);
4529 hdisplay = crtc->mode.hdisplay;
4530 htotal = crtc->mode.htotal;
4531 clock = crtc->mode.clock;
4532 pixel_size = crtc->fb->bits_per_pixel / 8;
4533
4534 line_time_us = (htotal * 1000) / clock;
4535 line_count = (latency_ns / line_time_us + 1000) / 1000;
4536 line_size = hdisplay * pixel_size;
4537
4538 /* Use the minimum of the small and large buffer method for primary */
4539 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4540 large = line_count * line_size;
4541
4542 entries = howmany(min(small, large), display->cacheline_size);
4543 *display_wm = entries + display->guard_size;
4544
4545 /*
4546 * Spec says:
4547 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4548 */
4549 *fbc_wm = howmany(*display_wm * 64, line_size) + 2;
4550
4551 /* calculate the self-refresh watermark for display cursor */
4552 entries = line_count * pixel_size * 64;
4553 entries = howmany(entries, cursor->cacheline_size);
4554 *cursor_wm = entries + cursor->guard_size;
4555
4556 return ironlake_check_srwm(dev, level,
4557 *fbc_wm, *display_wm, *cursor_wm,
4558 display, cursor);
4559}
4560
/*
 * ironlake_update_wm - compute and program Ironlake watermarks.
 *
 * WM0 is programmed per pipe; the WM1/WM2 self-refresh levels are only
 * programmed when exactly one plane is enabled and each level's
 * computation passes validation.  WM3 is not programmed on ILK.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;	/* bitmask -> pipe number */

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4643
4644void sandybridge_update_wm(struct drm_device *dev)
4645{
4646 struct drm_i915_private *dev_priv = dev->dev_private;
4647 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
4648 u32 val;
4649 int fbc_wm, plane_wm, cursor_wm;
4650 unsigned int enabled;
4651
4652 enabled = 0;
4653 if (g4x_compute_wm0(dev, 0,
4654 &sandybridge_display_wm_info, latency,
4655 &sandybridge_cursor_wm_info, latency,
4656 &plane_wm, &cursor_wm)) {
4657 val = I915_READ(WM0_PIPEA_ILK);
4658 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4659 I915_WRITE(WM0_PIPEA_ILK, val |
4660 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4661 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4662 " plane %d, " "cursor: %d\n",
4663 plane_wm, cursor_wm);
4664 enabled |= 1;
4665 }
4666
4667 if (g4x_compute_wm0(dev, 1,
4668 &sandybridge_display_wm_info, latency,
4669 &sandybridge_cursor_wm_info, latency,
4670 &plane_wm, &cursor_wm)) {
4671 val = I915_READ(WM0_PIPEB_ILK);
4672 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4673 I915_WRITE(WM0_PIPEB_ILK, val |
4674 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4675 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4676 " plane %d, cursor: %d\n",
4677 plane_wm, cursor_wm);
4678 enabled |= 2;
4679 }
4680
4681 /* IVB has 3 pipes */
4682 if (IS_IVYBRIDGE(dev) &&
4683 g4x_compute_wm0(dev, 2,
4684 &sandybridge_display_wm_info, latency,
4685 &sandybridge_cursor_wm_info, latency,
4686 &plane_wm, &cursor_wm)) {
4687 val = I915_READ(WM0_PIPEC_IVB);
4688 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4689 I915_WRITE(WM0_PIPEC_IVB, val |
4690 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4691 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4692 " plane %d, cursor: %d\n",
4693 plane_wm, cursor_wm);
4694 enabled |= 3;
4695 }
4696
4697 /*
4698 * Calculate and update the self-refresh watermark only when one
4699 * display plane is used.
4700 *
4701 * SNB support 3 levels of watermark.
4702 *
4703 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4704 * and disabled in the descending order
4705 *
4706 */
4707 I915_WRITE(WM3_LP_ILK, 0);
4708 I915_WRITE(WM2_LP_ILK, 0);
4709 I915_WRITE(WM1_LP_ILK, 0);
4710
4711 if (!single_plane_enabled(enabled) ||
4712 dev_priv->sprite_scaling_enabled)
4713 return;
4714 enabled = ffs(enabled) - 1;
4715
4716 /* WM1 */
4717 if (!ironlake_compute_srwm(dev, 1, enabled,
4718 SNB_READ_WM1_LATENCY() * 500,
4719 &sandybridge_display_srwm_info,
4720 &sandybridge_cursor_srwm_info,
4721 &fbc_wm, &plane_wm, &cursor_wm))
4722 return;
4723
4724 I915_WRITE(WM1_LP_ILK,
4725 WM1_LP_SR_EN |
4726 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4727 (fbc_wm << WM1_LP_FBC_SHIFT) |
4728 (plane_wm << WM1_LP_SR_SHIFT) |
4729 cursor_wm);
4730
4731 /* WM2 */
4732 if (!ironlake_compute_srwm(dev, 2, enabled,
4733 SNB_READ_WM2_LATENCY() * 500,
4734 &sandybridge_display_srwm_info,
4735 &sandybridge_cursor_srwm_info,
4736 &fbc_wm, &plane_wm, &cursor_wm))
4737 return;
4738
4739 I915_WRITE(WM2_LP_ILK,
4740 WM2_LP_EN |
4741 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4742 (fbc_wm << WM1_LP_FBC_SHIFT) |
4743 (plane_wm << WM1_LP_SR_SHIFT) |
4744 cursor_wm);
4745
4746 /* WM3 */
4747 if (!ironlake_compute_srwm(dev, 3, enabled,
4748 SNB_READ_WM3_LATENCY() * 500,
4749 &sandybridge_display_srwm_info,
4750 &sandybridge_cursor_srwm_info,
4751 &fbc_wm, &plane_wm, &cursor_wm))
4752 return;
4753
4754 I915_WRITE(WM3_LP_ILK,
4755 WM3_LP_EN |
4756 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4757 (fbc_wm << WM1_LP_FBC_SHIFT) |
4758 (plane_wm << WM1_LP_SR_SHIFT) |
4759 cursor_wm);
4760}
4761
4762static bool
4763sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4764 uint32_t sprite_width, int pixel_size,
4765 const struct intel_watermark_params *display,
4766 int display_latency_ns, int *sprite_wm)
4767{
4768 struct drm_crtc *crtc;
4769 int clock;
4770 int entries, tlb_miss;
4771
4772 crtc = intel_get_crtc_for_plane(dev, plane);
4773 if (crtc->fb == NULL || !crtc->enabled) {
4774 *sprite_wm = display->guard_size;
4775 return false;
4776 }
4777
4778 clock = crtc->mode.clock;
4779
4780 /* Use the small buffer method to calculate the sprite watermark */
4781 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4782 tlb_miss = display->fifo_size*display->cacheline_size -
4783 sprite_width * 8;
4784 if (tlb_miss > 0)
4785 entries += tlb_miss;
4786 entries = howmany(entries, display->cacheline_size);
4787 *sprite_wm = entries + display->guard_size;
4788 if (*sprite_wm > (int)display->max_wm)
4789 *sprite_wm = display->max_wm;
4790
4791 return true;
4792}
4793
4794static bool
4795sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4796 uint32_t sprite_width, int pixel_size,
4797 const struct intel_watermark_params *display,
4798 int latency_ns, int *sprite_wm)
4799{
4800 struct drm_crtc *crtc;
4801 unsigned long line_time_us;
4802 int clock;
4803 int line_count, line_size;
4804 int small, large;
4805 int entries;
4806
4807 if (!latency_ns) {
4808 *sprite_wm = 0;
4809 return false;
4810 }
4811
4812 crtc = intel_get_crtc_for_plane(dev, plane);
4813 clock = crtc->mode.clock;
4814 if (!clock) {
4815 *sprite_wm = 0;
4816 return false;
4817 }
4818
4819 line_time_us = (sprite_width * 1000) / clock;
4820 if (!line_time_us) {
4821 *sprite_wm = 0;
4822 return false;
4823 }
4824
4825 line_count = (latency_ns / line_time_us + 1000) / 1000;
4826 line_size = sprite_width * pixel_size;
4827
4828 /* Use the minimum of the small and large buffer method for primary */
4829 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4830 large = line_count * line_size;
4831
4832 entries = howmany(min(small, large), display->cacheline_size);
4833 *sprite_wm = entries + display->guard_size;
4834
4835 return *sprite_wm > 0x3ff ? false : true;
4836}
4837
/*
 * sandybridge_update_sprite_wm - program the sprite watermarks for a pipe.
 *
 * Programs the WM0 sprite watermark, then the LP1 self-refresh sprite
 * watermark, and on Ivybridge additionally LP2 and LP3.  A failed
 * computation logs the failure and leaves subsequent levels
 * unprogrammed.
 */
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	/* Select the per-pipe WM0 register */
	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	/* Read-modify-write: only the sprite field of WM0 is touched */
	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
4916
4917/**
4918 * intel_update_watermarks - update FIFO watermark values based on current modes
4919 *
4920 * Calculate watermark values for the various WM regs based on current mode
4921 * and plane configuration.
4922 *
4923 * There are several cases to deal with here:
4924 * - normal (i.e. non-self-refresh)
4925 * - self-refresh (SR) mode
4926 * - lines are large relative to FIFO size (buffer can hold up to 2)
4927 * - lines are small relative to FIFO size (buffer can hold more than 2
4928 * lines), so need to account for TLB latency
4929 *
4930 * The normal calculation is:
4931 * watermark = dotclock * bytes per pixel * latency
4932 * where latency is platform & configuration dependent (we assume pessimal
4933 * values here).
4934 *
4935 * The SR calculation is:
4936 * watermark = (trunc(latency/line time)+1) * surface width *
4937 * bytes per pixel
4938 * where
4939 * line time = htotal / dotclock
4940 * surface width = hdisplay for normal plane and 64 for cursor
4941 * and latency is assumed to be high, as above.
4942 *
4943 * The final value programmed to the register should always be rounded up,
4944 * and include an extra 2 entries to account for clock crossings.
4945 *
4946 * We don't use the sprite, so we can ignore that. And on Crestline we have
4947 * to set the non-SR watermarks to 8.
4948 */
4949static void intel_update_watermarks(struct drm_device *dev)
4950{
4951 struct drm_i915_private *dev_priv = dev->dev_private;
4952
4953 if (dev_priv->display.update_wm)
4954 dev_priv->display.update_wm(dev);
4955}
4956
4957void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4958 uint32_t sprite_width, int pixel_size)
4959{
4960 struct drm_i915_private *dev_priv = dev->dev_private;
4961
4962 if (dev_priv->display.update_sprite_wm)
4963 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4964 pixel_size);
4965}
4966
4967static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4968{
4969 if (i915_panel_use_ssc >= 0)
4970 return i915_panel_use_ssc != 0;
4971 return dev_priv->lvds_use_ssc
4972 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4973}
4974

--- 207 unchanged lines hidden (view full) ---

5182 reduced_clock && i915_powersave) {
5183 I915_WRITE(FP1(pipe), fp2);
5184 intel_crtc->lowfreq_avail = true;
5185 } else {
5186 I915_WRITE(FP1(pipe), fp);
5187 }
5188}
5189
/*
 * NOTE(review): this definition appears twice in this chunk; the second
 * copy looks like a duplicate introduced by the side-by-side diff
 * rendering rather than a real second definition — confirm against the
 * actual file before merging.
 *
 * Decide whether LVDS spread-spectrum clocking should be used: the
 * i915_panel_use_ssc module parameter overrides when >= 0, otherwise
 * the VBT flag applies unless the SSC-disable quirk is set.
 */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915_panel_use_ssc >= 0)
		return i915_panel_use_ssc != 0;
	return dev_priv->lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
3590

--- 207 unchanged lines hidden (view full) ---

3798 reduced_clock && i915_powersave) {
3799 I915_WRITE(FP1(pipe), fp2);
3800 intel_crtc->lowfreq_avail = true;
3801 } else {
3802 I915_WRITE(FP1(pipe), fp);
3803 }
3804}
3805
/*
 * intel_update_lvds - program the LVDS port register during a mode set.
 * @crtc: CRTC driving the LVDS panel
 * @clock: chosen PLL dividers; p2 == 7 selects dual-channel mode
 * @adjusted_mode: mode whose sync-polarity flags are copied to the port
 *
 * Read-modify-writes the LVDS register: powers up the port, selects the
 * driving pipe, applies border/dual-channel/dither bits, and programs the
 * hsync/vsync polarity.
 */
static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
			      struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	temp = I915_READ(LVDS);
	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
	if (pipe == 1) {
		temp |= LVDS_PIPEB_SELECT;
	} else {
		temp &= ~LVDS_PIPEB_SELECT;
	}
	/* set the corresponding LVDS_BORDER bit */
	temp |= dev_priv->lvds_border_bits;
	/* Set the B0-B3 data pairs corresponding to whether we're going to
	 * set the DPLLs for dual-channel mode or not.
	 */
	if (clock->p2 == 7)
		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
	else
		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
	 * appropriately here, but we need to look more thoroughly into how
	 * panels behave in the two modes.
	 */
	/* set the dithering flag on LVDS as needed */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (dev_priv->lvds_dither)
			temp |= LVDS_ENABLE_DITHER;
		else
			temp &= ~LVDS_ENABLE_DITHER;
	}
	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		temp |= LVDS_HSYNC_POLARITY;
	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		temp |= LVDS_VSYNC_POLARITY;
	I915_WRITE(LVDS, temp);
}
3850
/*
 * i9xx_update_pll - compute and program the DPLL for a pipe on gen3+
 * (non-gen2) display hardware (the gen2 path is i8xx_update_pll —
 * presumably dispatched by the caller; confirm at the call site).
 * @crtc: CRTC being mode-set
 * @mode: requested mode (used for DP M/N programming)
 * @adjusted_mode: adjusted mode (pixel multiplier, LVDS sync flags)
 * @clock: primary PLL dividers
 * @reduced_clock: optional downclocked dividers (G4x dual-FP), may be NULL
 * @num_connectors: connector count on this CRTC (affects SSC selection)
 *
 * The write sequence is hardware-mandated: program the DPLL with VCO
 * disabled, wait, enable LVDS/DP as needed, enable the VCO, wait for the
 * clocks to stabilize, then write the pixel multiplier.
 */
static void i9xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock, intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll;
	bool is_sdvo;

	/* SDVO and HDMI share the SDVO clocking rules. */
	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			/* Only these platforms take the multiplier here. */
			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4x also carries the reduced clock's P1 in FPA1. */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Select the PLL reference input. */
	if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* First write: everything except the VCO enable bit. */
	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		intel_update_lvds(crtc, clock, adjusted_mode);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		intel_dp_set_m_n(crtc, mode, adjusted_mode);

	/* Now enable the VCO. */
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ has a dedicated register for the SDVO multiplier. */
		u32 temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}
}
3961
/*
 * i8xx_update_pll - compute and program the DPLL for a pipe on gen2
 * hardware (presumably the IS_GEN2 branch of the mode-set path; confirm
 * at the call site).
 * @crtc: CRTC being mode-set
 * @adjusted_mode: adjusted mode (LVDS sync flags)
 * @clock: chosen PLL dividers
 * @num_connectors: connector count on this CRTC (affects SSC selection)
 *
 * Same hardware-mandated sequence as the gen3+ path: write the DPLL with
 * the VCO disabled, wait, enable the VCO, wait, bring up LVDS, and
 * rewrite the DPLL once the clocks are stable.
 */
static void i8xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll;

	dpll = DPLL_VGA_MODE_DIS;

	/* Encode the P1/P2 post dividers; LVDS uses a different P1 layout. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/* Select the PLL reference input. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* First write: everything except the VCO enable bit. */
	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		intel_update_lvds(crtc, clock, adjusted_mode);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);
}
4021
5190static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5191 struct drm_display_mode *mode,
5192 struct drm_display_mode *adjusted_mode,
5193 int x, int y,
5194 struct drm_framebuffer *old_fb)
5195{
5196 struct drm_device *dev = crtc->dev;
5197 struct drm_i915_private *dev_priv = dev->dev_private;
5198 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5199 int pipe = intel_crtc->pipe;
5200 int plane = intel_crtc->plane;
5201 int refclk, num_connectors = 0;
5202 intel_clock_t clock, reduced_clock;
4022static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4023 struct drm_display_mode *mode,
4024 struct drm_display_mode *adjusted_mode,
4025 int x, int y,
4026 struct drm_framebuffer *old_fb)
4027{
4028 struct drm_device *dev = crtc->dev;
4029 struct drm_i915_private *dev_priv = dev->dev_private;
4030 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4031 int pipe = intel_crtc->pipe;
4032 int plane = intel_crtc->plane;
4033 int refclk, num_connectors = 0;
4034 intel_clock_t clock, reduced_clock;
5203 u32 dpll, dspcntr, pipeconf, vsyncshift;
5204 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
5205 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4035 u32 dspcntr, pipeconf, vsyncshift;
4036 bool ok, has_reduced_clock = false, is_sdvo = false;
4037 bool is_lvds = false, is_tv = false, is_dp = false;
5206 struct drm_mode_config *mode_config = &dev->mode_config;
5207 struct intel_encoder *encoder;
5208 const intel_limit_t *limit;
5209 int ret;
4038 struct drm_mode_config *mode_config = &dev->mode_config;
4039 struct intel_encoder *encoder;
4040 const intel_limit_t *limit;
4041 int ret;
5210 u32 temp;
5211 u32 lvds_sync = 0;
5212
5213 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5214 if (encoder->base.crtc != crtc)
5215 continue;
5216
5217 switch (encoder->type) {
5218 case INTEL_OUTPUT_LVDS:
5219 is_lvds = true;
5220 break;
5221 case INTEL_OUTPUT_SDVO:
5222 case INTEL_OUTPUT_HDMI:
5223 is_sdvo = true;
5224 if (encoder->needs_tv_clock)
5225 is_tv = true;
5226 break;
4042
4043 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4044 if (encoder->base.crtc != crtc)
4045 continue;
4046
4047 switch (encoder->type) {
4048 case INTEL_OUTPUT_LVDS:
4049 is_lvds = true;
4050 break;
4051 case INTEL_OUTPUT_SDVO:
4052 case INTEL_OUTPUT_HDMI:
4053 is_sdvo = true;
4054 if (encoder->needs_tv_clock)
4055 is_tv = true;
4056 break;
5227 case INTEL_OUTPUT_DVO:
5228 is_dvo = true;
5229 break;
5230 case INTEL_OUTPUT_TVOUT:
5231 is_tv = true;
5232 break;
4057 case INTEL_OUTPUT_TVOUT:
4058 is_tv = true;
4059 break;
5233 case INTEL_OUTPUT_ANALOG:
5234 is_crt = true;
5235 break;
5236 case INTEL_OUTPUT_DISPLAYPORT:
5237 is_dp = true;
5238 break;
5239 }
5240
5241 num_connectors++;
5242 }
5243

--- 30 unchanged lines hidden (view full) ---

5274 }
5275
5276 if (is_sdvo && is_tv)
5277 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
5278
5279 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5280 &reduced_clock : NULL);
5281
4060 case INTEL_OUTPUT_DISPLAYPORT:
4061 is_dp = true;
4062 break;
4063 }
4064
4065 num_connectors++;
4066 }
4067

--- 30 unchanged lines hidden (view full) ---

4098 }
4099
4100 if (is_sdvo && is_tv)
4101 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4102
4103 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
4104 &reduced_clock : NULL);
4105
5282 dpll = DPLL_VGA_MODE_DIS;
5283
5284 if (!IS_GEN2(dev)) {
5285 if (is_lvds)
5286 dpll |= DPLLB_MODE_LVDS;
5287 else
5288 dpll |= DPLLB_MODE_DAC_SERIAL;
5289 if (is_sdvo) {
5290 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5291 if (pixel_multiplier > 1) {
5292 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5293 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5294 }
5295 dpll |= DPLL_DVO_HIGH_SPEED;
5296 }
5297 if (is_dp)
5298 dpll |= DPLL_DVO_HIGH_SPEED;
5299
5300 /* compute bitmask from p1 value */
5301 if (IS_PINEVIEW(dev))
5302 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5303 else {
5304 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5305 if (IS_G4X(dev) && has_reduced_clock)
5306 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5307 }
5308 switch (clock.p2) {
5309 case 5:
5310 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5311 break;
5312 case 7:
5313 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5314 break;
5315 case 10:
5316 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5317 break;
5318 case 14:
5319 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5320 break;
5321 }
5322 if (INTEL_INFO(dev)->gen >= 4)
5323 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5324 } else {
5325 if (is_lvds) {
5326 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5327 } else {
5328 if (clock.p1 == 2)
5329 dpll |= PLL_P1_DIVIDE_BY_TWO;
5330 else
5331 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5332 if (clock.p2 == 4)
5333 dpll |= PLL_P2_DIVIDE_BY_4;
5334 }
5335 }
5336
5337 if (is_sdvo && is_tv)
5338 dpll |= PLL_REF_INPUT_TVCLKINBC;
5339 else if (is_tv)
5340 /* XXX: just matching BIOS for now */
5341 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5342 dpll |= 3;
5343 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5344 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4106 if (IS_GEN2(dev))
4107 i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
5345 else
4108 else
5346 dpll |= PLL_REF_INPUT_DREFCLK;
4109 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4110 has_reduced_clock ? &reduced_clock : NULL,
4111 num_connectors);
5347
5348 /* setup pipeconf */
5349 pipeconf = I915_READ(PIPECONF(pipe));
5350
5351 /* Set up the display plane register */
5352 dspcntr = DISPPLANE_GAMMA_ENABLE;
5353
5354 if (pipe == 0)

--- 20 unchanged lines hidden (view full) ---

5375 if (is_dp) {
5376 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5377 pipeconf |= PIPECONF_BPP_6 |
5378 PIPECONF_DITHER_EN |
5379 PIPECONF_DITHER_TYPE_SP;
5380 }
5381 }
5382
4112
4113 /* setup pipeconf */
4114 pipeconf = I915_READ(PIPECONF(pipe));
4115
4116 /* Set up the display plane register */
4117 dspcntr = DISPPLANE_GAMMA_ENABLE;
4118
4119 if (pipe == 0)

--- 20 unchanged lines hidden (view full) ---

4140 if (is_dp) {
4141 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4142 pipeconf |= PIPECONF_BPP_6 |
4143 PIPECONF_DITHER_EN |
4144 PIPECONF_DITHER_TYPE_SP;
4145 }
4146 }
4147
5383 dpll |= DPLL_VCO_ENABLE;
5384
5385 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5386 drm_mode_debug_printmodeline(mode);
5387
4148 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4149 drm_mode_debug_printmodeline(mode);
4150
5388 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5389
5390 POSTING_READ(DPLL(pipe));
5391 DELAY(150);
5392
5393 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5394 * This is an exception to the general rule that mode_set doesn't turn
5395 * things on.
5396 */
5397 if (is_lvds) {
5398 temp = I915_READ(LVDS);
5399 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5400 if (pipe == 1) {
5401 temp |= LVDS_PIPEB_SELECT;
5402 } else {
5403 temp &= ~LVDS_PIPEB_SELECT;
5404 }
5405 /* set the corresponsding LVDS_BORDER bit */
5406 temp |= dev_priv->lvds_border_bits;
5407 /* Set the B0-B3 data pairs corresponding to whether we're going to
5408 * set the DPLLs for dual-channel mode or not.
5409 */
5410 if (clock.p2 == 7)
5411 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5412 else
5413 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5414
5415 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5416 * appropriately here, but we need to look more thoroughly into how
5417 * panels behave in the two modes.
5418 */
5419 /* set the dithering flag on LVDS as needed */
5420 if (INTEL_INFO(dev)->gen >= 4) {
5421 if (dev_priv->lvds_dither)
5422 temp |= LVDS_ENABLE_DITHER;
5423 else
5424 temp &= ~LVDS_ENABLE_DITHER;
5425 }
5426 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5427 lvds_sync |= LVDS_HSYNC_POLARITY;
5428 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5429 lvds_sync |= LVDS_VSYNC_POLARITY;
5430 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5431 != lvds_sync) {
5432 char flags[2] = "-+";
5433 DRM_INFO("Changing LVDS panel from "
5434 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5435 flags[!(temp & LVDS_HSYNC_POLARITY)],
5436 flags[!(temp & LVDS_VSYNC_POLARITY)],
5437 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5438 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5439 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5440 temp |= lvds_sync;
5441 }
5442 I915_WRITE(LVDS, temp);
5443 }
5444
5445 if (is_dp) {
5446 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5447 }
5448
5449 I915_WRITE(DPLL(pipe), dpll);
5450
5451 /* Wait for the clocks to stabilize. */
5452 POSTING_READ(DPLL(pipe));
5453 DELAY(150);
5454
5455 if (INTEL_INFO(dev)->gen >= 4) {
5456 temp = 0;
5457 if (is_sdvo) {
5458 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5459 if (temp > 1)
5460 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5461 else
5462 temp = 0;
5463 }
5464 I915_WRITE(DPLL_MD(pipe), temp);
5465 } else {
5466 /* The pixel multiplier can only be updated once the
5467 * DPLL is enabled and the clocks are stable.
5468 *
5469 * So write it again.
5470 */
5471 I915_WRITE(DPLL(pipe), dpll);
5472 }
5473
5474 if (HAS_PIPE_CXSR(dev)) {
5475 if (intel_crtc->lowfreq_avail) {
5476 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5477 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5478 } else {
5479 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5480 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5481 }

--- 49 unchanged lines hidden (view full) ---

5531 I915_WRITE(PIPECONF(pipe), pipeconf);
5532 POSTING_READ(PIPECONF(pipe));
5533 intel_enable_pipe(dev_priv, pipe, false);
5534
5535 intel_wait_for_vblank(dev, pipe);
5536
5537 I915_WRITE(DSPCNTR(plane), dspcntr);
5538 POSTING_READ(DSPCNTR(plane));
4151 if (HAS_PIPE_CXSR(dev)) {
4152 if (intel_crtc->lowfreq_avail) {
4153 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4154 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4155 } else {
4156 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4157 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4158 }

--- 49 unchanged lines hidden (view full) ---

4208 I915_WRITE(PIPECONF(pipe), pipeconf);
4209 POSTING_READ(PIPECONF(pipe));
4210 intel_enable_pipe(dev_priv, pipe, false);
4211
4212 intel_wait_for_vblank(dev, pipe);
4213
4214 I915_WRITE(DSPCNTR(plane), dspcntr);
4215 POSTING_READ(DSPCNTR(plane));
5539 intel_enable_plane(dev_priv, plane, pipe);
5540
5541 ret = intel_pipe_set_base(crtc, x, y, old_fb);
5542
5543 intel_update_watermarks(dev);
5544
5545 return ret;
5546}
5547

--- 159 unchanged lines hidden (view full) ---

5707 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5708 int pipe = intel_crtc->pipe;
5709 int plane = intel_crtc->plane;
5710 int refclk, num_connectors = 0;
5711 intel_clock_t clock, reduced_clock;
5712 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5713 bool ok, has_reduced_clock = false, is_sdvo = false;
5714 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4216
4217 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4218
4219 intel_update_watermarks(dev);
4220
4221 return ret;
4222}
4223

--- 159 unchanged lines hidden (view full) ---

4383 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4384 int pipe = intel_crtc->pipe;
4385 int plane = intel_crtc->plane;
4386 int refclk, num_connectors = 0;
4387 intel_clock_t clock, reduced_clock;
4388 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4389 bool ok, has_reduced_clock = false, is_sdvo = false;
4390 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5715 struct intel_encoder *has_edp_encoder = NULL;
5716 struct drm_mode_config *mode_config = &dev->mode_config;
4391 struct drm_mode_config *mode_config = &dev->mode_config;
5717 struct intel_encoder *encoder;
4392 struct intel_encoder *encoder, *edp_encoder = NULL;
5718 const intel_limit_t *limit;
5719 int ret;
5720 struct fdi_m_n m_n = {0};
5721 u32 temp;
4393 const intel_limit_t *limit;
4394 int ret;
4395 struct fdi_m_n m_n = {0};
4396 u32 temp;
5722 u32 lvds_sync = 0;
5723 int target_clock, pixel_multiplier, lane, link_bw, factor;
5724 unsigned int pipe_bpp;
5725 bool dither;
4397 int target_clock, pixel_multiplier, lane, link_bw, factor;
4398 unsigned int pipe_bpp;
4399 bool dither;
4400 bool is_cpu_edp = false, is_pch_edp = false;
5726
5727 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5728 if (encoder->base.crtc != crtc)
5729 continue;
5730
5731 switch (encoder->type) {
5732 case INTEL_OUTPUT_LVDS:
5733 is_lvds = true;

--- 9 unchanged lines hidden (view full) ---

5743 break;
5744 case INTEL_OUTPUT_ANALOG:
5745 is_crt = true;
5746 break;
5747 case INTEL_OUTPUT_DISPLAYPORT:
5748 is_dp = true;
5749 break;
5750 case INTEL_OUTPUT_EDP:
4401
4402 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4403 if (encoder->base.crtc != crtc)
4404 continue;
4405
4406 switch (encoder->type) {
4407 case INTEL_OUTPUT_LVDS:
4408 is_lvds = true;

--- 9 unchanged lines hidden (view full) ---

4418 break;
4419 case INTEL_OUTPUT_ANALOG:
4420 is_crt = true;
4421 break;
4422 case INTEL_OUTPUT_DISPLAYPORT:
4423 is_dp = true;
4424 break;
4425 case INTEL_OUTPUT_EDP:
5751 has_edp_encoder = encoder;
4426 is_dp = true;
4427 if (intel_encoder_is_pch_edp(&encoder->base))
4428 is_pch_edp = true;
4429 else
4430 is_cpu_edp = true;
4431 edp_encoder = encoder;
5752 break;
5753 }
5754
5755 num_connectors++;
5756 }
5757
5758 refclk = ironlake_get_refclk(crtc);
5759

--- 46 unchanged lines hidden (view full) ---

5806 }
5807 }
5808
5809 /* FDI link */
5810 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5811 lane = 0;
5812 /* CPU eDP doesn't require FDI link, so just set DP M/N
5813 according to current link config */
4432 break;
4433 }
4434
4435 num_connectors++;
4436 }
4437
4438 refclk = ironlake_get_refclk(crtc);
4439

--- 46 unchanged lines hidden (view full) ---

4486 }
4487 }
4488
4489 /* FDI link */
4490 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4491 lane = 0;
4492 /* CPU eDP doesn't require FDI link, so just set DP M/N
4493 according to current link config */
5814 if (has_edp_encoder &&
5815 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4494 if (is_cpu_edp) {
5816 target_clock = mode->clock;
4495 target_clock = mode->clock;
5817 intel_edp_link_config(has_edp_encoder,
5818 &lane, &link_bw);
4496 intel_edp_link_config(edp_encoder, &lane, &link_bw);
5819 } else {
5820 /* [e]DP over FDI requires target mode clock
5821 instead of link clock */
4497 } else {
4498 /* [e]DP over FDI requires target mode clock
4499 instead of link clock */
5822 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4500 if (is_dp)
5823 target_clock = mode->clock;
5824 else
5825 target_clock = adjusted_mode->clock;
5826
5827 /* FDI is a binary signal running at ~2.7GHz, encoding
5828 * each output octet as 10 bits. The actual frequency
5829 * is stored as a divider into a 100MHz clock, and the
5830 * mode pixel clock is stored in units of 1KHz.

--- 74 unchanged lines hidden (view full) ---

5905 dpll |= DPLLB_MODE_DAC_SERIAL;
5906 if (is_sdvo) {
5907 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5908 if (pixel_multiplier > 1) {
5909 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5910 }
5911 dpll |= DPLL_DVO_HIGH_SPEED;
5912 }
4501 target_clock = mode->clock;
4502 else
4503 target_clock = adjusted_mode->clock;
4504
4505 /* FDI is a binary signal running at ~2.7GHz, encoding
4506 * each output octet as 10 bits. The actual frequency
4507 * is stored as a divider into a 100MHz clock, and the
4508 * mode pixel clock is stored in units of 1KHz.

--- 74 unchanged lines hidden (view full) ---

4583 dpll |= DPLLB_MODE_DAC_SERIAL;
4584 if (is_sdvo) {
4585 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4586 if (pixel_multiplier > 1) {
4587 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4588 }
4589 dpll |= DPLL_DVO_HIGH_SPEED;
4590 }
5913 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4591 if (is_dp && !is_cpu_edp)
5914 dpll |= DPLL_DVO_HIGH_SPEED;
5915
5916 /* compute bitmask from p1 value */
5917 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5918 /* also FPA1 */
5919 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5920
5921 switch (clock.p2) {

--- 22 unchanged lines hidden (view full) ---

5944 else
5945 dpll |= PLL_REF_INPUT_DREFCLK;
5946
5947 /* setup pipeconf */
5948 pipeconf = I915_READ(PIPECONF(pipe));
5949
5950 /* Set up the display plane register */
5951 dspcntr = DISPPLANE_GAMMA_ENABLE;
4592 dpll |= DPLL_DVO_HIGH_SPEED;
4593
4594 /* compute bitmask from p1 value */
4595 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4596 /* also FPA1 */
4597 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4598
4599 switch (clock.p2) {

--- 22 unchanged lines hidden (view full) ---

4622 else
4623 dpll |= PLL_REF_INPUT_DREFCLK;
4624
4625 /* setup pipeconf */
4626 pipeconf = I915_READ(PIPECONF(pipe));
4627
4628 /* Set up the display plane register */
4629 dspcntr = DISPPLANE_GAMMA_ENABLE;
5952
5953 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5954 drm_mode_debug_printmodeline(mode);
5955
4630 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4631 drm_mode_debug_printmodeline(mode);
4632
5956 /* PCH eDP needs FDI, but CPU eDP does not */
5957 if (!intel_crtc->no_pll) {
5958 if (!has_edp_encoder ||
5959 intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5960 I915_WRITE(PCH_FP0(pipe), fp);
5961 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4633 /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
4634 * pre-Haswell/LPT generation */
4635 if (HAS_PCH_LPT(dev)) {
4636 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4637 pipe);
4638 } else if (!is_cpu_edp) {
4639 struct intel_pch_pll *pll;
5962
4640
5963 POSTING_READ(PCH_DPLL(pipe));
5964 DELAY(150);
5965 }
5966 } else {
5967 if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5968 fp == I915_READ(PCH_FP0(0))) {
5969 intel_crtc->use_pll_a = true;
5970 DRM_DEBUG_KMS("using pipe a dpll\n");
5971 } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5972 fp == I915_READ(PCH_FP0(1))) {
5973 intel_crtc->use_pll_a = false;
5974 DRM_DEBUG_KMS("using pipe b dpll\n");
5975 } else {
5976 DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5977 return -EINVAL;
5978 }
5979 }
4641 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
4642 if (pll == NULL) {
4643 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4644 pipe);
4645 return -EINVAL;
4646 }
4647 } else
4648 intel_put_pch_pll(intel_crtc);
5980
5981 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5982 * This is an exception to the general rule that mode_set doesn't turn
5983 * things on.
5984 */
5985 if (is_lvds) {
5986 temp = I915_READ(PCH_LVDS);
5987 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;

--- 16 unchanged lines hidden (view full) ---

6004 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
6005 else
6006 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
6007
6008 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
6009 * appropriately here, but we need to look more thoroughly into how
6010 * panels behave in the two modes.
6011 */
4649
4650 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4651 * This is an exception to the general rule that mode_set doesn't turn
4652 * things on.
4653 */
4654 if (is_lvds) {
4655 temp = I915_READ(PCH_LVDS);
4656 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;

--- 16 unchanged lines hidden (view full) ---

4673 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4674 else
4675 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4676
4677 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4678 * appropriately here, but we need to look more thoroughly into how
4679 * panels behave in the two modes.
4680 */
4681 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6012 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4682 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
6013 lvds_sync |= LVDS_HSYNC_POLARITY;
4683 temp |= LVDS_HSYNC_POLARITY;
6014 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4684 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
6015 lvds_sync |= LVDS_VSYNC_POLARITY;
6016 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
6017 != lvds_sync) {
6018 char flags[2] = "-+";
6019 DRM_INFO("Changing LVDS panel from "
6020 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
6021 flags[!(temp & LVDS_HSYNC_POLARITY)],
6022 flags[!(temp & LVDS_VSYNC_POLARITY)],
6023 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
6024 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
6025 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6026 temp |= lvds_sync;
6027 }
4685 temp |= LVDS_VSYNC_POLARITY;
6028 I915_WRITE(PCH_LVDS, temp);
6029 }
6030
6031 pipeconf &= ~PIPECONF_DITHER_EN;
6032 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
6033 if ((is_lvds && dev_priv->lvds_dither) || dither) {
6034 pipeconf |= PIPECONF_DITHER_EN;
6035 pipeconf |= PIPECONF_DITHER_TYPE_SP;
6036 }
4686 I915_WRITE(PCH_LVDS, temp);
4687 }
4688
4689 pipeconf &= ~PIPECONF_DITHER_EN;
4690 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
4691 if ((is_lvds && dev_priv->lvds_dither) || dither) {
4692 pipeconf |= PIPECONF_DITHER_EN;
4693 pipeconf |= PIPECONF_DITHER_TYPE_SP;
4694 }
6037 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4695 if (is_dp && !is_cpu_edp) {
6038 intel_dp_set_m_n(crtc, mode, adjusted_mode);
6039 } else {
6040 /* For non-DP output, clear any trans DP clock recovery setting.*/
6041 I915_WRITE(TRANSDATA_M1(pipe), 0);
6042 I915_WRITE(TRANSDATA_N1(pipe), 0);
6043 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
6044 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6045 }
6046
4696 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4697 } else {
4698 /* For non-DP output, clear any trans DP clock recovery setting.*/
4699 I915_WRITE(TRANSDATA_M1(pipe), 0);
4700 I915_WRITE(TRANSDATA_N1(pipe), 0);
4701 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
4702 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
4703 }
4704
6047 if (!intel_crtc->no_pll &&
6048 (!has_edp_encoder ||
6049 intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6050 I915_WRITE(PCH_DPLL(pipe), dpll);
4705 if (intel_crtc->pch_pll) {
4706 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
6051
6052 /* Wait for the clocks to stabilize. */
4707
4708 /* Wait for the clocks to stabilize. */
6053 POSTING_READ(PCH_DPLL(pipe));
4709 POSTING_READ(intel_crtc->pch_pll->pll_reg);
6054 DELAY(150);
6055
6056 /* The pixel multiplier can only be updated once the
6057 * DPLL is enabled and the clocks are stable.
6058 *
6059 * So write it again.
6060 */
4710 DELAY(150);
4711
4712 /* The pixel multiplier can only be updated once the
4713 * DPLL is enabled and the clocks are stable.
4714 *
4715 * So write it again.
4716 */
6061 I915_WRITE(PCH_DPLL(pipe), dpll);
4717 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
6062 }
6063
6064 intel_crtc->lowfreq_avail = false;
4718 }
4719
4720 intel_crtc->lowfreq_avail = false;
6065 if (!intel_crtc->no_pll) {
4721 if (intel_crtc->pch_pll) {
6066 if (is_lvds && has_reduced_clock && i915_powersave) {
4722 if (is_lvds && has_reduced_clock && i915_powersave) {
6067 I915_WRITE(PCH_FP1(pipe), fp2);
4723 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
6068 intel_crtc->lowfreq_avail = true;
6069 if (HAS_PIPE_CXSR(dev)) {
6070 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6071 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6072 }
6073 } else {
4724 intel_crtc->lowfreq_avail = true;
4725 if (HAS_PIPE_CXSR(dev)) {
4726 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4727 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4728 }
4729 } else {
6074 I915_WRITE(PCH_FP1(pipe), fp);
4730 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
6075 if (HAS_PIPE_CXSR(dev)) {
6076 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6077 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
6078 }
6079 }
6080 }
6081
6082 pipeconf &= ~PIPECONF_INTERLACE_MASK;

--- 36 unchanged lines hidden (view full) ---

6119 I915_WRITE(PIPESRC(pipe),
6120 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
6121
6122 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
6123 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
6124 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6125 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6126
4731 if (HAS_PIPE_CXSR(dev)) {
4732 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4733 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4734 }
4735 }
4736 }
4737
4738 pipeconf &= ~PIPECONF_INTERLACE_MASK;

--- 36 unchanged lines hidden (view full) ---

4775 I915_WRITE(PIPESRC(pipe),
4776 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4777
4778 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4779 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4780 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4781 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
4782
6127 if (has_edp_encoder &&
6128 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4783 if (is_cpu_edp)
6129 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4784 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6130 }
6131
6132 I915_WRITE(PIPECONF(pipe), pipeconf);
6133 POSTING_READ(PIPECONF(pipe));
6134
6135 intel_wait_for_vblank(dev, pipe);
6136
6137 I915_WRITE(DSPCNTR(plane), dspcntr);
6138 POSTING_READ(DSPCNTR(plane));
6139
6140 ret = intel_pipe_set_base(crtc, x, y, old_fb);
6141
6142 intel_update_watermarks(dev);
6143
4785
4786 I915_WRITE(PIPECONF(pipe), pipeconf);
4787 POSTING_READ(PIPECONF(pipe));
4788
4789 intel_wait_for_vblank(dev, pipe);
4790
4791 I915_WRITE(DSPCNTR(plane), dspcntr);
4792 POSTING_READ(DSPCNTR(plane));
4793
4794 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4795
4796 intel_update_watermarks(dev);
4797
4798 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
4799
6144 return ret;
6145}
6146
6147static int intel_crtc_mode_set(struct drm_crtc *crtc,
6148 struct drm_display_mode *mode,
6149 struct drm_display_mode *adjusted_mode,
6150 int x, int y,
6151 struct drm_framebuffer *old_fb)

--- 342 unchanged lines hidden (view full) ---

6494 y = -y;
6495 }
6496 pos |= y << CURSOR_Y_SHIFT;
6497
6498 visible = base != 0;
6499 if (!visible && !intel_crtc->cursor_visible)
6500 return;
6501
4800 return ret;
4801}
4802
4803static int intel_crtc_mode_set(struct drm_crtc *crtc,
4804 struct drm_display_mode *mode,
4805 struct drm_display_mode *adjusted_mode,
4806 int x, int y,
4807 struct drm_framebuffer *old_fb)

--- 342 unchanged lines hidden (view full) ---

5150 y = -y;
5151 }
5152 pos |= y << CURSOR_Y_SHIFT;
5153
5154 visible = base != 0;
5155 if (!visible && !intel_crtc->cursor_visible)
5156 return;
5157
6502 if (IS_IVYBRIDGE(dev)) {
5158 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
6503 I915_WRITE(CURPOS_IVB(pipe), pos);
6504 ivb_update_cursor(crtc, base);
6505 } else {
6506 I915_WRITE(CURPOS(pipe), pos);
6507 if (IS_845G(dev) || IS_I865G(dev))
6508 i845_update_cursor(crtc, base);
6509 else
6510 i9xx_update_cursor(crtc, base);
6511 }
5159 I915_WRITE(CURPOS_IVB(pipe), pos);
5160 ivb_update_cursor(crtc, base);
5161 } else {
5162 I915_WRITE(CURPOS(pipe), pos);
5163 if (IS_845G(dev) || IS_I865G(dev))
5164 i845_update_cursor(crtc, base);
5165 else
5166 i9xx_update_cursor(crtc, base);
5167 }
6512
6513 if (visible)
6514 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
6515}
6516
6517static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6518 struct drm_file *file,
6519 uint32_t handle,
6520 uint32_t width, uint32_t height)
6521{
6522 struct drm_device *dev = crtc->dev;

--- 68 unchanged lines hidden (view full) ---

6591 I915_WRITE(CURSIZE, (height << 12) | width);
6592
6593 finish:
6594 if (intel_crtc->cursor_bo) {
6595 if (dev_priv->info->cursor_needs_physical) {
6596 if (intel_crtc->cursor_bo != obj)
6597 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6598 } else
5168}
5169
5170static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5171 struct drm_file *file,
5172 uint32_t handle,
5173 uint32_t width, uint32_t height)
5174{
5175 struct drm_device *dev = crtc->dev;

--- 68 unchanged lines hidden (view full) ---

5244 I915_WRITE(CURSIZE, (height << 12) | width);
5245
5246 finish:
5247 if (intel_crtc->cursor_bo) {
5248 if (dev_priv->info->cursor_needs_physical) {
5249 if (intel_crtc->cursor_bo != obj)
5250 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5251 } else
6599 i915_gem_object_unpin(intel_crtc->cursor_bo);
5252 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
6600 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6601 }
6602
6603 DRM_UNLOCK(dev);
6604
6605 intel_crtc->cursor_addr = addr;
6606 intel_crtc->cursor_bo = obj;
6607 intel_crtc->cursor_width = width;
6608 intel_crtc->cursor_height = height;
6609
6610 intel_crtc_update_cursor(crtc, true);
6611
6612 return 0;
6613fail_unpin:
5253 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
5254 }
5255
5256 DRM_UNLOCK(dev);
5257
5258 intel_crtc->cursor_addr = addr;
5259 intel_crtc->cursor_bo = obj;
5260 intel_crtc->cursor_width = width;
5261 intel_crtc->cursor_height = height;
5262
5263 intel_crtc_update_cursor(crtc, true);
5264
5265 return 0;
5266fail_unpin:
6614 i915_gem_object_unpin(obj);
5267 i915_gem_object_unpin_from_display_plane(obj);
6615fail_locked:
6616 DRM_UNLOCK(dev);
6617fail:
6618 drm_gem_object_unreference_unlocked(&obj->base);
6619 return ret;
6620}
6621
6622static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)

--- 411 unchanged lines hidden (view full) ---

7034 mode->hsync_start = (hsync & 0xffff) + 1;
7035 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
7036 mode->vdisplay = (vtot & 0xffff) + 1;
7037 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
7038 mode->vsync_start = (vsync & 0xffff) + 1;
7039 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
7040
7041 drm_mode_set_name(mode);
5268fail_locked:
5269 DRM_UNLOCK(dev);
5270fail:
5271 drm_gem_object_unreference_unlocked(&obj->base);
5272 return ret;
5273}
5274
5275static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)

--- 411 unchanged lines hidden (view full) ---

5687 mode->hsync_start = (hsync & 0xffff) + 1;
5688 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
5689 mode->vdisplay = (vtot & 0xffff) + 1;
5690 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
5691 mode->vsync_start = (vsync & 0xffff) + 1;
5692 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5693
5694 drm_mode_set_name(mode);
7042 drm_mode_set_crtcinfo(mode, 0);
7043
7044 return mode;
7045}
7046
7047#define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz)
7048
7049/* When this timer fires, we've been idle for awhile */
7050static void intel_gpu_idle_timer(void *arg)

--- 150 unchanged lines hidden (view full) ---

7201 drm_i915_private_t *dev_priv = dev->dev_private;
7202 struct drm_crtc *crtc = NULL;
7203 struct intel_framebuffer *intel_fb;
7204 struct intel_crtc *intel_crtc;
7205
7206 if (!drm_core_check_feature(dev, DRIVER_MODESET))
7207 return;
7208
5695
5696 return mode;
5697}
5698
5699#define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz)
5700
5701/* When this timer fires, we've been idle for awhile */
5702static void intel_gpu_idle_timer(void *arg)

--- 150 unchanged lines hidden (view full) ---

5853 drm_i915_private_t *dev_priv = dev->dev_private;
5854 struct drm_crtc *crtc = NULL;
5855 struct intel_framebuffer *intel_fb;
5856 struct intel_crtc *intel_crtc;
5857
5858 if (!drm_core_check_feature(dev, DRIVER_MODESET))
5859 return;
5860
7209 if (!dev_priv->busy)
5861 if (!dev_priv->busy) {
5862 intel_sanitize_pm(dev);
7210 dev_priv->busy = true;
5863 dev_priv->busy = true;
7211 else
5864 } else
7212 callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
7213 intel_gpu_idle_timer, dev);
7214
5865 callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
5866 intel_gpu_idle_timer, dev);
5867
5868 if (obj == NULL)
5869 return;
5870
7215 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7216 if (!crtc->fb)
7217 continue;
7218
7219 intel_crtc = to_intel_crtc(crtc);
7220 intel_fb = to_intel_framebuffer(crtc->fb);
7221 if (intel_fb->obj == obj) {
7222 if (!intel_crtc->busy) {

--- 156 unchanged lines hidden (view full) ---

7379 struct drm_crtc *crtc,
7380 struct drm_framebuffer *fb,
7381 struct drm_i915_gem_object *obj)
7382{
7383 struct drm_i915_private *dev_priv = dev->dev_private;
7384 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7385 unsigned long offset;
7386 u32 flip_mask;
5871 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5872 if (!crtc->fb)
5873 continue;
5874
5875 intel_crtc = to_intel_crtc(crtc);
5876 intel_fb = to_intel_framebuffer(crtc->fb);
5877 if (intel_fb->obj == obj) {
5878 if (!intel_crtc->busy) {

--- 156 unchanged lines hidden (view full) ---

6035 struct drm_crtc *crtc,
6036 struct drm_framebuffer *fb,
6037 struct drm_i915_gem_object *obj)
6038{
6039 struct drm_i915_private *dev_priv = dev->dev_private;
6040 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6041 unsigned long offset;
6042 u32 flip_mask;
6043 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
7387 int ret;
7388
6044 int ret;
6045
7389 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6046 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7390 if (ret)
6047 if (ret)
7391 goto out;
6048 goto err;
7392
7393 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7394 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7395
6049
6050 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6051 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
6052
7396 ret = BEGIN_LP_RING(6);
6053 ret = intel_ring_begin(ring, 6);
7397 if (ret)
6054 if (ret)
7398 goto out;
6055 goto err_unpin;
7399
7400 /* Can't queue multiple flips, so wait for the previous
7401 * one to finish before executing the next.
7402 */
7403 if (intel_crtc->plane)
7404 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7405 else
7406 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6056
6057 /* Can't queue multiple flips, so wait for the previous
6058 * one to finish before executing the next.
6059 */
6060 if (intel_crtc->plane)
6061 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6062 else
6063 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7407 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7408 OUT_RING(MI_NOOP);
7409 OUT_RING(MI_DISPLAY_FLIP |
7410 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7411 OUT_RING(fb->pitches[0]);
7412 OUT_RING(obj->gtt_offset + offset);
7413 OUT_RING(0); /* aux display base address, unused */
7414 ADVANCE_LP_RING();
7415out:
6064 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6065 intel_ring_emit(ring, MI_NOOP);
6066 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6067 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6068 intel_ring_emit(ring, fb->pitches[0]);
6069 intel_ring_emit(ring, obj->gtt_offset + offset);
6070 intel_ring_emit(ring, 0); /* aux display base address, unused */
6071 intel_ring_advance(ring);
6072 return 0;
6073
6074err_unpin:
6075 intel_unpin_fb_obj(obj);
6076err:
7416 return ret;
7417}
7418
7419static int intel_gen3_queue_flip(struct drm_device *dev,
7420 struct drm_crtc *crtc,
7421 struct drm_framebuffer *fb,
7422 struct drm_i915_gem_object *obj)
7423{
7424 struct drm_i915_private *dev_priv = dev->dev_private;
7425 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7426 unsigned long offset;
7427 u32 flip_mask;
6077 return ret;
6078}
6079
6080static int intel_gen3_queue_flip(struct drm_device *dev,
6081 struct drm_crtc *crtc,
6082 struct drm_framebuffer *fb,
6083 struct drm_i915_gem_object *obj)
6084{
6085 struct drm_i915_private *dev_priv = dev->dev_private;
6086 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6087 unsigned long offset;
6088 u32 flip_mask;
6089 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
7428 int ret;
7429
6090 int ret;
6091
7430 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6092 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7431 if (ret)
6093 if (ret)
7432 goto out;
6094 goto err;
7433
7434 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7435 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7436
6095
6096 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6097 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
6098
7437 ret = BEGIN_LP_RING(6);
6099 ret = intel_ring_begin(ring, 6);
7438 if (ret)
6100 if (ret)
7439 goto out;
6101 goto err_unpin;
7440
7441 if (intel_crtc->plane)
7442 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7443 else
7444 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6102
6103 if (intel_crtc->plane)
6104 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6105 else
6106 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7445 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7446 OUT_RING(MI_NOOP);
7447 OUT_RING(MI_DISPLAY_FLIP_I915 |
7448 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7449 OUT_RING(fb->pitches[0]);
7450 OUT_RING(obj->gtt_offset + offset);
7451 OUT_RING(MI_NOOP);
6107 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6108 intel_ring_emit(ring, MI_NOOP);
6109 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
6110 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6111 intel_ring_emit(ring, fb->pitches[0]);
6112 intel_ring_emit(ring, obj->gtt_offset + offset);
6113 intel_ring_emit(ring, MI_NOOP);
7452
6114
7453 ADVANCE_LP_RING();
7454out:
6115 intel_ring_advance(ring);
6116 return 0;
6117
6118err_unpin:
6119 intel_unpin_fb_obj(obj);
6120err:
7455 return ret;
7456}
7457
7458static int intel_gen4_queue_flip(struct drm_device *dev,
7459 struct drm_crtc *crtc,
7460 struct drm_framebuffer *fb,
7461 struct drm_i915_gem_object *obj)
7462{
7463 struct drm_i915_private *dev_priv = dev->dev_private;
7464 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7465 uint32_t pf, pipesrc;
6121 return ret;
6122}
6123
6124static int intel_gen4_queue_flip(struct drm_device *dev,
6125 struct drm_crtc *crtc,
6126 struct drm_framebuffer *fb,
6127 struct drm_i915_gem_object *obj)
6128{
6129 struct drm_i915_private *dev_priv = dev->dev_private;
6130 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6131 uint32_t pf, pipesrc;
6132 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
7466 int ret;
7467
6133 int ret;
6134
7468 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6135 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7469 if (ret)
6136 if (ret)
7470 goto out;
6137 goto err;
7471
6138
7472 ret = BEGIN_LP_RING(4);
6139 ret = intel_ring_begin(ring, 4);
7473 if (ret)
6140 if (ret)
7474 goto out;
6141 goto err_unpin;
7475
7476 /* i965+ uses the linear or tiled offsets from the
7477 * Display Registers (which do not change across a page-flip)
7478 * so we need only reprogram the base address.
7479 */
6142
6143 /* i965+ uses the linear or tiled offsets from the
6144 * Display Registers (which do not change across a page-flip)
6145 * so we need only reprogram the base address.
6146 */
7480 OUT_RING(MI_DISPLAY_FLIP |
7481 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7482 OUT_RING(fb->pitches[0]);
7483 OUT_RING(obj->gtt_offset | obj->tiling_mode);
6147 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6148 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6149 intel_ring_emit(ring, fb->pitches[0]);
6150 intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
7484
7485 /* XXX Enabling the panel-fitter across page-flip is so far
7486 * untested on non-native modes, so ignore it for now.
7487 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7488 */
7489 pf = 0;
7490 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6151
6152 /* XXX Enabling the panel-fitter across page-flip is so far
6153 * untested on non-native modes, so ignore it for now.
6154 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6155 */
6156 pf = 0;
6157 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7491 OUT_RING(pf | pipesrc);
7492 ADVANCE_LP_RING();
7493out:
6158 intel_ring_emit(ring, pf | pipesrc);
6159 intel_ring_advance(ring);
6160 return 0;
6161
6162err_unpin:
6163 intel_unpin_fb_obj(obj);
6164err:
7494 return ret;
7495}
7496
7497static int intel_gen6_queue_flip(struct drm_device *dev,
7498 struct drm_crtc *crtc,
7499 struct drm_framebuffer *fb,
7500 struct drm_i915_gem_object *obj)
7501{
7502 struct drm_i915_private *dev_priv = dev->dev_private;
7503 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6165 return ret;
6166}
6167
6168static int intel_gen6_queue_flip(struct drm_device *dev,
6169 struct drm_crtc *crtc,
6170 struct drm_framebuffer *fb,
6171 struct drm_i915_gem_object *obj)
6172{
6173 struct drm_i915_private *dev_priv = dev->dev_private;
6174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6175 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
7504 uint32_t pf, pipesrc;
7505 int ret;
7506
6176 uint32_t pf, pipesrc;
6177 int ret;
6178
7507 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6179 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7508 if (ret)
6180 if (ret)
7509 goto out;
6181 goto err;
7510
6182
7511 ret = BEGIN_LP_RING(4);
6183 ret = intel_ring_begin(ring, 4);
7512 if (ret)
6184 if (ret)
7513 goto out;
6185 goto err_unpin;
7514
6186
7515 OUT_RING(MI_DISPLAY_FLIP |
7516 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7517 OUT_RING(fb->pitches[0] | obj->tiling_mode);
7518 OUT_RING(obj->gtt_offset);
6187 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6188 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6189 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
6190 intel_ring_emit(ring, obj->gtt_offset);
7519
7520 /* Contrary to the suggestions in the documentation,
7521 * "Enable Panel Fitter" does not seem to be required when page
7522 * flipping with a non-native mode, and worse causes a normal
7523 * modeset to fail.
7524 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7525 */
7526 pf = 0;
7527 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6191
6192 /* Contrary to the suggestions in the documentation,
6193 * "Enable Panel Fitter" does not seem to be required when page
6194 * flipping with a non-native mode, and worse causes a normal
6195 * modeset to fail.
6196 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6197 */
6198 pf = 0;
6199 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7528 OUT_RING(pf | pipesrc);
7529 ADVANCE_LP_RING();
7530out:
6200 intel_ring_emit(ring, pf | pipesrc);
6201 intel_ring_advance(ring);
6202 return 0;
6203
6204err_unpin:
6205 intel_unpin_fb_obj(obj);
6206err:
7531 return ret;
7532}
7533
7534/*
7535 * On gen7 we currently use the blit ring because (in early silicon at least)
7536 * the render ring doesn't give us interrpts for page flip completion, which
7537 * means clients will hang after the first flip is queued. Fortunately the
7538 * blit ring generates interrupts properly, so use it instead.

--- 5 unchanged lines hidden (view full) ---

7544{
7545 struct drm_i915_private *dev_priv = dev->dev_private;
7546 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7547 struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
7548 int ret;
7549
7550 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7551 if (ret)
6207 return ret;
6208}
6209
6210/*
6211 * On gen7 we currently use the blit ring because (in early silicon at least)
6212 * the render ring doesn't give us interrpts for page flip completion, which
6213 * means clients will hang after the first flip is queued. Fortunately the
6214 * blit ring generates interrupts properly, so use it instead.

--- 5 unchanged lines hidden (view full) ---

6220{
6221 struct drm_i915_private *dev_priv = dev->dev_private;
6222 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6223 struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
6224 int ret;
6225
6226 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6227 if (ret)
7552 goto out;
6228 goto err;
7553
7554 ret = intel_ring_begin(ring, 4);
7555 if (ret)
6229
6230 ret = intel_ring_begin(ring, 4);
6231 if (ret)
7556 goto out;
6232 goto err_unpin;
7557
7558 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7559 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7560 intel_ring_emit(ring, (obj->gtt_offset));
7561 intel_ring_emit(ring, (MI_NOOP));
7562 intel_ring_advance(ring);
6233
6234 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
6235 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
6236 intel_ring_emit(ring, (obj->gtt_offset));
6237 intel_ring_emit(ring, (MI_NOOP));
6238 intel_ring_advance(ring);
7563out:
6239 return 0;
6240
6241err_unpin:
6242 intel_unpin_fb_obj(obj);
6243err:
7564 return ret;
7565}
7566
7567static int intel_default_queue_flip(struct drm_device *dev,
7568 struct drm_crtc *crtc,
7569 struct drm_framebuffer *fb,
7570 struct drm_i915_gem_object *obj)
7571{

--- 56 unchanged lines hidden (view full) ---

7628 * the flip occurs and the object is no longer visible.
7629 */
7630 atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
7631
7632 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7633 if (ret)
7634 goto cleanup_pending;
7635 intel_disable_fbc(dev);
6244 return ret;
6245}
6246
6247static int intel_default_queue_flip(struct drm_device *dev,
6248 struct drm_crtc *crtc,
6249 struct drm_framebuffer *fb,
6250 struct drm_i915_gem_object *obj)
6251{

--- 56 unchanged lines hidden (view full) ---

6308 * the flip occurs and the object is no longer visible.
6309 */
6310 atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
6311
6312 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6313 if (ret)
6314 goto cleanup_pending;
6315 intel_disable_fbc(dev);
6316 intel_mark_busy(dev, obj);
7636 DRM_UNLOCK(dev);
7637
7638 CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj);
7639
7640 return 0;
7641
7642cleanup_pending:
7643 atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);

--- 12 unchanged lines hidden (view full) ---

7656 return ret;
7657}
7658
7659static void intel_sanitize_modesetting(struct drm_device *dev,
7660 int pipe, int plane)
7661{
7662 struct drm_i915_private *dev_priv = dev->dev_private;
7663 u32 reg, val;
6317 DRM_UNLOCK(dev);
6318
6319 CTR2(KTR_DRM, "i915_flip_request %d %p", intel_crtc->plane, obj);
6320
6321 return 0;
6322
6323cleanup_pending:
6324 atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);

--- 12 unchanged lines hidden (view full) ---

6337 return ret;
6338}
6339
6340static void intel_sanitize_modesetting(struct drm_device *dev,
6341 int pipe, int plane)
6342{
6343 struct drm_i915_private *dev_priv = dev->dev_private;
6344 u32 reg, val;
6345 int i;
7664
7665 /* Clear any frame start delays used for debugging left by the BIOS */
6346
6347 /* Clear any frame start delays used for debugging left by the BIOS */
7666 for_each_pipe(pipe) {
7667 reg = PIPECONF(pipe);
6348 for_each_pipe(i) {
6349 reg = PIPECONF(i);
7668 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
7669 }
7670
7671 if (HAS_PCH_SPLIT(dev))
7672 return;
7673
7674 /* Who knows what state these registers were left in by the BIOS or
7675 * grub?

--- 53 unchanged lines hidden (view full) ---

7729 .cursor_set = intel_crtc_cursor_set,
7730 .cursor_move = intel_crtc_cursor_move,
7731 .gamma_set = intel_crtc_gamma_set,
7732 .set_config = drm_crtc_helper_set_config,
7733 .destroy = intel_crtc_destroy,
7734 .page_flip = intel_crtc_page_flip,
7735};
7736
6350 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
6351 }
6352
6353 if (HAS_PCH_SPLIT(dev))
6354 return;
6355
6356 /* Who knows what state these registers were left in by the BIOS or
6357 * grub?

--- 53 unchanged lines hidden (view full) ---

6411 .cursor_set = intel_crtc_cursor_set,
6412 .cursor_move = intel_crtc_cursor_move,
6413 .gamma_set = intel_crtc_gamma_set,
6414 .set_config = drm_crtc_helper_set_config,
6415 .destroy = intel_crtc_destroy,
6416 .page_flip = intel_crtc_page_flip,
6417};
6418
6419static void intel_pch_pll_init(struct drm_device *dev)
6420{
6421 drm_i915_private_t *dev_priv = dev->dev_private;
6422 int i;
6423
6424 if (dev_priv->num_pch_pll == 0) {
6425 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
6426 return;
6427 }
6428
6429 for (i = 0; i < dev_priv->num_pch_pll; i++) {
6430 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
6431 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
6432 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
6433 }
6434}
6435
7737static void intel_crtc_init(struct drm_device *dev, int pipe)
7738{
7739 drm_i915_private_t *dev_priv = dev->dev_private;
7740 struct intel_crtc *intel_crtc;
7741 int i;
7742
7743 intel_crtc = malloc(sizeof(struct intel_crtc) +
7744 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),

--- 22 unchanged lines hidden (view full) ---

7767 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
7768 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
7769
7770 intel_crtc_reset(&intel_crtc->base);
7771 intel_crtc->active = true; /* force the pipe off on setup_init_config */
7772 intel_crtc->bpp = 24; /* default for pre-Ironlake */
7773
7774 if (HAS_PCH_SPLIT(dev)) {
6436static void intel_crtc_init(struct drm_device *dev, int pipe)
6437{
6438 drm_i915_private_t *dev_priv = dev->dev_private;
6439 struct intel_crtc *intel_crtc;
6440 int i;
6441
6442 intel_crtc = malloc(sizeof(struct intel_crtc) +
6443 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),

--- 22 unchanged lines hidden (view full) ---

6466 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6467 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
6468
6469 intel_crtc_reset(&intel_crtc->base);
6470 intel_crtc->active = true; /* force the pipe off on setup_init_config */
6471 intel_crtc->bpp = 24; /* default for pre-Ironlake */
6472
6473 if (HAS_PCH_SPLIT(dev)) {
7775 if (pipe == 2 && IS_IVYBRIDGE(dev))
7776 intel_crtc->no_pll = true;
7777 intel_helper_funcs.prepare = ironlake_crtc_prepare;
7778 intel_helper_funcs.commit = ironlake_crtc_commit;
7779 } else {
7780 intel_helper_funcs.prepare = i9xx_crtc_prepare;
7781 intel_helper_funcs.commit = i9xx_crtc_commit;
7782 }
7783
7784 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
7785
7786 intel_crtc->busy = false;
7787
7788 callout_init(&intel_crtc->idle_callout, CALLOUT_MPSAFE);
7789}
7790
7791int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7792 struct drm_file *file)
7793{
6474 intel_helper_funcs.prepare = ironlake_crtc_prepare;
6475 intel_helper_funcs.commit = ironlake_crtc_commit;
6476 } else {
6477 intel_helper_funcs.prepare = i9xx_crtc_prepare;
6478 intel_helper_funcs.commit = i9xx_crtc_commit;
6479 }
6480
6481 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
6482
6483 intel_crtc->busy = false;
6484
6485 callout_init(&intel_crtc->idle_callout, CALLOUT_MPSAFE);
6486}
6487
6488int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6489 struct drm_file *file)
6490{
7794 drm_i915_private_t *dev_priv = dev->dev_private;
7795 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7796 struct drm_mode_object *drmmode_obj;
7797 struct intel_crtc *crtc;
7798
6491 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
6492 struct drm_mode_object *drmmode_obj;
6493 struct intel_crtc *crtc;
6494
7799 if (!dev_priv) {
7800 DRM_ERROR("called with no initialization\n");
7801 return -EINVAL;
7802 }
6495 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6496 return -ENODEV;
7803
7804 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7805 DRM_MODE_OBJECT_CRTC);
7806
7807 if (!drmmode_obj) {
7808 DRM_ERROR("no such CRTC id\n");
7809 return -EINVAL;
7810 }

--- 56 unchanged lines hidden (view full) ---

7867 intel_dp_init(dev, DP_A);
7868
7869 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7870 intel_dp_init(dev, PCH_DP_D);
7871 }
7872
7873 intel_crt_init(dev);
7874
6497
6498 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6499 DRM_MODE_OBJECT_CRTC);
6500
6501 if (!drmmode_obj) {
6502 DRM_ERROR("no such CRTC id\n");
6503 return -EINVAL;
6504 }

--- 56 unchanged lines hidden (view full) ---

6561 intel_dp_init(dev, DP_A);
6562
6563 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6564 intel_dp_init(dev, PCH_DP_D);
6565 }
6566
6567 intel_crt_init(dev);
6568
7875 if (HAS_PCH_SPLIT(dev)) {
6569 if (IS_HASWELL(dev)) {
7876 int found;
7877
6570 int found;
6571
6572 /* Haswell uses DDI functions to detect digital outputs */
6573 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
6574 /* DDI A only supports eDP */
6575 if (found)
6576 intel_ddi_init(dev, PORT_A);
6577
6578 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
6579 * register */
6580 found = I915_READ(SFUSE_STRAP);
6581
6582 if (found & SFUSE_STRAP_DDIB_DETECTED)
6583 intel_ddi_init(dev, PORT_B);
6584 if (found & SFUSE_STRAP_DDIC_DETECTED)
6585 intel_ddi_init(dev, PORT_C);
6586 if (found & SFUSE_STRAP_DDID_DETECTED)
6587 intel_ddi_init(dev, PORT_D);
6588 } else if (HAS_PCH_SPLIT(dev)) {
6589 int found;
6590
7878 DRM_DEBUG_KMS(
7879"HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
7880 (I915_READ(HDMIB) & PORT_DETECTED) != 0,
7881 (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
7882 (I915_READ(HDMIC) & PORT_DETECTED) != 0,
7883 (I915_READ(HDMID) & PORT_DETECTED) != 0,
7884 (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
7885 (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
7886 (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);
7887
7888 if (I915_READ(HDMIB) & PORT_DETECTED) {
7889 /* PCH SDVOB multiplex with HDMIB */
6591 DRM_DEBUG_KMS(
6592"HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
6593 (I915_READ(HDMIB) & PORT_DETECTED) != 0,
6594 (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
6595 (I915_READ(HDMIC) & PORT_DETECTED) != 0,
6596 (I915_READ(HDMID) & PORT_DETECTED) != 0,
6597 (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
6598 (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
6599 (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);
6600
6601 if (I915_READ(HDMIB) & PORT_DETECTED) {
6602 /* PCH SDVOB multiplex with HDMIB */
7890 found = intel_sdvo_init(dev, PCH_SDVOB);
6603 found = intel_sdvo_init(dev, PCH_SDVOB, true);
7891 if (!found)
7892 intel_hdmi_init(dev, HDMIB);
7893 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
7894 intel_dp_init(dev, PCH_DP_B);
7895 }
7896
7897 if (I915_READ(HDMIC) & PORT_DETECTED)
7898 intel_hdmi_init(dev, HDMIC);

--- 7 unchanged lines hidden (view full) ---

7906 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7907 intel_dp_init(dev, PCH_DP_D);
7908
7909 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
7910 bool found = false;
7911
7912 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7913 DRM_DEBUG_KMS("probing SDVOB\n");
6604 if (!found)
6605 intel_hdmi_init(dev, HDMIB);
6606 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
6607 intel_dp_init(dev, PCH_DP_B);
6608 }
6609
6610 if (I915_READ(HDMIC) & PORT_DETECTED)
6611 intel_hdmi_init(dev, HDMIC);

--- 7 unchanged lines hidden (view full) ---

6619 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6620 intel_dp_init(dev, PCH_DP_D);
6621
6622 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
6623 bool found = false;
6624
6625 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6626 DRM_DEBUG_KMS("probing SDVOB\n");
7914 found = intel_sdvo_init(dev, SDVOB);
6627 found = intel_sdvo_init(dev, SDVOB, true);
7915 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
7916 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
7917 intel_hdmi_init(dev, SDVOB);
7918 }
7919
7920 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
7921 DRM_DEBUG_KMS("probing DP_B\n");
7922 intel_dp_init(dev, DP_B);
7923 }
7924 }
7925
7926 /* Before G4X SDVOC doesn't have its own detect register */
7927
7928 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7929 DRM_DEBUG_KMS("probing SDVOC\n");
6628 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
6629 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
6630 intel_hdmi_init(dev, SDVOB);
6631 }
6632
6633 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
6634 DRM_DEBUG_KMS("probing DP_B\n");
6635 intel_dp_init(dev, DP_B);
6636 }
6637 }
6638
6639 /* Before G4X SDVOC doesn't have its own detect register */
6640
6641 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6642 DRM_DEBUG_KMS("probing SDVOC\n");
7930 found = intel_sdvo_init(dev, SDVOC);
6643 found = intel_sdvo_init(dev, SDVOC, false);
7931 }
7932
7933 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
7934
7935 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
7936 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
7937 intel_hdmi_init(dev, SDVOC);
7938 }

--- 117 unchanged lines hidden (view full) ---

8056 return (intel_framebuffer_create(dev, mode_cmd, obj, res));
8057}
8058
8059static const struct drm_mode_config_funcs intel_mode_funcs = {
8060 .fb_create = intel_user_framebuffer_create,
8061 .output_poll_changed = intel_fb_output_poll_changed,
8062};
8063
6644 }
6645
6646 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
6647
6648 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
6649 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
6650 intel_hdmi_init(dev, SDVOC);
6651 }

--- 117 unchanged lines hidden (view full) ---

6769 return (intel_framebuffer_create(dev, mode_cmd, obj, res));
6770}
6771
6772static const struct drm_mode_config_funcs intel_mode_funcs = {
6773 .fb_create = intel_user_framebuffer_create,
6774 .output_poll_changed = intel_fb_output_poll_changed,
6775};
6776
8064static struct drm_i915_gem_object *
8065intel_alloc_context_page(struct drm_device *dev)
8066{
8067 struct drm_i915_gem_object *ctx;
8068 int ret;
8069
8070 DRM_LOCK_ASSERT(dev);
8071
8072 ctx = i915_gem_alloc_object(dev, 4096);
8073 if (!ctx) {
8074 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8075 return NULL;
8076 }
8077
8078 ret = i915_gem_object_pin(ctx, 4096, true);
8079 if (ret) {
8080 DRM_ERROR("failed to pin power context: %d\n", ret);
8081 goto err_unref;
8082 }
8083
8084 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8085 if (ret) {
8086 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8087 goto err_unpin;
8088 }
8089
8090 return ctx;
8091
8092err_unpin:
8093 i915_gem_object_unpin(ctx);
8094err_unref:
8095 drm_gem_object_unreference(&ctx->base);
8096 DRM_UNLOCK(dev);
8097 return NULL;
8098}
8099
8100bool ironlake_set_drps(struct drm_device *dev, u8 val)
8101{
8102 struct drm_i915_private *dev_priv = dev->dev_private;
8103 u16 rgvswctl;
8104
8105 rgvswctl = I915_READ16(MEMSWCTL);
8106 if (rgvswctl & MEMCTL_CMD_STS) {
8107 DRM_DEBUG("gpu busy, RCS change rejected\n");
8108 return false; /* still busy with another command */
8109 }
8110
8111 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
8112 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
8113 I915_WRITE16(MEMSWCTL, rgvswctl);
8114 POSTING_READ16(MEMSWCTL);
8115
8116 rgvswctl |= MEMCTL_CMD_STS;
8117 I915_WRITE16(MEMSWCTL, rgvswctl);
8118
8119 return true;
8120}
8121
8122void ironlake_enable_drps(struct drm_device *dev)
8123{
8124 struct drm_i915_private *dev_priv = dev->dev_private;
8125 u32 rgvmodectl = I915_READ(MEMMODECTL);
8126 u8 fmax, fmin, fstart, vstart;
8127
8128 /* Enable temp reporting */
8129 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
8130 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
8131
8132 /* 100ms RC evaluation intervals */
8133 I915_WRITE(RCUPEI, 100000);
8134 I915_WRITE(RCDNEI, 100000);
8135
8136 /* Set max/min thresholds to 90ms and 80ms respectively */
8137 I915_WRITE(RCBMAXAVG, 90000);
8138 I915_WRITE(RCBMINAVG, 80000);
8139
8140 I915_WRITE(MEMIHYST, 1);
8141
8142 /* Set up min, max, and cur for interrupt handling */
8143 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
8144 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
8145 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
8146 MEMMODE_FSTART_SHIFT;
8147
8148 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
8149 PXVFREQ_PX_SHIFT;
8150
8151 dev_priv->fmax = fmax; /* IPS callback will increase this */
8152 dev_priv->fstart = fstart;
8153
8154 dev_priv->max_delay = fstart;
8155 dev_priv->min_delay = fmin;
8156 dev_priv->cur_delay = fstart;
8157
8158 DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
8159 fmax, fmin, fstart);
8160
8161 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
8162
8163 /*
8164 * Interrupts will be enabled in ironlake_irq_postinstall
8165 */
8166
8167 I915_WRITE(VIDSTART, vstart);
8168 POSTING_READ(VIDSTART);
8169
8170 rgvmodectl |= MEMMODE_SWMODE_EN;
8171 I915_WRITE(MEMMODECTL, rgvmodectl);
8172
8173 if (_intel_wait_for(dev,
8174 (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
8175 1, "915per"))
8176 DRM_ERROR("stuck trying to change perf mode\n");
8177 pause("915dsp", 1);
8178
8179 ironlake_set_drps(dev, fstart);
8180
8181 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
8182 I915_READ(0x112e0);
8183 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
8184 dev_priv->last_count2 = I915_READ(0x112f4);
8185 nanotime(&dev_priv->last_time2);
8186}
8187
8188void ironlake_disable_drps(struct drm_device *dev)
8189{
8190 struct drm_i915_private *dev_priv = dev->dev_private;
8191 u16 rgvswctl = I915_READ16(MEMSWCTL);
8192
8193 /* Ack interrupts, disable EFC interrupt */
8194 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
8195 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
8196 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
8197 I915_WRITE(DEIIR, DE_PCU_EVENT);
8198 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
8199
8200 /* Go back to the starting frequency */
8201 ironlake_set_drps(dev, dev_priv->fstart);
8202 pause("915dsp", 1);
8203 rgvswctl |= MEMCTL_CMD_STS;
8204 I915_WRITE(MEMSWCTL, rgvswctl);
8205 pause("915dsp", 1);
8206
8207}
8208
8209void gen6_set_rps(struct drm_device *dev, u8 val)
8210{
8211 struct drm_i915_private *dev_priv = dev->dev_private;
8212 u32 swreq;
8213
8214 swreq = (val & 0x3ff) << 25;
8215 I915_WRITE(GEN6_RPNSWREQ, swreq);
8216}
8217
8218void gen6_disable_rps(struct drm_device *dev)
8219{
8220 struct drm_i915_private *dev_priv = dev->dev_private;
8221
8222 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
8223 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
8224 I915_WRITE(GEN6_PMIER, 0);
8225 /* Complete PM interrupt masking here doesn't race with the rps work
8226 * item again unmasking PM interrupts because that is using a different
8227 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
8228 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
8229
8230 mtx_lock(&dev_priv->rps_lock);
8231 dev_priv->pm_iir = 0;
8232 mtx_unlock(&dev_priv->rps_lock);
8233
8234 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
8235}
8236
8237static unsigned long intel_pxfreq(u32 vidfreq)
8238{
8239 unsigned long freq;
8240 int div = (vidfreq & 0x3f0000) >> 16;
8241 int post = (vidfreq & 0x3000) >> 12;
8242 int pre = (vidfreq & 0x7);
8243
8244 if (!pre)
8245 return 0;
8246
8247 freq = ((div * 133333) / ((1<<post) * pre));
8248
8249 return freq;
8250}
8251
8252void intel_init_emon(struct drm_device *dev)
8253{
8254 struct drm_i915_private *dev_priv = dev->dev_private;
8255 u32 lcfuse;
8256 u8 pxw[16];
8257 int i;
8258
8259 /* Disable to program */
8260 I915_WRITE(ECR, 0);
8261 POSTING_READ(ECR);
8262
8263 /* Program energy weights for various events */
8264 I915_WRITE(SDEW, 0x15040d00);
8265 I915_WRITE(CSIEW0, 0x007f0000);
8266 I915_WRITE(CSIEW1, 0x1e220004);
8267 I915_WRITE(CSIEW2, 0x04000004);
8268
8269 for (i = 0; i < 5; i++)
8270 I915_WRITE(PEW + (i * 4), 0);
8271 for (i = 0; i < 3; i++)
8272 I915_WRITE(DEW + (i * 4), 0);
8273
8274 /* Program P-state weights to account for frequency power adjustment */
8275 for (i = 0; i < 16; i++) {
8276 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
8277 unsigned long freq = intel_pxfreq(pxvidfreq);
8278 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
8279 PXVFREQ_PX_SHIFT;
8280 unsigned long val;
8281
8282 val = vid * vid;
8283 val *= (freq / 1000);
8284 val *= 255;
8285 val /= (127*127*900);
8286 if (val > 0xff)
8287 DRM_ERROR("bad pxval: %ld\n", val);
8288 pxw[i] = val;
8289 }
8290 /* Render standby states get 0 weight */
8291 pxw[14] = 0;
8292 pxw[15] = 0;
8293
8294 for (i = 0; i < 4; i++) {
8295 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
8296 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
8297 I915_WRITE(PXW + (i * 4), val);
8298 }
8299
8300 /* Adjust magic regs to magic values (more experimental results) */
8301 I915_WRITE(OGW0, 0);
8302 I915_WRITE(OGW1, 0);
8303 I915_WRITE(EG0, 0x00007f00);
8304 I915_WRITE(EG1, 0x0000000e);
8305 I915_WRITE(EG2, 0x000e0000);
8306 I915_WRITE(EG3, 0x68000300);
8307 I915_WRITE(EG4, 0x42000000);
8308 I915_WRITE(EG5, 0x00140031);
8309 I915_WRITE(EG6, 0);
8310 I915_WRITE(EG7, 0);
8311
8312 for (i = 0; i < 8; i++)
8313 I915_WRITE(PXWL + (i * 4), 0);
8314
8315 /* Enable PMON + select events */
8316 I915_WRITE(ECR, 0x80000019);
8317
8318 lcfuse = I915_READ(LCFUSE02);
8319
8320 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
8321}
8322
8323static int intel_enable_rc6(struct drm_device *dev)
8324{
8325 /*
8326 * Respect the kernel parameter if it is set
8327 */
8328 if (i915_enable_rc6 >= 0)
8329 return i915_enable_rc6;
8330
8331 /*
8332 * Disable RC6 on Ironlake
8333 */
8334 if (INTEL_INFO(dev)->gen == 5)
8335 return 0;
8336
8337 /*
8338 * Enable rc6 on Sandybridge if DMA remapping is disabled
8339 */
8340 if (INTEL_INFO(dev)->gen == 6) {
8341 DRM_DEBUG_DRIVER(
8342 "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
8343 intel_iommu_enabled ? "true" : "false",
8344 !intel_iommu_enabled ? "en" : "dis");
8345 return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
8346 }
8347 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8348 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8349}
8350
8351void gen6_enable_rps(struct drm_i915_private *dev_priv)
8352{
8353 struct drm_device *dev = dev_priv->dev;
8354 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
8355 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
8356 u32 pcu_mbox, rc6_mask = 0;
8357 u32 gtfifodbg;
8358 int cur_freq, min_freq, max_freq;
8359 int rc6_mode;
8360 int i;
8361
8362 /* Here begins a magic sequence of register writes to enable
8363 * auto-downclocking.
8364 *
8365 * Perhaps there might be some value in exposing these to
8366 * userspace...
8367 */
8368 I915_WRITE(GEN6_RC_STATE, 0);
8369 DRM_LOCK(dev);
8370
8371 /* Clear the DBG now so we don't confuse earlier errors */
8372 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
8373 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
8374 I915_WRITE(GTFIFODBG, gtfifodbg);
8375 }
8376
8377 gen6_gt_force_wake_get(dev_priv);
8378
8379 /* disable the counters and set deterministic thresholds */
8380 I915_WRITE(GEN6_RC_CONTROL, 0);
8381
8382 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
8383 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
8384 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
8385 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
8386 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
8387
8388 for (i = 0; i < I915_NUM_RINGS; i++)
8389 I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10);
8390
8391 I915_WRITE(GEN6_RC_SLEEP, 0);
8392 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
8393 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
8394 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
8395 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
8396
8397 rc6_mode = intel_enable_rc6(dev_priv->dev);
8398 if (rc6_mode & INTEL_RC6_ENABLE)
8399 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
8400
8401 if (rc6_mode & INTEL_RC6p_ENABLE)
8402 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
8403
8404 if (rc6_mode & INTEL_RC6pp_ENABLE)
8405 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
8406
8407 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
8408 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
8409 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
8410 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
8411
8412 I915_WRITE(GEN6_RC_CONTROL,
8413 rc6_mask |
8414 GEN6_RC_CTL_EI_MODE(1) |
8415 GEN6_RC_CTL_HW_ENABLE);
8416
8417 I915_WRITE(GEN6_RPNSWREQ,
8418 GEN6_FREQUENCY(10) |
8419 GEN6_OFFSET(0) |
8420 GEN6_AGGRESSIVE_TURBO);
8421 I915_WRITE(GEN6_RC_VIDEO_FREQ,
8422 GEN6_FREQUENCY(12));
8423
8424 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
8425 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
8426 18 << 24 |
8427 6 << 16);
8428 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
8429 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
8430 I915_WRITE(GEN6_RP_UP_EI, 100000);
8431 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
8432 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
8433 I915_WRITE(GEN6_RP_CONTROL,
8434 GEN6_RP_MEDIA_TURBO |
8435 GEN6_RP_MEDIA_HW_MODE |
8436 GEN6_RP_MEDIA_IS_GFX |
8437 GEN6_RP_ENABLE |
8438 GEN6_RP_UP_BUSY_AVG |
8439 GEN6_RP_DOWN_IDLE_CONT);
8440
8441 if (_intel_wait_for(dev,
8442 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8443 1, "915pr1"))
8444 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8445
8446 I915_WRITE(GEN6_PCODE_DATA, 0);
8447 I915_WRITE(GEN6_PCODE_MAILBOX,
8448 GEN6_PCODE_READY |
8449 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8450 if (_intel_wait_for(dev,
8451 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8452 1, "915pr2"))
8453 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8454
8455 min_freq = (rp_state_cap & 0xff0000) >> 16;
8456 max_freq = rp_state_cap & 0xff;
8457 cur_freq = (gt_perf_status & 0xff00) >> 8;
8458
8459 /* Check for overclock support */
8460 if (_intel_wait_for(dev,
8461 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8462 1, "915pr3"))
8463 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8464 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
8465 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
8466 if (_intel_wait_for(dev,
8467 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8468 1, "915pr4"))
8469 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8470 if (pcu_mbox & (1<<31)) { /* OC supported */
8471 max_freq = pcu_mbox & 0xff;
8472 DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
8473 }
8474
8475 /* In units of 100MHz */
8476 dev_priv->max_delay = max_freq;
8477 dev_priv->min_delay = min_freq;
8478 dev_priv->cur_delay = cur_freq;
8479
8480 /* requires MSI enabled */
8481 I915_WRITE(GEN6_PMIER,
8482 GEN6_PM_MBOX_EVENT |
8483 GEN6_PM_THERMAL_EVENT |
8484 GEN6_PM_RP_DOWN_TIMEOUT |
8485 GEN6_PM_RP_UP_THRESHOLD |
8486 GEN6_PM_RP_DOWN_THRESHOLD |
8487 GEN6_PM_RP_UP_EI_EXPIRED |
8488 GEN6_PM_RP_DOWN_EI_EXPIRED);
8489 mtx_lock(&dev_priv->rps_lock);
8490 if (dev_priv->pm_iir != 0)
8491 printf("pm_iir %x\n", dev_priv->pm_iir);
8492 I915_WRITE(GEN6_PMIMR, 0);
8493 mtx_unlock(&dev_priv->rps_lock);
8494 /* enable all PM interrupts */
8495 I915_WRITE(GEN6_PMINTRMSK, 0);
8496
8497 gen6_gt_force_wake_put(dev_priv);
8498 DRM_UNLOCK(dev);
8499}
8500
8501void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
8502{
8503 struct drm_device *dev;
8504 int min_freq = 15;
8505 int gpu_freq, ia_freq, max_ia_freq;
8506 int scaling_factor = 180;
8507 uint64_t tsc_freq;
8508
8509 dev = dev_priv->dev;
8510#if 0
8511 max_ia_freq = cpufreq_quick_get_max(0);
8512 /*
8513 * Default to measured freq if none found, PCU will ensure we don't go
8514 * over
8515 */
8516 if (!max_ia_freq)
8517 max_ia_freq = tsc_freq;
8518
8519 /* Convert from Hz to MHz */
8520 max_ia_freq /= 1000;
8521#else
8522 tsc_freq = atomic_load_acq_64(&tsc_freq);
8523 max_ia_freq = tsc_freq / 1000 / 1000;
8524#endif
8525
8526 DRM_LOCK(dev);
8527
8528 /*
8529 * For each potential GPU frequency, load a ring frequency we'd like
8530 * to use for memory access. We do this by specifying the IA frequency
8531 * the PCU should use as a reference to determine the ring frequency.
8532 */
8533 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
8534 gpu_freq--) {
8535 int diff = dev_priv->max_delay - gpu_freq;
8536 int d;
8537
8538 /*
8539 * For GPU frequencies less than 750MHz, just use the lowest
8540 * ring freq.
8541 */
8542 if (gpu_freq < min_freq)
8543 ia_freq = 800;
8544 else
8545 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
8546 d = 100;
8547 ia_freq = (ia_freq + d / 2) / d;
8548
8549 I915_WRITE(GEN6_PCODE_DATA,
8550 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
8551 gpu_freq);
8552 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
8553 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8554 if (_intel_wait_for(dev,
8555 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8556 10, 1, "915frq")) {
8557 DRM_ERROR("pcode write of freq table timed out\n");
8558 continue;
8559 }
8560 }
8561
8562 DRM_UNLOCK(dev);
8563}
8564
8565static void ironlake_init_clock_gating(struct drm_device *dev)
8566{
8567 struct drm_i915_private *dev_priv = dev->dev_private;
8568 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8569
8570 /* Required for FBC */
8571 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
8572 DPFCRUNIT_CLOCK_GATE_DISABLE |
8573 DPFDUNIT_CLOCK_GATE_DISABLE;
8574 /* Required for CxSR */
8575 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
8576
8577 I915_WRITE(PCH_3DCGDIS0,
8578 MARIUNIT_CLOCK_GATE_DISABLE |
8579 SVSMUNIT_CLOCK_GATE_DISABLE);
8580 I915_WRITE(PCH_3DCGDIS1,
8581 VFMUNIT_CLOCK_GATE_DISABLE);
8582
8583 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8584
8585 /*
8586 * According to the spec the following bits should be set in
8587 * order to enable memory self-refresh
8588 * The bit 22/21 of 0x42004
8589 * The bit 5 of 0x42020
8590 * The bit 15 of 0x45000
8591 */
8592 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8593 (I915_READ(ILK_DISPLAY_CHICKEN2) |
8594 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8595 I915_WRITE(ILK_DSPCLK_GATE,
8596 (I915_READ(ILK_DSPCLK_GATE) |
8597 ILK_DPARB_CLK_GATE));
8598 I915_WRITE(DISP_ARB_CTL,
8599 (I915_READ(DISP_ARB_CTL) |
8600 DISP_FBC_WM_DIS));
8601 I915_WRITE(WM3_LP_ILK, 0);
8602 I915_WRITE(WM2_LP_ILK, 0);
8603 I915_WRITE(WM1_LP_ILK, 0);
8604
8605 /*
8606 * Based on the document from hardware guys the following bits
8607 * should be set unconditionally in order to enable FBC.
8608 * The bit 22 of 0x42000
8609 * The bit 22 of 0x42004
8610 * The bit 7,8,9 of 0x42020.
8611 */
8612 if (IS_IRONLAKE_M(dev)) {
8613 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8614 I915_READ(ILK_DISPLAY_CHICKEN1) |
8615 ILK_FBCQ_DIS);
8616 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8617 I915_READ(ILK_DISPLAY_CHICKEN2) |
8618 ILK_DPARB_GATE);
8619 I915_WRITE(ILK_DSPCLK_GATE,
8620 I915_READ(ILK_DSPCLK_GATE) |
8621 ILK_DPFC_DIS1 |
8622 ILK_DPFC_DIS2 |
8623 ILK_CLK_FBC);
8624 }
8625
8626 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8627 I915_READ(ILK_DISPLAY_CHICKEN2) |
8628 ILK_ELPIN_409_SELECT);
8629 I915_WRITE(_3D_CHICKEN2,
8630 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8631 _3D_CHICKEN2_WM_READ_PIPELINED);
8632}
8633
8634static void gen6_init_clock_gating(struct drm_device *dev)
8635{
8636 struct drm_i915_private *dev_priv = dev->dev_private;
8637 int pipe;
8638 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8639
8640 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8641
8642 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8643 I915_READ(ILK_DISPLAY_CHICKEN2) |
8644 ILK_ELPIN_409_SELECT);
8645
8646 I915_WRITE(WM3_LP_ILK, 0);
8647 I915_WRITE(WM2_LP_ILK, 0);
8648 I915_WRITE(WM1_LP_ILK, 0);
8649
8650 I915_WRITE(GEN6_UCGCTL1,
8651 I915_READ(GEN6_UCGCTL1) |
8652 GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
8653
8654 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8655 * gating disable must be set. Failure to set it results in
8656 * flickering pixels due to Z write ordering failures after
8657 * some amount of runtime in the Mesa "fire" demo, and Unigine
8658 * Sanctuary and Tropics, and apparently anything else with
8659 * alpha test or pixel discard.
8660 *
8661 * According to the spec, bit 11 (RCCUNIT) must also be set,
8662 * but we didn't debug actual testcases to find it out.
8663 */
8664 I915_WRITE(GEN6_UCGCTL2,
8665 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8666 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8667
8668 /*
8669 * According to the spec the following bits should be
8670 * set in order to enable memory self-refresh and fbc:
8671 * The bit21 and bit22 of 0x42000
8672 * The bit21 and bit22 of 0x42004
8673 * The bit5 and bit7 of 0x42020
8674 * The bit14 of 0x70180
8675 * The bit14 of 0x71180
8676 */
8677 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8678 I915_READ(ILK_DISPLAY_CHICKEN1) |
8679 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8680 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8681 I915_READ(ILK_DISPLAY_CHICKEN2) |
8682 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8683 I915_WRITE(ILK_DSPCLK_GATE,
8684 I915_READ(ILK_DSPCLK_GATE) |
8685 ILK_DPARB_CLK_GATE |
8686 ILK_DPFD_CLK_GATE);
8687
8688 for_each_pipe(pipe) {
8689 I915_WRITE(DSPCNTR(pipe),
8690 I915_READ(DSPCNTR(pipe)) |
8691 DISPPLANE_TRICKLE_FEED_DISABLE);
8692 intel_flush_display_plane(dev_priv, pipe);
8693 }
8694}
8695
8696static void ivybridge_init_clock_gating(struct drm_device *dev)
8697{
8698 struct drm_i915_private *dev_priv = dev->dev_private;
8699 int pipe;
8700 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8701
8702 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8703
8704 I915_WRITE(WM3_LP_ILK, 0);
8705 I915_WRITE(WM2_LP_ILK, 0);
8706 I915_WRITE(WM1_LP_ILK, 0);
8707
8708 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8709 * This implements the WaDisableRCZUnitClockGating workaround.
8710 */
8711 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8712
8713 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8714
8715 I915_WRITE(IVB_CHICKEN3,
8716 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8717 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8718
8719 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8720 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8721 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8722
8723 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8724 I915_WRITE(GEN7_L3CNTLREG1,
8725 GEN7_WA_FOR_GEN7_L3_CONTROL);
8726 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8727 GEN7_WA_L3_CHICKEN_MODE);
8728
8729 /* This is required by WaCatErrorRejectionIssue */
8730 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8731 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8732 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8733
8734 for_each_pipe(pipe) {
8735 I915_WRITE(DSPCNTR(pipe),
8736 I915_READ(DSPCNTR(pipe)) |
8737 DISPPLANE_TRICKLE_FEED_DISABLE);
8738 intel_flush_display_plane(dev_priv, pipe);
8739 }
8740}
8741
8742static void g4x_init_clock_gating(struct drm_device *dev)
8743{
8744 struct drm_i915_private *dev_priv = dev->dev_private;
8745 uint32_t dspclk_gate;
8746
8747 I915_WRITE(RENCLK_GATE_D1, 0);
8748 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8749 GS_UNIT_CLOCK_GATE_DISABLE |
8750 CL_UNIT_CLOCK_GATE_DISABLE);
8751 I915_WRITE(RAMCLK_GATE_D, 0);
8752 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8753 OVRUNIT_CLOCK_GATE_DISABLE |
8754 OVCUNIT_CLOCK_GATE_DISABLE;
8755 if (IS_GM45(dev))
8756 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8757 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8758}
8759
8760static void crestline_init_clock_gating(struct drm_device *dev)
8761{
8762 struct drm_i915_private *dev_priv = dev->dev_private;
8763
8764 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
8765 I915_WRITE(RENCLK_GATE_D2, 0);
8766 I915_WRITE(DSPCLK_GATE_D, 0);
8767 I915_WRITE(RAMCLK_GATE_D, 0);
8768 I915_WRITE16(DEUC, 0);
8769}
8770
8771static void broadwater_init_clock_gating(struct drm_device *dev)
8772{
8773 struct drm_i915_private *dev_priv = dev->dev_private;
8774
8775 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
8776 I965_RCC_CLOCK_GATE_DISABLE |
8777 I965_RCPB_CLOCK_GATE_DISABLE |
8778 I965_ISC_CLOCK_GATE_DISABLE |
8779 I965_FBC_CLOCK_GATE_DISABLE);
8780 I915_WRITE(RENCLK_GATE_D2, 0);
8781}
8782
8783static void gen3_init_clock_gating(struct drm_device *dev)
8784{
8785 struct drm_i915_private *dev_priv = dev->dev_private;
8786 u32 dstate = I915_READ(D_STATE);
8787
8788 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8789 DSTATE_DOT_CLOCK_GATING;
8790 I915_WRITE(D_STATE, dstate);
8791}
8792
8793static void i85x_init_clock_gating(struct drm_device *dev)
8794{
8795 struct drm_i915_private *dev_priv = dev->dev_private;
8796
8797 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
8798}
8799
8800static void i830_init_clock_gating(struct drm_device *dev)
8801{
8802 struct drm_i915_private *dev_priv = dev->dev_private;
8803
8804 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
8805}
8806
8807static void ibx_init_clock_gating(struct drm_device *dev)
8808{
8809 struct drm_i915_private *dev_priv = dev->dev_private;
8810
8811 /*
8812 * On Ibex Peak and Cougar Point, we need to disable clock
8813 * gating for the panel power sequencer or it will fail to
8814 * start up when no ports are active.
8815 */
8816 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8817}
8818
8819static void cpt_init_clock_gating(struct drm_device *dev)
8820{
8821 struct drm_i915_private *dev_priv = dev->dev_private;
8822 int pipe;
8823
8824 /*
8825 * On Ibex Peak and Cougar Point, we need to disable clock
8826 * gating for the panel power sequencer or it will fail to
8827 * start up when no ports are active.
8828 */
8829 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8830 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
8831 DPLS_EDP_PPS_FIX_DIS);
8832 /* Without this, mode sets may fail silently on FDI */
8833 for_each_pipe(pipe)
8834 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
8835}
8836
8837static void ironlake_teardown_rc6(struct drm_device *dev)
8838{
8839 struct drm_i915_private *dev_priv = dev->dev_private;
8840
8841 if (dev_priv->renderctx) {
8842 i915_gem_object_unpin(dev_priv->renderctx);
8843 drm_gem_object_unreference(&dev_priv->renderctx->base);
8844 dev_priv->renderctx = NULL;
8845 }
8846
8847 if (dev_priv->pwrctx) {
8848 i915_gem_object_unpin(dev_priv->pwrctx);
8849 drm_gem_object_unreference(&dev_priv->pwrctx->base);
8850 dev_priv->pwrctx = NULL;
8851 }
8852}
8853
8854static void ironlake_disable_rc6(struct drm_device *dev)
8855{
8856 struct drm_i915_private *dev_priv = dev->dev_private;
8857
8858 if (I915_READ(PWRCTXA)) {
8859 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
8860 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
8861 (void)_intel_wait_for(dev,
8862 ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
8863 50, 1, "915pro");
8864
8865 I915_WRITE(PWRCTXA, 0);
8866 POSTING_READ(PWRCTXA);
8867
8868 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8869 POSTING_READ(RSTDBYCTL);
8870 }
8871
8872 ironlake_teardown_rc6(dev);
8873}
8874
8875static int ironlake_setup_rc6(struct drm_device *dev)
8876{
8877 struct drm_i915_private *dev_priv = dev->dev_private;
8878
8879 if (dev_priv->renderctx == NULL)
8880 dev_priv->renderctx = intel_alloc_context_page(dev);
8881 if (!dev_priv->renderctx)
8882 return -ENOMEM;
8883
8884 if (dev_priv->pwrctx == NULL)
8885 dev_priv->pwrctx = intel_alloc_context_page(dev);
8886 if (!dev_priv->pwrctx) {
8887 ironlake_teardown_rc6(dev);
8888 return -ENOMEM;
8889 }
8890
8891 return 0;
8892}
8893
8894void ironlake_enable_rc6(struct drm_device *dev)
8895{
8896 struct drm_i915_private *dev_priv = dev->dev_private;
8897 int ret;
8898
8899 /* rc6 disabled by default due to repeated reports of hanging during
8900 * boot and resume.
8901 */
8902 if (!intel_enable_rc6(dev))
8903 return;
8904
8905 DRM_LOCK(dev);
8906 ret = ironlake_setup_rc6(dev);
8907 if (ret) {
8908 DRM_UNLOCK(dev);
8909 return;
8910 }
8911
8912 /*
8913 * GPU can automatically power down the render unit if given a page
8914 * to save state.
8915 */
8916 ret = BEGIN_LP_RING(6);
8917 if (ret) {
8918 ironlake_teardown_rc6(dev);
8919 DRM_UNLOCK(dev);
8920 return;
8921 }
8922
8923 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8924 OUT_RING(MI_SET_CONTEXT);
8925 OUT_RING(dev_priv->renderctx->gtt_offset |
8926 MI_MM_SPACE_GTT |
8927 MI_SAVE_EXT_STATE_EN |
8928 MI_RESTORE_EXT_STATE_EN |
8929 MI_RESTORE_INHIBIT);
8930 OUT_RING(MI_SUSPEND_FLUSH);
8931 OUT_RING(MI_NOOP);
8932 OUT_RING(MI_FLUSH);
8933 ADVANCE_LP_RING();
8934
8935 /*
8936 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8937 * does an implicit flush, combined with MI_FLUSH above, it should be
8938 * safe to assume that renderctx is valid
8939 */
8940 ret = intel_wait_ring_idle(LP_RING(dev_priv));
8941 if (ret) {
8942 DRM_ERROR("failed to enable ironlake power power savings\n");
8943 ironlake_teardown_rc6(dev);
8944 DRM_UNLOCK(dev);
8945 return;
8946 }
8947
8948 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8949 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8950 DRM_UNLOCK(dev);
8951}
8952
8953void intel_init_clock_gating(struct drm_device *dev)
8954{
8955 struct drm_i915_private *dev_priv = dev->dev_private;
8956
8957 dev_priv->display.init_clock_gating(dev);
8958
8959 if (dev_priv->display.init_pch_clock_gating)
8960 dev_priv->display.init_pch_clock_gating(dev);
8961}
8962
8963/* Set up chip specific display functions */
8964static void intel_init_display(struct drm_device *dev)
8965{
8966 struct drm_i915_private *dev_priv = dev->dev_private;
8967
8968 /* We always want a DPMS function */
8969 if (HAS_PCH_SPLIT(dev)) {
8970 dev_priv->display.dpms = ironlake_crtc_dpms;
8971 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
6777/* Set up chip specific display functions */
6778static void intel_init_display(struct drm_device *dev)
6779{
6780 struct drm_i915_private *dev_priv = dev->dev_private;
6781
6782 /* We always want a DPMS function */
6783 if (HAS_PCH_SPLIT(dev)) {
6784 dev_priv->display.dpms = ironlake_crtc_dpms;
6785 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
6786 dev_priv->display.off = ironlake_crtc_off;
8972 dev_priv->display.update_plane = ironlake_update_plane;
8973 } else {
8974 dev_priv->display.dpms = i9xx_crtc_dpms;
8975 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
6787 dev_priv->display.update_plane = ironlake_update_plane;
6788 } else {
6789 dev_priv->display.dpms = i9xx_crtc_dpms;
6790 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
6791 dev_priv->display.off = i9xx_crtc_off;
8976 dev_priv->display.update_plane = i9xx_update_plane;
8977 }
8978
6792 dev_priv->display.update_plane = i9xx_update_plane;
6793 }
6794
8979 if (I915_HAS_FBC(dev)) {
8980 if (HAS_PCH_SPLIT(dev)) {
8981 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
8982 dev_priv->display.enable_fbc = ironlake_enable_fbc;
8983 dev_priv->display.disable_fbc = ironlake_disable_fbc;
8984 } else if (IS_GM45(dev)) {
8985 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
8986 dev_priv->display.enable_fbc = g4x_enable_fbc;
8987 dev_priv->display.disable_fbc = g4x_disable_fbc;
8988 } else if (IS_CRESTLINE(dev)) {
8989 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
8990 dev_priv->display.enable_fbc = i8xx_enable_fbc;
8991 dev_priv->display.disable_fbc = i8xx_disable_fbc;
8992 }
8993 /* 855GM needs testing */
8994 }
8995
8996 /* Returns the core display clock speed */
6795 /* Returns the core display clock speed */
8997 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
6796 if (IS_VALLEYVIEW(dev))
8998 dev_priv->display.get_display_clock_speed =
6797 dev_priv->display.get_display_clock_speed =
6798 valleyview_get_display_clock_speed;
6799 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
6800 dev_priv->display.get_display_clock_speed =
8999 i945_get_display_clock_speed;
9000 else if (IS_I915G(dev))
9001 dev_priv->display.get_display_clock_speed =
9002 i915_get_display_clock_speed;
9003 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
9004 dev_priv->display.get_display_clock_speed =
9005 i9xx_misc_get_display_clock_speed;
9006 else if (IS_I915GM(dev))

--- 4 unchanged lines hidden (view full) ---

9011 i865_get_display_clock_speed;
9012 else if (IS_I85X(dev))
9013 dev_priv->display.get_display_clock_speed =
9014 i855_get_display_clock_speed;
9015 else /* 852, 830 */
9016 dev_priv->display.get_display_clock_speed =
9017 i830_get_display_clock_speed;
9018
6801 i945_get_display_clock_speed;
6802 else if (IS_I915G(dev))
6803 dev_priv->display.get_display_clock_speed =
6804 i915_get_display_clock_speed;
6805 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
6806 dev_priv->display.get_display_clock_speed =
6807 i9xx_misc_get_display_clock_speed;
6808 else if (IS_I915GM(dev))

--- 4 unchanged lines hidden (view full) ---

6813 i865_get_display_clock_speed;
6814 else if (IS_I85X(dev))
6815 dev_priv->display.get_display_clock_speed =
6816 i855_get_display_clock_speed;
6817 else /* 852, 830 */
6818 dev_priv->display.get_display_clock_speed =
6819 i830_get_display_clock_speed;
6820
9019 /* For FIFO watermark updates */
9020 if (HAS_PCH_SPLIT(dev)) {
6821 if (HAS_PCH_SPLIT(dev)) {
9021 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
9022 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
9023
9024 /* IVB configs may use multi-threaded forcewake */
9025 if (IS_IVYBRIDGE(dev)) {
9026 u32 ecobus;
9027
9028 /* A small trick here - if the bios hasn't configured MT forcewake,
9029 * and if the device is in RC6, then force_wake_mt_get will not wake
9030 * the device and the ECOBUS read will return zero. Which will be
9031 * (correctly) interpreted by the test below as MT forcewake being
9032 * disabled.
9033 */
9034 DRM_LOCK(dev);
9035 __gen6_gt_force_wake_mt_get(dev_priv);
9036 ecobus = I915_READ_NOTRACE(ECOBUS);
9037 __gen6_gt_force_wake_mt_put(dev_priv);
9038 DRM_UNLOCK(dev);
9039
9040 if (ecobus & FORCEWAKE_MT_ENABLE) {
9041 DRM_DEBUG_KMS("Using MT version of forcewake\n");
9042 dev_priv->display.force_wake_get =
9043 __gen6_gt_force_wake_mt_get;
9044 dev_priv->display.force_wake_put =
9045 __gen6_gt_force_wake_mt_put;
9046 }
9047 }
9048
9049 if (HAS_PCH_IBX(dev))
9050 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
9051 else if (HAS_PCH_CPT(dev))
9052 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
9053
9054 if (IS_GEN5(dev)) {
6822 if (IS_GEN5(dev)) {
9055 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
9056 dev_priv->display.update_wm = ironlake_update_wm;
9057 else {
9058 DRM_DEBUG_KMS("Failed to get proper latency. "
9059 "Disable CxSR\n");
9060 dev_priv->display.update_wm = NULL;
9061 }
9062 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
6823 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
9063 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
9064 dev_priv->display.write_eld = ironlake_write_eld;
9065 } else if (IS_GEN6(dev)) {
6824 dev_priv->display.write_eld = ironlake_write_eld;
6825 } else if (IS_GEN6(dev)) {
9066 if (SNB_READ_WM0_LATENCY()) {
9067 dev_priv->display.update_wm = sandybridge_update_wm;
9068 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9069 } else {
9070 DRM_DEBUG_KMS("Failed to read display plane latency. "
9071 "Disable CxSR\n");
9072 dev_priv->display.update_wm = NULL;
9073 }
9074 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
6826 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
9075 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
9076 dev_priv->display.write_eld = ironlake_write_eld;
9077 } else if (IS_IVYBRIDGE(dev)) {
9078 /* FIXME: detect B0+ stepping and use auto training */
9079 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
6827 dev_priv->display.write_eld = ironlake_write_eld;
6828 } else if (IS_IVYBRIDGE(dev)) {
6829 /* FIXME: detect B0+ stepping and use auto training */
6830 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
9080 if (SNB_READ_WM0_LATENCY()) {
9081 dev_priv->display.update_wm = sandybridge_update_wm;
9082 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9083 } else {
9084 DRM_DEBUG_KMS("Failed to read display plane latency. "
9085 "Disable CxSR\n");
9086 dev_priv->display.update_wm = NULL;
9087 }
9088 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
9089 dev_priv->display.write_eld = ironlake_write_eld;
6831 dev_priv->display.write_eld = ironlake_write_eld;
6832 } else if (IS_HASWELL(dev)) {
6833 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
6834 dev_priv->display.write_eld = ironlake_write_eld;
9090 } else
9091 dev_priv->display.update_wm = NULL;
6835 } else
6836 dev_priv->display.update_wm = NULL;
9092 } else if (IS_PINEVIEW(dev)) {
9093 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
9094 dev_priv->is_ddr3,
9095 dev_priv->fsb_freq,
9096 dev_priv->mem_freq)) {
9097 DRM_INFO("failed to find known CxSR latency "
9098 "(found ddr%s fsb freq %d, mem freq %d), "
9099 "disabling CxSR\n",
9100 (dev_priv->is_ddr3 == 1) ? "3" : "2",
9101 dev_priv->fsb_freq, dev_priv->mem_freq);
9102 /* Disable CxSR and never update its watermark again */
9103 pineview_disable_cxsr(dev);
9104 dev_priv->display.update_wm = NULL;
9105 } else
9106 dev_priv->display.update_wm = pineview_update_wm;
9107 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6837 } else if (IS_VALLEYVIEW(dev)) {
6838 dev_priv->display.force_wake_get = vlv_force_wake_get;
6839 dev_priv->display.force_wake_put = vlv_force_wake_put;
9108 } else if (IS_G4X(dev)) {
9109 dev_priv->display.write_eld = g4x_write_eld;
6840 } else if (IS_G4X(dev)) {
6841 dev_priv->display.write_eld = g4x_write_eld;
9110 dev_priv->display.update_wm = g4x_update_wm;
9111 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
9112 } else if (IS_GEN4(dev)) {
9113 dev_priv->display.update_wm = i965_update_wm;
9114 if (IS_CRESTLINE(dev))
9115 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
9116 else if (IS_BROADWATER(dev))
9117 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
9118 } else if (IS_GEN3(dev)) {
9119 dev_priv->display.update_wm = i9xx_update_wm;
9120 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
9121 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9122 } else if (IS_I865G(dev)) {
9123 dev_priv->display.update_wm = i830_update_wm;
9124 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9125 dev_priv->display.get_fifo_size = i830_get_fifo_size;
9126 } else if (IS_I85X(dev)) {
9127 dev_priv->display.update_wm = i9xx_update_wm;
9128 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
9129 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9130 } else {
9131 dev_priv->display.update_wm = i830_update_wm;
9132 dev_priv->display.init_clock_gating = i830_init_clock_gating;
9133 if (IS_845G(dev))
9134 dev_priv->display.get_fifo_size = i845_get_fifo_size;
9135 else
9136 dev_priv->display.get_fifo_size = i830_get_fifo_size;
9137 }
9138
9139 /* Default just returns -ENODEV to indicate unsupported */
9140 dev_priv->display.queue_flip = intel_default_queue_flip;
9141
9142 switch (INTEL_INFO(dev)->gen) {
9143 case 2:
9144 dev_priv->display.queue_flip = intel_gen2_queue_flip;

--- 22 unchanged lines hidden (view full) ---

9167 * resume, or other times. This quirk makes sure that's the case for
9168 * affected systems.
9169 */
9170static void quirk_pipea_force(struct drm_device *dev)
9171{
9172 struct drm_i915_private *dev_priv = dev->dev_private;
9173
9174 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
6842 }
6843
6844 /* Default just returns -ENODEV to indicate unsupported */
6845 dev_priv->display.queue_flip = intel_default_queue_flip;
6846
6847 switch (INTEL_INFO(dev)->gen) {
6848 case 2:
6849 dev_priv->display.queue_flip = intel_gen2_queue_flip;

--- 22 unchanged lines hidden (view full) ---

6872 * resume, or other times. This quirk makes sure that's the case for
6873 * affected systems.
6874 */
6875static void quirk_pipea_force(struct drm_device *dev)
6876{
6877 struct drm_i915_private *dev_priv = dev->dev_private;
6878
6879 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9175 DRM_DEBUG("applying pipe a force quirk\n");
6880 DRM_INFO("applying pipe a force quirk\n");
9176}
9177
9178/*
9179 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9180 */
9181static void quirk_ssc_force_disable(struct drm_device *dev)
9182{
9183 struct drm_i915_private *dev_priv = dev->dev_private;
9184 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6881}
6882
6883/*
6884 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
6885 */
6886static void quirk_ssc_force_disable(struct drm_device *dev)
6887{
6888 struct drm_i915_private *dev_priv = dev->dev_private;
6889 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6890 DRM_INFO("applying lvds SSC disable quirk\n");
9185}
9186
6891}
6892
6893/*
6894 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
6895 * brightness value
6896 */
6897static void quirk_invert_brightness(struct drm_device *dev)
6898{
6899 struct drm_i915_private *dev_priv = dev->dev_private;
6900 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
6901 DRM_INFO("applying inverted panel brightness quirk\n");
6902}
6903
9187struct intel_quirk {
9188 int device;
9189 int subsystem_vendor;
9190 int subsystem_device;
9191 void (*hook)(struct drm_device *dev);
9192};
9193
9194#define PCI_ANY_ID (~0u)
9195
6904struct intel_quirk {
6905 int device;
6906 int subsystem_vendor;
6907 int subsystem_device;
6908 void (*hook)(struct drm_device *dev);
6909};
6910
6911#define PCI_ANY_ID (~0u)
6912
9196struct intel_quirk intel_quirks[] = {
6913static struct intel_quirk intel_quirks[] = {
9197 /* HP Mini needs pipe A force quirk (LP: #322104) */
9198 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
9199
9200 /* Thinkpad R31 needs pipe A force quirk */
9201 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
9202 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
9203 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
9204

--- 8 unchanged lines hidden (view full) ---

9213 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9214 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9215
9216 /* Lenovo U160 cannot use SSC on LVDS */
9217 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
9218
9219 /* Sony Vaio Y cannot use SSC on LVDS */
9220 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
6914 /* HP Mini needs pipe A force quirk (LP: #322104) */
6915 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
6916
6917 /* Thinkpad R31 needs pipe A force quirk */
6918 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
6919 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
6920 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
6921

--- 8 unchanged lines hidden (view full) ---

6930 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6931 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6932
6933 /* Lenovo U160 cannot use SSC on LVDS */
6934 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
6935
6936 /* Sony Vaio Y cannot use SSC on LVDS */
6937 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
6938
6939 /* Acer Aspire 5734Z must invert backlight brightness */
6940 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
9221};
9222
9223static void intel_init_quirks(struct drm_device *dev)
9224{
9225 struct intel_quirk *q;
9226 device_t d;
9227 int i;
9228

--- 19 unchanged lines hidden (view full) ---

9248 if (HAS_PCH_SPLIT(dev))
9249 vga_reg = CPU_VGACNTRL;
9250 else
9251 vga_reg = VGACNTRL;
9252
9253#if 0
9254 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
9255#endif
6941};
6942
6943static void intel_init_quirks(struct drm_device *dev)
6944{
6945 struct intel_quirk *q;
6946 device_t d;
6947 int i;
6948

--- 19 unchanged lines hidden (view full) ---

6968 if (HAS_PCH_SPLIT(dev))
6969 vga_reg = CPU_VGACNTRL;
6970 else
6971 vga_reg = VGACNTRL;
6972
6973#if 0
6974 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6975#endif
9256 outb(VGA_SR_INDEX, 1);
6976 outb(VGA_SR_INDEX, SR01);
9257 sr1 = inb(VGA_SR_DATA);
9258 outb(VGA_SR_DATA, sr1 | 1 << 5);
9259#if 0
9260 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
9261#endif
9262 DELAY(300);
9263
9264 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9265 POSTING_READ(vga_reg);
9266}
9267
6977 sr1 = inb(VGA_SR_DATA);
6978 outb(VGA_SR_DATA, sr1 | 1 << 5);
6979#if 0
6980 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6981#endif
6982 DELAY(300);
6983
6984 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
6985 POSTING_READ(vga_reg);
6986}
6987
6988static void ivb_pch_pwm_override(struct drm_device *dev)
6989{
6990 struct drm_i915_private *dev_priv = dev->dev_private;
6991
6992 /*
6993 * IVB has CPU eDP backlight regs too, set things up to let the
6994 * PCH regs control the backlight
6995 */
6996 I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
6997 I915_WRITE(BLC_PWM_CPU_CTL, 0);
6998 I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
6999}
7000
7001void intel_modeset_init_hw(struct drm_device *dev)
7002{
7003 struct drm_i915_private *dev_priv = dev->dev_private;
7004
7005 intel_init_clock_gating(dev);
7006
7007 if (IS_IRONLAKE_M(dev)) {
7008 ironlake_enable_drps(dev);
7009 ironlake_enable_rc6(dev);
7010 intel_init_emon(dev);
7011 }
7012
7013 if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
7014 gen6_enable_rps(dev_priv);
7015 gen6_update_ring_freq(dev_priv);
7016 }
7017
7018 if (IS_IVYBRIDGE(dev))
7019 ivb_pch_pwm_override(dev);
7020}
7021
9268void intel_modeset_init(struct drm_device *dev)
9269{
9270 struct drm_i915_private *dev_priv = dev->dev_private;
9271 int i, ret;
9272
9273 drm_mode_config_init(dev);
9274
9275 dev->mode_config.min_width = 0;
9276 dev->mode_config.min_height = 0;
9277
9278 dev->mode_config.preferred_depth = 24;
9279 dev->mode_config.prefer_shadow = 1;
9280
7022void intel_modeset_init(struct drm_device *dev)
7023{
7024 struct drm_i915_private *dev_priv = dev->dev_private;
7025 int i, ret;
7026
7027 drm_mode_config_init(dev);
7028
7029 dev->mode_config.min_width = 0;
7030 dev->mode_config.min_height = 0;
7031
7032 dev->mode_config.preferred_depth = 24;
7033 dev->mode_config.prefer_shadow = 1;
7034
9281 dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
9282 &intel_mode_funcs);
7035 dev->mode_config.funcs = &intel_mode_funcs;
9283
9284 intel_init_quirks(dev);
9285
7036
7037 intel_init_quirks(dev);
7038
7039 intel_init_pm(dev);
7040
7041 intel_prepare_ddi(dev);
7042
9286 intel_init_display(dev);
9287
9288 if (IS_GEN2(dev)) {
9289 dev->mode_config.max_width = 2048;
9290 dev->mode_config.max_height = 2048;
9291 } else if (IS_GEN3(dev)) {
9292 dev->mode_config.max_width = 4096;
9293 dev->mode_config.max_height = 4096;

--- 8 unchanged lines hidden (view full) ---

9302
9303 for (i = 0; i < dev_priv->num_pipe; i++) {
9304 intel_crtc_init(dev, i);
9305 ret = intel_plane_init(dev, i);
9306 if (ret)
9307 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
9308 }
9309
7043 intel_init_display(dev);
7044
7045 if (IS_GEN2(dev)) {
7046 dev->mode_config.max_width = 2048;
7047 dev->mode_config.max_height = 2048;
7048 } else if (IS_GEN3(dev)) {
7049 dev->mode_config.max_width = 4096;
7050 dev->mode_config.max_height = 4096;

--- 8 unchanged lines hidden (view full) ---

7059
7060 for (i = 0; i < dev_priv->num_pipe; i++) {
7061 intel_crtc_init(dev, i);
7062 ret = intel_plane_init(dev, i);
7063 if (ret)
7064 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
7065 }
7066
7067 intel_pch_pll_init(dev);
7068
9310 /* Just disable it once at startup */
9311 i915_disable_vga(dev);
9312 intel_setup_outputs(dev);
9313
7069 /* Just disable it once at startup */
7070 i915_disable_vga(dev);
7071 intel_setup_outputs(dev);
7072
9314 intel_init_clock_gating(dev);
9315
9316 if (IS_IRONLAKE_M(dev)) {
9317 ironlake_enable_drps(dev);
9318 intel_init_emon(dev);
9319 }
9320
9321 if (IS_GEN6(dev)) {
9322 gen6_enable_rps(dev_priv);
9323 gen6_update_ring_freq(dev_priv);
9324 }
9325
9326 TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
9327 callout_init(&dev_priv->idle_callout, CALLOUT_MPSAFE);
9328}
9329
9330void intel_modeset_gem_init(struct drm_device *dev)
9331{
7073 TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
7074 callout_init(&dev_priv->idle_callout, CALLOUT_MPSAFE);
7075}
7076
7077void intel_modeset_gem_init(struct drm_device *dev)
7078{
9332 if (IS_IRONLAKE_M(dev))
9333 ironlake_enable_rc6(dev);
7079 intel_modeset_init_hw(dev);
9334
9335 intel_setup_overlay(dev);
9336}
9337
9338void intel_modeset_cleanup(struct drm_device *dev)
9339{
9340 struct drm_i915_private *dev_priv = dev->dev_private;
9341 struct drm_crtc *crtc;

--- 14 unchanged lines hidden (view full) ---

9356 intel_crtc = to_intel_crtc(crtc);
9357 intel_increase_pllclock(crtc);
9358 }
9359
9360 intel_disable_fbc(dev);
9361
9362 if (IS_IRONLAKE_M(dev))
9363 ironlake_disable_drps(dev);
7080
7081 intel_setup_overlay(dev);
7082}
7083
7084void intel_modeset_cleanup(struct drm_device *dev)
7085{
7086 struct drm_i915_private *dev_priv = dev->dev_private;
7087 struct drm_crtc *crtc;

--- 14 unchanged lines hidden (view full) ---

7102 intel_crtc = to_intel_crtc(crtc);
7103 intel_increase_pllclock(crtc);
7104 }
7105
7106 intel_disable_fbc(dev);
7107
7108 if (IS_IRONLAKE_M(dev))
7109 ironlake_disable_drps(dev);
9364 if (IS_GEN6(dev))
7110 if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
9365 gen6_disable_rps(dev);
9366
9367 if (IS_IRONLAKE_M(dev))
9368 ironlake_disable_rc6(dev);
9369
7111 gen6_disable_rps(dev);
7112
7113 if (IS_IRONLAKE_M(dev))
7114 ironlake_disable_rc6(dev);
7115
7116 if (IS_VALLEYVIEW(dev))
7117 vlv_init_dpio(dev);
7118
9370 /* Disable the irq before mode object teardown, for the irq might
9371 * enqueue unpin/hotplug work. */
9372 drm_irq_uninstall(dev);
9373 DRM_UNLOCK(dev);
9374
9375 if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
9376 taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
9377 if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))

--- 155 unchanged lines hidden ---
7119 /* Disable the irq before mode object teardown, for the irq might
7120 * enqueue unpin/hotplug work. */
7121 drm_irq_uninstall(dev);
7122 DRM_UNLOCK(dev);
7123
7124 if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
7125 taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
7126 if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))

--- 155 unchanged lines hidden ---