Lines Matching defs:wm

362 		const struct skl_plane_wm *wm =
363 &crtc_state->wm.skl.optimal.planes[plane_id];
367 if (!wm->wm[0].enable)
370 /* Find the highest enabled wm level for this plane */
371 for (level = i915->display.wm.num_levels - 1;
372 !wm->wm[level].enable; --level)
375 /* Highest common enabled wm level for all planes */
384 const struct skl_plane_wm *wm =
385 &crtc_state->wm.skl.optimal.planes[plane_id];
388 * All enabled planes must have enabled a common wm level that
391 if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
407 const struct skl_plane_wm *wm =
408 &crtc_state->wm.skl.optimal.planes[plane_id];
410 if (wm->wm[0].enable && !wm->sagv.wm0.enable)
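
The hits from lines 362-410 are the SAGV wm-level selection: for each enabled plane, scan downward from the top watermark level to find its highest enabled level, then take the lowest of those as the highest level all planes have in common. A minimal standalone sketch of that scan, using a simplified plane_wm struct and a made-up NUM_LEVELS in place of i915->display.wm.num_levels (illustration only, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

#define NUM_LEVELS 8	/* stand-in for i915->display.wm.num_levels */

struct plane_wm {
	bool enable[NUM_LEVELS];	/* wm->wm[level].enable */
};

/* Highest enabled wm level for one plane; caller guarantees wm[0] is enabled. */
static int highest_enabled_level(const struct plane_wm *wm)
{
	int level;

	for (level = NUM_LEVELS - 1; !wm->enable[level]; level--)
		;
	return level;
}

/* Highest wm level enabled on every active plane (cf. lines 375-391). */
static int highest_common_level(const struct plane_wm *planes, int num_planes)
{
	int max_level = NUM_LEVELS - 1;

	for (int i = 0; i < num_planes; i++) {
		int level;

		if (!planes[i].enable[0])
			continue;	/* plane disabled, ignore it */

		level = highest_enabled_level(&planes[i]);
		if (level < max_level)
			max_level = level;
	}
	return max_level;
}

int main(void)
{
	struct plane_wm planes[2] = {
		{ .enable = { true, true, true, true } },
		{ .enable = { true, true } },
	};

	printf("highest common level: %d\n", highest_common_level(planes, 2));
	return 0;
}
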
453 struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
699 crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
700 crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
730 unsigned int latency = i915->display.wm.skl_latency[level];
755 struct skl_wm_level wm = {};
767 for (level = 0; level < i915->display.wm.num_levels; level++) {
770 skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
771 if (wm.min_ddb_alloc == U16_MAX)
774 min_ddb_alloc = wm.min_ddb_alloc;
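
Lines 730-774 are the cursor DDB sizing loop: compute a watermark for each level's latency in turn, stop as soon as a level no longer fits (min_ddb_alloc comes back as U16_MAX), and keep the last valid requirement. A hedged sketch of that pattern, with a stubbed compute step standing in for skl_compute_plane_wm() and an invented latency table:

#include <stdint.h>
#include <stdio.h>

#define NUM_LEVELS 8

struct wm_level {
	uint16_t min_ddb_alloc;		/* UINT16_MAX means "does not fit" */
};

/* Stand-in for skl_compute_plane_wm(): deeper levels want more DDB blocks. */
static void compute_level(int level, unsigned int latency, struct wm_level *out)
{
	out->min_ddb_alloc = latency > 60 ? UINT16_MAX : (uint16_t)(8 + 4 * level);
}

int main(void)
{
	/* Made-up latency table (us), cf. i915->display.wm.skl_latency[] */
	const unsigned int latency[NUM_LEVELS] = { 2, 4, 10, 20, 35, 50, 70, 90 };
	uint16_t min_ddb_alloc = 0;

	for (int level = 0; level < NUM_LEVELS; level++) {
		struct wm_level wm = { 0 };

		compute_level(level, latency[level], &wm);
		if (wm.min_ddb_alloc == UINT16_MAX)
			break;		/* this level no longer fits, stop here */

		/* keep the deepest level's requirement seen so far */
		min_ddb_alloc = wm.min_ddb_alloc;
	}

	printf("cursor needs %u ddb blocks\n", min_ddb_alloc);
	return 0;
}
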
1402 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1405 return &wm->sagv.wm0;
1407 return &wm->wm[level];
1414 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1417 return &wm->sagv.trans_wm;
1419 return &wm->trans_wm;
1435 skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
1437 if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
1438 memset(wm, 0, sizeof(*wm));
1442 skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
1445 if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
1447 memset(wm, 0, sizeof(*wm));
1453 const struct skl_plane_wm *wm)
1461 * like PSR, might still use even disabled wm level registers,
1464 * decided to simply do it for all of the platforms, as those wm
1467 return level > 0 && !wm->wm[level].enable;
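
skl_check_wm_level()/skl_check_nv12_wm_level() (lines 1435-1447) throw away any computed level that needs more DDB blocks than the plane was allotted, and skl_need_wm_copy_wa() (lines 1453-1467) then lets a disabled level inherit the values of the level below it, because hardware features such as PSR may still read the registers of nominally disabled levels. A simplified standalone sketch of both steps, with cut-down structs rather than the driver's types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct ddb_entry { uint16_t start, end; };

struct wm_level {
	bool enable;
	bool ignore_lines;
	uint16_t blocks;
	uint16_t lines;
	uint16_t min_ddb_alloc;
};

static uint16_t ddb_entry_size(const struct ddb_entry *e)
{
	return e->end - e->start;
}

/* Drop a level that does not fit into the plane's DDB allocation. */
static void check_wm_level(struct wm_level *wm, const struct ddb_entry *ddb)
{
	if (wm->min_ddb_alloc > ddb_entry_size(ddb))
		memset(wm, 0, sizeof(*wm));
}

/*
 * Copy workaround: a disabled level (level > 0) still gets valid register
 * values, taken from the level below it, so that hardware reading a
 * "disabled" level never sees garbage (cf. lines 1636-1639).
 */
static void apply_copy_wa(struct wm_level wm[], int num_levels)
{
	for (int level = 1; level < num_levels; level++) {
		if (!wm[level].enable) {
			wm[level].blocks = wm[level - 1].blocks;
			wm[level].lines = wm[level - 1].lines;
			wm[level].ignore_lines = wm[level - 1].ignore_lines;
		}
	}
}

int main(void)
{
	struct ddb_entry ddb = { .start = 0, .end = 32 };
	struct wm_level wm[3] = {
		{ .enable = true, .blocks = 16, .lines = 4, .min_ddb_alloc = 20 },
		{ .enable = true, .blocks = 24, .lines = 6, .min_ddb_alloc = 28 },
		{ .enable = true, .blocks = 48, .lines = 9, .min_ddb_alloc = 52 },
	};

	for (int level = 0; level < 3; level++)
		check_wm_level(&wm[level], &ddb);	/* level 2 gets zeroed */
	apply_copy_wa(wm, 3);				/* level 2 inherits level 1 */
	return 0;
}
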
1478 const struct skl_wm_level *wm,
1496 size = wm->min_ddb_alloc + extra;
1520 memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
1521 memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
1534 skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
1543 for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
1546 const struct skl_plane_wm *wm =
1547 &crtc_state->wm.skl.optimal.planes[plane_id];
1551 &crtc_state->wm.skl.plane_ddb[plane_id];
1553 if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1555 wm->wm[level].min_ddb_alloc != U16_MAX);
1562 blocks += wm->wm[level].min_ddb_alloc;
1563 blocks += wm->uv_wm[level].min_ddb_alloc;
1591 &crtc_state->wm.skl.plane_ddb[plane_id];
1593 &crtc_state->wm.skl.plane_ddb_y[plane_id];
1594 const struct skl_plane_wm *wm =
1595 &crtc_state->wm.skl.optimal.planes[plane_id];
1602 skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
1604 skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
1607 skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
1619 for (level++; level < i915->display.wm.num_levels; level++) {
1622 &crtc_state->wm.skl.plane_ddb[plane_id];
1624 &crtc_state->wm.skl.plane_ddb_y[plane_id];
1625 struct skl_plane_wm *wm =
1626 &crtc_state->wm.skl.optimal.planes[plane_id];
1630 skl_check_nv12_wm_level(&wm->wm[level],
1631 &wm->uv_wm[level],
1634 skl_check_wm_level(&wm->wm[level], ddb);
1636 if (skl_need_wm_copy_wa(i915, level, wm)) {
1637 wm->wm[level].blocks = wm->wm[level - 1].blocks;
1638 wm->wm[level].lines = wm->wm[level - 1].lines;
1639 wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
1650 &crtc_state->wm.skl.plane_ddb[plane_id];
1652 &crtc_state->wm.skl.plane_ddb_y[plane_id];
1653 struct skl_plane_wm *wm =
1654 &crtc_state->wm.skl.optimal.planes[plane_id];
1658 skl_check_wm_level(&wm->trans_wm, ddb_y);
1662 skl_check_wm_level(&wm->trans_wm, ddb);
1665 skl_check_wm_level(&wm->sagv.wm0, ddb);
1666 skl_check_wm_level(&wm->sagv.trans_wm, ddb);
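
The block of hits from lines 1478-1666 is the per-pipe DDB allocation: pass 1 finds the highest wm level whose summed minimum requirements fit in the pipe's allotment (lines 1543-1563), pass 2 hands each plane its minimum at that level plus a share of the leftover blocks ("size = wm->min_ddb_alloc + extra", line 1496), and a final pass invalidates whatever no longer fits. A rough sketch of the first two passes with simplified types and a made-up rate weighting (the driver weighs by relative data rate; this is only an illustration):

#include <stdint.h>
#include <stdio.h>

#define NUM_LEVELS 8

struct plane_req {
	uint16_t min_ddb[NUM_LEVELS];	/* min_ddb_alloc per wm level */
	uint64_t rate;			/* relative data rate, used as a weight */
};

/* Pass 1: highest level whose summed minimums fit in alloc_size blocks. */
static int highest_fitting_level(const struct plane_req *p, int n,
				 uint16_t alloc_size)
{
	for (int level = NUM_LEVELS - 1; level >= 0; level--) {
		uint32_t blocks = 0;

		for (int i = 0; i < n; i++)
			blocks += p[i].min_ddb[level];
		if (blocks <= alloc_size)
			return level;
	}
	return -1;	/* not even level 0 fits */
}

/* Pass 2: minimum at the chosen level plus a rate-proportional share of the rest. */
static void distribute_ddb(const struct plane_req *p, int n, int level,
			   uint16_t alloc_size, uint16_t size[])
{
	uint32_t used = 0;
	uint64_t total_rate = 0;
	uint16_t leftover;

	for (int i = 0; i < n; i++) {
		used += p[i].min_ddb[level];
		total_rate += p[i].rate;
	}
	leftover = alloc_size - used;

	for (int i = 0; i < n; i++) {
		uint16_t extra = total_rate ?
			(uint16_t)(leftover * p[i].rate / total_rate) : 0;
		size[i] = p[i].min_ddb[level] + extra;
	}
}

int main(void)
{
	struct plane_req planes[2] = {
		{ .min_ddb = { 8, 12, 16, 24, 32, 48, 64, 96 }, .rate = 3 },
		{ .min_ddb = { 4,  6,  8, 12, 16, 24, 32, 48 }, .rate = 1 },
	};
	uint16_t size[2];
	int level = highest_fitting_level(planes, 2, 64);

	if (level >= 0) {
		distribute_ddb(planes, 2, level, 64, size);
		printf("level %d: plane0=%u plane1=%u\n", level, size[0], size[1]);
	}
	return 0;
}
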
1923 * wm calculations.
2008 for (level = 0; level < i915->display.wm.num_levels; level++) {
2026 struct skl_wm_level *levels = plane_wm->wm;
2107 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2116 skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
2118 skl_compute_transition_wm(i915, &wm->trans_wm,
2119 &wm->wm[0], &wm_params);
2122 tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
2124 skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
2125 &wm->sagv.wm0, &wm_params);
2135 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2139 wm->is_planar = true;
2147 skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
2157 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2161 memset(wm, 0, sizeof(*wm));
2187 struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2194 memset(wm, 0, sizeof(*wm));
2244 const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
2247 wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
2260 for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
2298 crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
2300 for (level++; level < i915->display.wm.num_levels; level++) {
2304 struct skl_plane_wm *wm =
2305 &crtc_state->wm.skl.optimal.planes[plane_id];
2311 wm->wm[level].enable = false;
2312 wm->uv_wm[level].enable = false;
2323 struct skl_plane_wm *wm =
2324 &crtc_state->wm.skl.optimal.planes[plane_id];
2326 wm->sagv.wm0.enable = false;
2327 wm->sagv.trans_wm.enable = false;
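
The hits around lines 2244-2327 enforce the vblank constraint: take the worst-case wm0 line count across planes, find the deepest wm level whose latency still leaves enough vblank time, then disable every deeper level (and, if needed, the SAGV watermarks). A hedged sketch of that selection, with a toy budget check standing in for skl_is_vblank_too_short():

#include <stdbool.h>
#include <stdio.h>

#define NUM_LEVELS 8

/* Toy stand-in for skl_is_vblank_too_short(): does the wm0 exit time plus this
 * level's extra latency still fit within the vblank duration? */
static bool vblank_too_short(int wm0_lines, int latency_us,
			     int vblank_us, int line_time_us)
{
	return wm0_lines * line_time_us + latency_us > vblank_us;
}

/* Deepest usable wm level, mirroring the downward scan at line 2260. */
static int max_level_for_vblank(const int latency[], int wm0_lines,
				int vblank_us, int line_time_us)
{
	for (int level = NUM_LEVELS - 1; level >= 0; level--) {
		if (latency[level] == 0)
			continue;	/* unused level, skip it */
		/* level 0 is checked with zero extra latency, as in the driver */
		if (!vblank_too_short(wm0_lines, level ? latency[level] : 0,
				      vblank_us, line_time_us))
			return level;
	}
	return -1;
}

int main(void)
{
	const int latency[NUM_LEVELS] = { 2, 4, 10, 20, 35, 50, 70, 90 };
	bool enable[NUM_LEVELS] = { true, true, true, true, true, true, true, true };
	int level = max_level_for_vblank(latency, 20, 80, 2);
	int left = 0;

	/* Disable every level deeper than the chosen one (cf. lines 2300-2312). */
	for (int l = level + 1; l < NUM_LEVELS; l++)
		enable[l] = false;

	for (int l = 0; l < NUM_LEVELS; l++)
		left += enable[l];

	printf("deepest usable level: %d, %d levels still enabled\n", level, left);
	return 0;
}
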
2361 crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
2400 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2402 &crtc_state->wm.skl.plane_ddb[plane_id];
2404 &crtc_state->wm.skl.plane_ddb_y[plane_id];
2407 for (level = 0; level < i915->display.wm.num_levels; level++)
2415 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2418 &wm->sagv.wm0);
2420 &wm->sagv.trans_wm);
2437 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2439 &crtc_state->wm.skl.plane_ddb[plane_id];
2442 for (level = 0; level < i915->display.wm.num_levels; level++)
2450 const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2453 &wm->sagv.wm0);
2455 &wm->sagv.trans_wm);
2476 for (level = 0; level < i915->display.wm.num_levels; level++) {
2482 if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
2537 if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
2538 &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
2539 skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
2540 &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
2704 old_pipe_wm = &old_crtc_state->wm.skl.optimal;
2705 new_pipe_wm = &new_crtc_state->wm.skl.optimal;
2711 old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
2712 new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
2738 enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
2739 enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
2740 enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
2741 enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
2745 enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
2746 enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
2747 enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
2748 enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
2757 enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
2758 enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
2759 enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
2760 enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
2761 enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
2762 enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
2763 enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
2764 enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
2768 enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
2769 enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
2770 enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
2771 enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
2772 enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
2773 enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
2774 enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
2775 enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
2784 old_wm->wm[0].blocks, old_wm->wm[1].blocks,
2785 old_wm->wm[2].blocks, old_wm->wm[3].blocks,
2786 old_wm->wm[4].blocks, old_wm->wm[5].blocks,
2787 old_wm->wm[6].blocks, old_wm->wm[7].blocks,
2791 new_wm->wm[0].blocks, new_wm->wm[1].blocks,
2792 new_wm->wm[2].blocks, new_wm->wm[3].blocks,
2793 new_wm->wm[4].blocks, new_wm->wm[5].blocks,
2794 new_wm->wm[6].blocks, new_wm->wm[7].blocks,
2803 old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
2804 old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
2805 old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
2806 old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
2810 new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
2811 new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
2812 new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
2813 new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
2828 for (level = 0; level < i915->display.wm.num_levels; level++) {
2889 * Force a full wm update for every plane on modeset.
2890 * Required because the reset value of the wm registers
2898 &old_crtc_state->wm.skl.optimal,
2899 &new_crtc_state->wm.skl.optimal))
3013 struct skl_plane_wm *wm = &out->planes[plane_id];
3015 for (level = 0; level < i915->display.wm.num_levels; level++) {
3021 skl_wm_level_from_reg_val(val, &wm->wm[level]);
3029 skl_wm_level_from_reg_val(val, &wm->trans_wm);
3037 skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
3044 skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
3046 wm->sagv.wm0 = wm->wm[0];
3047 wm->sagv.trans_wm = wm->trans_wm;
3071 memset(&crtc_state->wm.skl.optimal, 0,
3072 sizeof(crtc_state->wm.skl.optimal));
3074 skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
3075 crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
3081 &crtc_state->wm.skl.plane_ddb[plane_id];
3083 &crtc_state->wm.skl.plane_ddb_y[plane_id];
3104 crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
3105 crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
3109 skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
3133 entries[crtc->pipe] = crtc_state->wm.skl.ddb;
3146 if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
3186 memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
3205 struct skl_pipe_wm wm;
3207 const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
3219 skl_pipe_wm_get_hw_state(crtc, &hw->wm);
3237 for (level = 0; level < i915->display.wm.num_levels; level++) {
3238 hw_wm_level = &hw->wm.planes[plane->id].wm[level];
3255 hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
3270 hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
3286 hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
3304 sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
3320 return i915->display.wm.ipc_enabled;
3352 i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
3359 u16 wm[], int num_levels, int read_latency)
3370 if (wm[level] == 0) {
3372 wm[i] = 0;
3386 if (wm[0] == 0) {
3388 wm[level] += read_latency;
3398 wm[0] += 1;
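
adjust_wm_latency() (hits at lines 3359-3398) sanitizes the latency table read back from hardware: a zero latency at level n > 0 invalidates every deeper level, a zero level-0 latency means the read latency has to be added back to all remaining levels (WaWmMemoryReadLatency), and some memory configurations add one extra microsecond to level 0. A standalone sketch of that sanitizing pass (the numbers in main() are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void adjust_latency(uint16_t wm[], int num_levels, int read_latency,
			   bool level0_adjust)
{
	/* A zero latency at level n (n > 0) disables that level and all deeper ones. */
	for (int level = 1; level < num_levels; level++) {
		if (wm[level] == 0) {
			for (int i = level + 1; i < num_levels; i++)
				wm[i] = 0;
			num_levels = level;
			break;
		}
	}

	/*
	 * If level 0 reads back as 0, the hardware left out the read latency;
	 * add it back to every remaining level.
	 */
	if (wm[0] == 0) {
		for (int level = 0; level < num_levels; level++)
			wm[level] += read_latency;
	}

	/* Optional +1us level-0 adjustment (e.g. for certain DIMM configurations). */
	if (level0_adjust)
		wm[0] += 1;
}

int main(void)
{
	uint16_t latency[8] = { 0, 4, 10, 0, 35, 50, 70, 90 };

	adjust_latency(latency, 8, 6, true);
	for (int i = 0; i < 8; i++)
		printf("%u ", latency[i]);
	printf("\n");
	return 0;
}
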
3401 static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3403 int num_levels = i915->display.wm.num_levels;
3407 wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3408 wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3411 wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3412 wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3415 wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3416 wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3418 adjust_wm_latency(i915, wm, num_levels, 6);
3421 static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3423 int num_levels = i915->display.wm.num_levels;
3437 wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3438 wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3439 wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3440 wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3450 wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3451 wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3452 wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3453 wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3455 adjust_wm_latency(i915, wm, num_levels, read_latency);
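
mtl_read_wm_latency() and skl_read_wm_latency() (lines 3401-3455) just unpack pairs of latency values from 32-bit register words with field masks before handing them to adjust_wm_latency(). A generic illustration of that even/odd field extraction; the mask layout and register values below are invented, not the real MTL_LATENCY_* or GEN9_MEM_LATENCY_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low 16 bits = even level, high 16 bits = odd level. */
#define LATENCY_EVEN_MASK 0x0000ffffu
#define LATENCY_ODD_MASK  0xffff0000u

/* Minimal REG_FIELD_GET-style helper: extract and right-align a masked field. */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* Pretend register reads for levels 0/1, 2/3 and 4/5. */
	const uint32_t regs[3] = { 0x00040002, 0x0014000a, 0x00320023 };
	uint16_t wm[6];

	for (int i = 0; i < 3; i++) {
		wm[2 * i]     = field_get(LATENCY_EVEN_MASK, regs[i]);
		wm[2 * i + 1] = field_get(LATENCY_ODD_MASK, regs[i]);
	}

	for (int i = 0; i < 6; i++)
		printf("level %d: %u us\n", i, wm[i]);
	return 0;
}
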
3461 i915->display.wm.num_levels = 6;
3463 i915->display.wm.num_levels = 8;
3466 mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
3468 skl_read_wm_latency(i915, i915->display.wm.skl_latency);
3470 intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
3484 i915->display.funcs.wm = &skl_wm_funcs;
3864 i915->display.wm.ipc_enabled = enable;
3918 for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {