Lines Matching defs:slpc

18 static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
20 return container_of(slpc, struct intel_guc, slpc);
23 static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
25 return guc_to_gt(slpc_to_guc(slpc));
28 static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
30 return slpc_to_gt(slpc)->i915;
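The three helpers above recover the enclosing objects from an embedded intel_guc_slpc: container_of() walks back from the member to struct intel_guc, and the gt and i915 converters chain through it. A minimal userspace sketch of the container_of() pattern, with illustrative stand-in types rather than the real driver definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct slpc { int state; };

struct guc {
	int id;
	struct slpc slpc;	/* embedded member, as in struct intel_guc */
};

static struct guc *slpc_to_guc(struct slpc *slpc)
{
	/* Walk back from the embedded member to its parent object. */
	return container_of(slpc, struct guc, slpc);
}

int main(void)
{
	struct guc g = { .id = 7 };

	printf("%d\n", slpc_to_guc(&g.slpc)->id);	/* prints 7 */
	return 0;
}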
48 void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
50 struct intel_guc *guc = slpc_to_guc(slpc);
52 slpc->supported = __detect_slpc_supported(guc);
53 slpc->selected = __guc_slpc_selected(guc);
90 static u32 slpc_get_state(struct intel_guc_slpc *slpc)
94 GEM_BUG_ON(!slpc->vma);
96 drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
97 data = slpc->vaddr;
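slpc_get_state() reads the global state from the header of the GuC-shared page; the clflush first discards any stale CPU cacheline so the GuC's latest write is seen (slpc_query_task_state() below does the same for the full page before the decode helpers run). A sketch of the flush-then-read pattern, where flush_range() is a hypothetical stand-in for drm_clflush_virt_range():

#include <stdio.h>

/* Hypothetical stand-in for drm_clflush_virt_range(); a no-op here. */
static void flush_range(void *addr, unsigned long len)
{
	(void)addr;
	(void)len;
}

struct shared_header { unsigned int global_state; };

static unsigned int read_state(void *vaddr)
{
	struct shared_header *hdr;

	/* Drop stale CPU cachelines before reading device-written data. */
	flush_range(vaddr, sizeof(unsigned int));
	hdr = vaddr;
	return hdr->global_state;
}

int main(void)
{
	struct shared_header h = { .global_state = 3 };

	printf("state = %u\n", read_state(&h));
	return 0;
}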
117 static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
119 struct intel_guc *guc = slpc_to_guc(slpc);
152 static bool slpc_is_running(struct intel_guc_slpc *slpc)
154 return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
172 static int slpc_query_task_state(struct intel_guc_slpc *slpc)
174 struct intel_guc *guc = slpc_to_guc(slpc);
175 u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
182 drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
187 static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
189 struct intel_guc *guc = slpc_to_guc(slpc);
202 static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
204 struct intel_guc *guc = slpc_to_guc(slpc);
211 static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
213 struct intel_guc *guc = slpc_to_guc(slpc);
214 struct drm_i915_private *i915 = slpc_to_i915(slpc);
218 lockdep_assert_held(&slpc->lock);
233 ret = slpc_set_param_nb(slpc,
246 struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
255 mutex_lock(&slpc->lock);
256 if (atomic_read(&slpc->num_waiters)) {
257 err = slpc_force_min_freq(slpc, slpc->boost_freq);
259 slpc->num_boosts++;
261 mutex_unlock(&slpc->lock);
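slpc_boost_work() re-checks num_waiters under slpc->lock before forcing boost_freq, so a waiter that dropped out between queueing and execution cannot leave a stale boost applied; intel_guc_slpc_dec_waiters() near the end of the listing is the matching release path, restoring the min softlimit when the count hits zero. A userspace sketch of that pairing using POSIX mutexes and C11 atomics, all names illustrative:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int num_waiters;
static unsigned int min_freq_softlimit = 300;	/* illustrative values */
static unsigned int boost_freq = 1100;
static unsigned int cur_min_freq;

static void force_min_freq(unsigned int freq)
{
	cur_min_freq = freq;	/* stands in for the real request to firmware */
}

static void boost_work(void)
{
	pthread_mutex_lock(&lock);
	/* Re-check under the lock: a waiter may already have gone away. */
	if (atomic_load(&num_waiters))
		force_min_freq(boost_freq);
	pthread_mutex_unlock(&lock);
}

static void dec_waiters(void)
{
	pthread_mutex_lock(&lock);
	/* fetch_sub() == 1 means the count just hit zero, as with
	 * atomic_dec_and_test(): restore the normal softlimit. */
	if (atomic_fetch_sub(&num_waiters, 1) == 1)
		force_min_freq(min_freq_softlimit);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	atomic_fetch_add(&num_waiters, 1);	/* a waiter arrives */
	boost_work();				/* boost applied */
	dec_waiters();				/* last waiter: boost dropped */
	return cur_min_freq == min_freq_softlimit ? 0 : 1;
}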
264 int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
266 struct intel_guc *guc = slpc_to_guc(slpc);
270 GEM_BUG_ON(slpc->vma);
272 err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
278 slpc->max_freq_softlimit = 0;
279 slpc->min_freq_softlimit = 0;
280 slpc->ignore_eff_freq = false;
281 slpc->min_is_rpmax = false;
283 slpc->boost_freq = 0;
284 atomic_set(&slpc->num_waiters, 0);
285 slpc->num_boosts = 0;
286 slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
288 rw_init(&slpc->lock, "slpc");
289 INIT_WORK(&slpc->boost_work, slpc_boost_work);
314 static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
316 return slpc_global_state_to_string(slpc_get_state(slpc));
334 static int slpc_reset(struct intel_guc_slpc *slpc)
336 struct intel_guc *guc = slpc_to_guc(slpc);
337 u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
348 if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
350 slpc_get_state_string(slpc));
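slpc_reset() triggers the GuC-side reset and then polls until the shared state reads SLPC_GLOBAL_STATE_RUNNING, logging the state string if SLPC_RESET_TIMEOUT_MS elapses first. A simplified userspace sketch of such a bounded poll, assuming POSIX clock_gettime() and usleep() (the kernel's wait_for() additionally does adaptive backoff):

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Returns 0 once cond() holds, -1 if timeout_ms elapses first. */
static int wait_for_cond(bool (*cond)(void), long timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	while (!cond()) {
		if (now_ms() > deadline)
			return -1;
		usleep(1000);	/* poll every millisecond */
	}
	return 0;
}

static bool always_true(void) { return true; }

int main(void)
{
	return wait_for_cond(always_true, 5);
}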
358 static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
360 struct slpc_shared_data *data = slpc->vaddr;
362 GEM_BUG_ON(!slpc->vma);
369 static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
371 struct slpc_shared_data *data = slpc->vaddr;
373 GEM_BUG_ON(!slpc->vma);
399 * @slpc: pointer to intel_guc_slpc.
407 int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
409 struct drm_i915_private *i915 = slpc_to_i915(slpc);
413 if (val < slpc->min_freq ||
414 val > slpc->rp0_freq ||
415 val < slpc->min_freq_softlimit)
419 ret = slpc_set_param(slpc,
429 slpc->max_freq_softlimit = val;
436 * @slpc: pointer to intel_guc_slpc.
444 int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
446 struct drm_i915_private *i915 = slpc_to_i915(slpc);
452 ret = slpc_query_task_state(slpc);
455 *val = slpc_decode_max_freq(slpc);
461 int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
463 struct drm_i915_private *i915 = slpc_to_i915(slpc);
467 mutex_lock(&slpc->lock);
470 ret = slpc_set_param(slpc,
474 guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
477 slpc->ignore_eff_freq = val;
481 ret = slpc_set_param(slpc,
483 slpc->min_freq);
487 mutex_unlock(&slpc->lock);
493 * @slpc: pointer to intel_guc_slpc.
501 int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
503 struct drm_i915_private *i915 = slpc_to_i915(slpc);
507 if (val < slpc->min_freq ||
508 val > slpc->rp0_freq ||
509 val > slpc->max_freq_softlimit)
513 mutex_lock(&slpc->lock);
516 ret = slpc_set_param(slpc,
521 slpc->min_freq_softlimit = val;
524 mutex_unlock(&slpc->lock);
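Both setters above validate against the hardware range [min_freq, rp0_freq] and against the opposite softlimit, so the max can never be pushed below the min or vice versa. A condensed sketch of that cross-check; the field names mirror the driver, but the struct is illustrative:

struct limits {
	unsigned int min_freq, rp0_freq;	/* hardware range */
	unsigned int min_soft, max_soft;	/* user softlimits */
};

static int set_max_freq(struct limits *l, unsigned int val)
{
	/* Must stay in hardware range and above the min softlimit. */
	if (val < l->min_freq || val > l->rp0_freq || val < l->min_soft)
		return -1;
	l->max_soft = val;
	return 0;
}

static int set_min_freq(struct limits *l, unsigned int val)
{
	/* Must stay in hardware range and below the max softlimit. */
	if (val < l->min_freq || val > l->rp0_freq || val > l->max_soft)
		return -1;
	l->min_soft = val;
	return 0;
}

int main(void)
{
	struct limits l = {
		.min_freq = 300, .rp0_freq = 1100,
		.min_soft = 300, .max_soft = 1100,
	};

	/* 200 MHz is below the hardware minimum: rejected. */
	return set_max_freq(&l, 200) ? 0 : 1;
}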
535 * @slpc: pointer to intel_guc_slpc.
543 int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
545 struct drm_i915_private *i915 = slpc_to_i915(slpc);
551 ret = slpc_query_task_state(slpc);
554 *val = slpc_decode_min_freq(slpc);
560 int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
562 struct drm_i915_private *i915 = slpc_to_i915(slpc);
570 ret = slpc_set_param(slpc,
591 static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
600 if (!slpc->max_freq_softlimit) {
601 slpc->max_freq_softlimit = slpc->rp0_freq;
602 slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
603 } else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
604 ret = intel_guc_slpc_set_max_freq(slpc,
605 slpc->max_freq_softlimit);
611 if (!slpc->min_freq_softlimit) {
613 slpc->min_freq_softlimit = slpc->min_freq;
614 slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
616 return intel_guc_slpc_set_min_freq(slpc,
617 slpc->min_freq_softlimit);
623 static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
628 ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
630 guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
640 static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
645 if (!slpc->min_freq_softlimit &&
646 is_slpc_min_freq_rpmax(slpc)) {
647 slpc->min_is_rpmax = true;
648 slpc->min_freq_softlimit = slpc->rp0_freq;
649 (slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
653 static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
656 return slpc_set_param(slpc,
658 slpc->rp0_freq);
661 static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
663 struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
667 slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
668 slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
669 slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
671 if (!slpc->boost_freq)
672 slpc->boost_freq = slpc->rp0_freq;
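slpc_get_rp_values() scales the raw RP0/RP1/min fields out of the caps registers into real frequencies via intel_gpu_freq() and, on first run, defaults boost_freq to RP0. An illustrative sketch assuming the common 50 MHz-per-unit scaling (the actual conversion is platform-dependent):

#define GT_FREQUENCY_MULTIPLIER 50	/* assumed MHz per RP unit */

struct rp_caps { unsigned int rp0, rp1, min; };	/* raw register fields */

struct freqs { unsigned int rp0, rp1, min, boost; };

static void get_rp_values(const struct rp_caps *caps, struct freqs *f)
{
	f->rp0 = caps->rp0 * GT_FREQUENCY_MULTIPLIER;
	f->rp1 = caps->rp1 * GT_FREQUENCY_MULTIPLIER;
	f->min = caps->min * GT_FREQUENCY_MULTIPLIER;

	/* Boost defaults to RP0 once RP0 is known, if nothing set it. */
	if (!f->boost)
		f->boost = f->rp0;
}

int main(void)
{
	struct rp_caps caps = { .rp0 = 22, .rp1 = 12, .min = 6 };
	struct freqs f = { 0 };

	get_rp_values(&caps, &f);
	return f.boost == f.rp0 ? 0 : 1;	/* boost picked up RP0 */
}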
677 * @slpc: pointer to intel_guc_slpc.
684 int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
687 struct drm_i915_private *i915 = slpc_to_i915(slpc);
694 ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
696 guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
703 int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
705 struct drm_i915_private *i915 = slpc_to_i915(slpc);
710 ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
712 guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
720 * @slpc: pointer to intel_guc_slpc.
731 int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
733 struct intel_guc *guc = slpc_to_guc(slpc);
736 GEM_BUG_ON(!slpc->vma);
738 slpc_shared_data_reset(slpc->vaddr);
740 ret = slpc_reset(slpc);
746 ret = slpc_query_task_state(slpc);
750 intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
752 slpc_get_rp_values(slpc);
755 update_server_min_softlimit(slpc);
758 ret = slpc_use_fused_rp0(slpc);
765 intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
768 ret = slpc_set_softlimits(slpc);
775 intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
780 int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
784 if (val < slpc->min_freq || val > slpc->rp0_freq)
787 mutex_lock(&slpc->lock);
789 if (slpc->boost_freq != val) {
791 if (atomic_read(&slpc->num_waiters)) {
792 ret = slpc_force_min_freq(slpc, val);
799 slpc->boost_freq = val;
803 mutex_unlock(&slpc->lock);
807 void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
815 mutex_lock(&slpc->lock);
816 if (atomic_dec_and_test(&slpc->num_waiters))
817 slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
818 mutex_unlock(&slpc->lock);
821 int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
823 struct drm_i915_private *i915 = slpc_to_i915(slpc);
824 struct slpc_shared_data *data = slpc->vaddr;
829 GEM_BUG_ON(!slpc->vma);
832 ret = slpc_query_task_state(slpc);
837 drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
841 slpc_decode_max_freq(slpc));
843 slpc_decode_min_freq(slpc));
845 slpc->num_boosts);
847 atomic_read(&slpc->num_waiters));
854 void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
856 if (!slpc->vma)
859 i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);