Searched refs:gvt (Results 1 - 25 of 32) sorted by relevance


/linux-master/drivers/gpu/drm/i915/gvt/
Makefile
4 gvt/aperture_gm.o \
5 gvt/cfg_space.o \
6 gvt/cmd_parser.o \
7 gvt/debugfs.o \
8 gvt/display.o \
9 gvt/dmabuf.o \
10 gvt/edid.o \
11 gvt/execlist.o \
12 gvt/fb_decoder.o \
13 gvt/firmwar
[all...]
sched_policy.h
41 int (*init)(struct intel_gvt *gvt);
42 void (*clean)(struct intel_gvt *gvt);
49 void intel_gvt_schedule(struct intel_gvt *gvt);
51 int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
53 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
63 void intel_gvt_kick_schedule(struct intel_gvt *gvt);
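
The sched_policy.h hits above outline GVT-g's pluggable vGPU scheduler interface: a policy supplies init/clean style callbacks, and the driver drives it through intel_gvt_schedule(), intel_gvt_init_sched_policy(), intel_gvt_clean_sched_policy() and intel_gvt_kick_schedule(). The stand-alone C sketch below only models the ops-table idiom those declarations imply; struct sched_policy_ops, tbs_init() and tbs_clean() are simplified stand-ins, not the kernel's actual definitions.

    #include <stdio.h>

    struct gvt;                               /* opaque device handle (stub) */

    /* Simplified analogue of the policy callback table sched_policy.h declares. */
    struct sched_policy_ops {
        int  (*init)(struct gvt *gvt);
        void (*clean)(struct gvt *gvt);
    };

    static int tbs_init(struct gvt *gvt)
    {
        (void)gvt;
        printf("policy init\n");
        return 0;
    }

    static void tbs_clean(struct gvt *gvt)
    {
        (void)gvt;
        printf("policy clean\n");
    }

    /* A time-based policy would be registered as one such table. */
    static const struct sched_policy_ops tbs_policy = {
        .init  = tbs_init,
        .clean = tbs_clean,
    };

    int main(void)
    {
        struct gvt *gvt = NULL;               /* stand-in; the stubs ignore it */

        if (tbs_policy.init(gvt))             /* roughly what an init entry point */
            return 1;                         /* would do through the real ops    */
        tbs_policy.clean(gvt);
        return 0;
    }

An ops table of this shape is what lets sched_policy.c plug in a concrete policy (the in-tree one is time-based) without the callers caring which policy is active.
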
aperture_gm.c
40 #include "gvt.h"
44 struct intel_gvt *gvt = vgpu->gvt; local
45 struct intel_gt *gt = gvt->gt;
54 start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
55 end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
60 start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
61 end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
82 struct intel_gvt *gvt = vgpu->gvt; local
110 struct intel_gvt *gvt = vgpu->gvt; local
132 struct intel_gvt *gvt = vgpu->gvt; local
168 struct intel_gvt *gvt = vgpu->gvt; local
193 struct intel_gvt *gvt = vgpu->gvt; local
235 struct intel_gvt *gvt = vgpu->gvt; local
245 struct intel_gvt *gvt = vgpu->gvt; local
323 struct intel_gvt *gvt = vgpu->gvt; local
[all...]
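
aperture_gm.c allocates each vGPU's share of graphics memory from either the mappable aperture or the hidden (high) range, first aligning both window bounds to I915_GTT_PAGE_SIZE as in the hits above. The short program below just reproduces that alignment arithmetic; the base/end values are invented for illustration, and ALIGN_UP is a local stand-in for the kernel's ALIGN() macro.

    #include <stdio.h>

    #define I915_GTT_PAGE_SIZE 4096ul
    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* Hypothetical hidden-GM window; the real bounds come from
         * gvt_hidden_gmadr_base()/gvt_hidden_gmadr_end(). */
        unsigned long base = 0x10000123ul;
        unsigned long end  = 0x20000ffful;

        unsigned long start = ALIGN_UP(base, I915_GTT_PAGE_SIZE);
        unsigned long limit = ALIGN_UP(end,  I915_GTT_PAGE_SIZE);

        printf("allocate in [0x%lx, 0x%lx), %lu pages\n",
               start, limit, (limit - start) / I915_GTT_PAGE_SIZE);
        return 0;
    }
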
mmio.c
38 #include "gvt.h"
57 #define reg_is_mmio(gvt, reg) \
58 (reg >= 0 && reg < gvt->device_info.mmio_size)
60 #define reg_is_gtt(gvt, reg) \
61 (reg >= gvt->device_info.gtt_start_offset \
62 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
67 struct intel_gvt *gvt = NULL; local
74 gvt = vgpu->gvt;
109 struct intel_gvt *gvt = vgpu->gvt; local
184 struct intel_gvt *gvt = vgpu->gvt; local
246 struct intel_gvt *gvt = vgpu->gvt; local
[all...]
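
In mmio.c the reg_is_mmio()/reg_is_gtt() macros classify a guest access by offset: anything below device_info.mmio_size is a register access, anything inside the GGTT window is a PTE access, and the emulation paths dispatch on that. A minimal stand-alone model of that range-based dispatch, with invented sizes in place of gvt->device_info:

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical layout; the real values live in gvt->device_info. */
    struct device_info {
        unsigned long mmio_size;
        unsigned long gtt_start_offset;
        unsigned long gtt_size;
    };

    static bool reg_is_mmio(const struct device_info *i, unsigned long reg)
    {
        return reg < i->mmio_size;
    }

    static bool reg_is_gtt(const struct device_info *i, unsigned long reg)
    {
        return reg >= i->gtt_start_offset &&
               reg <  i->gtt_start_offset + i->gtt_size;
    }

    int main(void)
    {
        struct device_info info = {
            .mmio_size        = 2 * 1024 * 1024,   /* 2 MiB register space */
            .gtt_start_offset = 8 * 1024 * 1024,   /* GGTT window at +8 MiB */
            .gtt_size         = 8 * 1024 * 1024,
        };
        unsigned long offsets[] = { 0x2000, 0x900000, 0x1800000 };

        for (int k = 0; k < 3; k++)
            printf("0x%lx -> %s\n", offsets[k],
                   reg_is_mmio(&info, offsets[k]) ? "MMIO register" :
                   reg_is_gtt(&info, offsets[k])  ? "GGTT entry"    :
                                                    "out of range");
        return 0;
    }
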
sched_policy.c
35 #include "gvt.h"
42 for_each_engine(engine, vgpu->gvt->gt, i) {
68 struct intel_gvt *gvt; member in struct:gvt_sched_data
80 if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
132 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) argument
134 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
155 for_each_engine(engine, gvt->gt, i) {
172 for_each_engine(engine, gvt->gt, i)
213 struct intel_gvt *gvt = sched_data->gvt; local
240 intel_gvt_schedule(struct intel_gvt *gvt) argument
277 tbs_sched_init(struct intel_gvt *gvt) argument
299 tbs_sched_clean(struct intel_gvt *gvt) argument
330 struct intel_gvt *gvt = vgpu->gvt; local
380 intel_gvt_init_sched_policy(struct intel_gvt *gvt) argument
392 intel_gvt_clean_sched_policy(struct intel_gvt *gvt) argument
436 intel_gvt_kick_schedule(struct intel_gvt *gvt) argument
[all...]
cmd_parser.h
46 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
48 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
mmio.h
71 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
72 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
74 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
75 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
76 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
77 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
80 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
99 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
108 void intel_gvt_restore_fence(struct intel_gvt *gvt);
109 void intel_gvt_restore_mmio(struct intel_gvt *gvt);
[all...]
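
mmio.h exposes intel_gvt_for_each_tracked_mmio(), which walks every tracked register and invokes a caller-supplied handler with the offset plus an opaque data pointer (debugfs.c's mmio_diff_handler, listed further down, is one in-tree user). The sketch below reproduces only that callback-iteration idiom; the tracked[] table, for_each_tracked_mmio() and count_handler() are made-up stand-ins.

    #include <stdio.h>

    struct gvt { int dummy; };                /* opaque device stub */

    /* Same shape as the handler passed to the real iterator: called once per
     * tracked register offset with a caller-supplied cookie. */
    typedef int (*mmio_handler)(struct gvt *gvt, unsigned int offset, void *data);

    static int for_each_tracked_mmio(struct gvt *gvt, mmio_handler fn, void *data)
    {
        static const unsigned int tracked[] = { 0x2030, 0x12030, 0x22030 };

        for (unsigned i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++) {
            int ret = fn(gvt, tracked[i], data);
            if (ret)                          /* in this sketch, non-zero stops the walk */
                return ret;
        }
        return 0;
    }

    static int count_handler(struct gvt *gvt, unsigned int offset, void *data)
    {
        (void)gvt;
        (*(int *)data)++;
        printf("tracked MMIO at 0x%x\n", offset);
        return 0;
    }

    int main(void)
    {
        struct gvt gvt = { 0 };
        int count = 0;

        for_each_tracked_mmio(&gvt, count_handler, &count);
        printf("%d tracked registers visited\n", count);
        return 0;
    }
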
gvt.h
184 struct intel_gvt *gvt; member in struct:intel_vgpu
192 /* Both sched_data and sched_ctl can be seen a part of the global gvt
326 /* scheduler scope lock, protect gvt and vgpu schedule related data */
352 * use it with atomic bit ops so that no need to use gvt big lock.
382 static inline void intel_gvt_request_service(struct intel_gvt *gvt, argument
385 set_bit(service, (void *)&gvt->service_request);
386 wake_up(&gvt->service_thread_wq);
389 void intel_gvt_free_firmware(struct intel_gvt *gvt);
390 int intel_gvt_load_firmware(struct intel_gvt *gvt);
400 #define gvt_to_ggtt(gvt) ((gv
593 intel_gvt_mmio_set_accessed( struct intel_gvt *gvt, unsigned int offset) argument
607 intel_gvt_mmio_is_cmd_accessible( struct intel_gvt *gvt, unsigned int offset) argument
620 intel_gvt_mmio_set_cmd_accessible( struct intel_gvt *gvt, unsigned int offset) argument
632 intel_gvt_mmio_is_unalign( struct intel_gvt *gvt, unsigned int offset) argument
647 intel_gvt_mmio_has_mode_mask( struct intel_gvt *gvt, unsigned int offset) argument
663 intel_gvt_mmio_is_sr_in_ctx( struct intel_gvt *gvt, unsigned int offset) argument
677 intel_gvt_mmio_set_sr_in_ctx( struct intel_gvt *gvt, unsigned int offset) argument
692 intel_gvt_mmio_set_cmd_write_patch( struct intel_gvt *gvt, unsigned int offset) argument
707 intel_gvt_mmio_is_cmd_write_patch( struct intel_gvt *gvt, unsigned int offset) argument
[all...]
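
The gvt.h hit for intel_gvt_request_service() shows the service-request pattern: callers set a bit in gvt->service_request and wake gvt->service_thread_wq, and a service thread (see the init_service_thread/clean_service_thread hits under kvmgt.c below) wakes up to handle the pending bits. Here is a userspace pthread sketch of the same flag-plus-wakeup handshake; the SERVICE_EMULATE_VBLANK name and the mutex/condvar are simplified stand-ins for the kernel's bit ops and wait queue.

    #include <pthread.h>
    #include <stdio.h>

    #define SERVICE_EMULATE_VBLANK 0   /* bit number, illustrative only */

    static unsigned long service_request;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;

    static void request_service(int bit)
    {
        pthread_mutex_lock(&lock);
        service_request |= 1UL << bit;  /* analogue of set_bit(service, ...)  */
        pthread_cond_signal(&wq);       /* analogue of wake_up(&...wq)        */
        pthread_mutex_unlock(&lock);
    }

    static void *service_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!service_request)        /* sleep until some service bit is set */
            pthread_cond_wait(&wq, &lock);
        if (service_request & (1UL << SERVICE_EMULATE_VBLANK))
            printf("service thread: emulate vblank\n");
        service_request = 0;            /* clear handled requests */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, service_thread, NULL);
        request_service(SERVICE_EMULATE_VBLANK);
        pthread_join(t, NULL);
        return 0;
    }
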
debugfs.c
26 #include "gvt.h"
58 static inline int mmio_diff_handler(struct intel_gvt *gvt, argument
65 preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
87 struct intel_gvt *gvt = vgpu->gvt; local
97 mutex_lock(&gvt->lock);
98 spin_lock_bh(&gvt->scheduler.mmio_context_lock);
100 mmio_hw_access_pre(gvt->gt);
102 intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
103 mmio_hw_access_post(gvt
195 struct intel_gvt *gvt = vgpu->gvt; local
208 intel_gvt_debugfs_init(struct intel_gvt *gvt) argument
222 intel_gvt_debugfs_clean(struct intel_gvt *gvt) argument
[all...]
vgpu.c
35 #include "gvt.h"
40 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
99 * @gvt : GVT device
104 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) argument
106 unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
107 unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
111 gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
113 if (!gvt->types)
116 gvt->mdev_types = kcalloc(num_types, sizeof(*gvt
154 intel_gvt_clean_vgpu_types(struct intel_gvt *gvt) argument
226 struct intel_gvt *gvt = vgpu->gvt; local
266 intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) argument
314 struct intel_gvt *gvt = vgpu->gvt; local
437 struct intel_gvt *gvt = vgpu->gvt; local
[all...]
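
intel_gvt_init_vgpu_types() in vgpu.c starts from the aperture and hidden sizes, subtracts the host's reserved low/high GM, and sizes the supported vGPU types against what remains (allocating gvt->types and gvt->mdev_types with kcalloc). The arithmetic below only illustrates that budget split with invented sizes; the real driver sizes types from a fixed table rather than dividing the remainder evenly as done here.

    #include <stdio.h>

    #define MB(x) ((x) * 1024ull * 1024ull)

    int main(void)
    {
        /* Hypothetical totals; the kernel derives these from the hardware via
         * gvt_aperture_sz()/gvt_hidden_sz() and its HOST_*_GM_SIZE reserves. */
        unsigned long long aperture_sz  = MB(256);
        unsigned long long hidden_sz    = MB(2048);
        unsigned long long host_low_gm  = MB(128);
        unsigned long long host_high_gm = MB(384);

        unsigned long long low_avail  = aperture_sz - host_low_gm;
        unsigned long long high_avail = hidden_sz  - host_high_gm;
        unsigned int num_types = 4;     /* pretend: 4 equally sized vGPU types */

        printf("per-type low GM:  %llu MiB\n", low_avail  / num_types / MB(1));
        printf("per-type high GM: %llu MiB\n", high_avail / num_types / MB(1));
        return 0;
    }
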
firmware.c
35 #include "gvt.h"
69 static int expose_firmware_sysfs(struct intel_gvt *gvt) argument
71 struct intel_gvt_device_info *info = &gvt->device_info;
72 struct drm_i915_private *i915 = gvt->gt->i915;
96 memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space,
98 memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size);
102 memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio,
105 memcpy(p, gvt->firmware.mmio, info->mmio_size);
121 static void clean_firmware_sysfs(struct intel_gvt *gvt) argument
123 struct pci_dev *pdev = to_pci_dev(gvt
134 intel_gvt_free_firmware(struct intel_gvt *gvt) argument
143 verify_firmware(struct intel_gvt *gvt, const struct firmware *fw) argument
198 intel_gvt_load_firmware(struct intel_gvt *gvt) argument
[all...]
kvmgt.c
54 #include "gvt.h"
180 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
203 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
639 mutex_lock(&vgpu->gvt->lock);
640 for_each_active_vgpu(vgpu->gvt, itr, id) {
650 mutex_unlock(&vgpu->gvt->lock);
775 aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
842 struct intel_gvt *gvt = vgpu->gvt; local
852 return (offset >= gvt
1515 struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt; local
1719 init_device_info(struct intel_gvt *gvt) argument
1736 intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) argument
1754 struct intel_gvt *gvt = (struct intel_gvt *)data; local
1782 clean_service_thread(struct intel_gvt *gvt) argument
1787 init_service_thread(struct intel_gvt *gvt) argument
1810 struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); local
1845 struct intel_gvt *gvt; local
1952 struct intel_gvt *gvt = i915->gvt; local
[all...]
gtt.c
37 #include "gvt.h"
76 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
94 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
96 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
100 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
102 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
105 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
305 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
318 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
330 const struct intel_gvt_device_info *info = &vgpu->gvt
654 struct intel_gvt *gvt = spt->vgpu->gvt; local
683 struct intel_gvt *gvt = spt->vgpu->gvt; local
1404 struct intel_gvt *gvt = vgpu->gvt; local
1446 struct intel_gvt *gvt = vgpu->gvt; local
1465 struct intel_gvt *gvt = spt->vgpu->gvt; local
1502 struct intel_gvt *gvt = spt->vgpu->gvt; local
1756 struct intel_gvt *gvt = vgpu->gvt; local
1786 struct intel_gvt *gvt = vgpu->gvt; local
1863 struct intel_gvt *gvt = vgpu->gvt; local
2014 reclaim_one_ppgtt_mm(struct intel_gvt *gvt) argument
2071 struct intel_gvt *gvt = vgpu->gvt; local
2208 struct intel_gvt *gvt = vgpu->gvt; local
2537 clean_spt_oos(struct intel_gvt *gvt) argument
2554 setup_spt_oos(struct intel_gvt *gvt) argument
2687 intel_gvt_init_gtt(struct intel_gvt *gvt) argument
2738 intel_gvt_clean_gtt(struct intel_gvt *gvt) argument
2787 struct intel_gvt *gvt = vgpu->gvt; local
2828 intel_gvt_restore_ggtt(struct intel_gvt *gvt) argument
[all...]
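
The gtt.c hits show host-to-guest graphics address handling: an address is first validated with gvt_gmadr_is_valid(), then rebased against either the aperture base or the hidden-range base depending on which window it falls in. The stand-alone model below keeps just that window test and base subtraction; the window bounds are made up, and the real code also adds the vGPU's own guest-visible base rather than returning a bare offset.

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical host GM windows; the real bounds come from the
     * gvt_aperture_gmadr_*() / gvt_hidden_gmadr_*() helpers. */
    #define APERTURE_BASE 0x00000000ul
    #define APERTURE_SIZE 0x10000000ul        /* 256 MiB mappable window */
    #define HIDDEN_BASE   0x10000000ul
    #define HIDDEN_SIZE   0x70000000ul        /* remainder of the GM space */

    static bool gmadr_is_aperture(unsigned long h) {
        return h >= APERTURE_BASE && h < APERTURE_BASE + APERTURE_SIZE;
    }
    static bool gmadr_is_hidden(unsigned long h) {
        return h >= HIDDEN_BASE && h < HIDDEN_BASE + HIDDEN_SIZE;
    }

    /* Offset of h within whichever window contains it, or -1 if invalid. */
    static long h2g_offset(unsigned long h)
    {
        if (gmadr_is_aperture(h))
            return (long)(h - APERTURE_BASE);
        if (gmadr_is_hidden(h))
            return (long)(h - HIDDEN_BASE);
        return -1;
    }

    int main(void)
    {
        unsigned long addrs[] = { 0x00200000ul, 0x14000000ul, 0x90000000ul };

        for (int i = 0; i < 3; i++)
            printf("0x%lx -> offset %ld\n", addrs[i], h2g_offset(addrs[i]));
        return 0;
    }
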
mmio_context.h
53 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
interrupt.c
36 #include "gvt.h"
166 struct intel_gvt *gvt,
169 struct intel_gvt_irq *irq = &gvt->irq;
197 struct intel_gvt *gvt = vgpu->gvt; local
198 const struct intel_gvt_irq_ops *ops = gvt->irq.ops;
227 struct intel_gvt *gvt = vgpu->gvt; local
228 const struct intel_gvt_irq_ops *ops = gvt->irq.ops;
266 struct intel_gvt *gvt local
165 regbase_to_irq_info( struct intel_gvt *gvt, unsigned int reg) argument
552 struct intel_gvt *gvt = irq_to_gvt(irq); local
680 struct intel_gvt *gvt = vgpu->gvt; local
714 intel_gvt_init_irq(struct intel_gvt *gvt) argument
[all...]
mmio_context.c
43 #include "gvt.h"
176 struct intel_gvt *gvt = engine->i915->gvt; local
178 u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
179 u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
215 struct intel_gvt *gvt = vgpu->gvt; local
217 int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
231 for (mmio = gvt->engine_mmio_list.mmio;
366 u32 *regs = vgpu->gvt
592 intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) argument
[all...]
handlers.c
41 #include "gvt.h"
63 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) argument
65 struct drm_i915_private *i915 = gvt->gt->i915;
81 static bool intel_gvt_match_device(struct intel_gvt *gvt, argument
84 return intel_gvt_get_device_type(gvt) & device;
99 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, argument
104 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
111 static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size, argument
118 if (!intel_gvt_match_device(gvt, device))
128 p = intel_gvt_find_mmio_info(gvt,
153 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset) argument
256 struct intel_gvt *gvt = vgpu->gvt; local
1946 struct intel_gvt *gvt = vgpu->gvt; local
2179 init_generic_mmio_info(struct intel_gvt *gvt) argument
2442 init_bdw_mmio_info(struct intel_gvt *gvt) argument
2577 init_skl_mmio_info(struct intel_gvt *gvt) argument
2748 init_bxt_mmio_info(struct intel_gvt *gvt) argument
2796 find_mmio_block(struct intel_gvt *gvt, unsigned int offset) argument
2819 intel_gvt_clean_mmio_info(struct intel_gvt *gvt) argument
2839 struct intel_gvt *gvt = iter->data; local
2878 struct intel_gvt *gvt = iter->data; local
2911 init_mmio_info(struct intel_gvt *gvt) argument
2922 init_mmio_block_handlers(struct intel_gvt *gvt) argument
2949 intel_gvt_setup_mmio_info(struct intel_gvt *gvt) argument
3013 intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data) argument
3110 intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, unsigned int offset) argument
3131 struct intel_gvt *gvt = vgpu->gvt; local
3201 intel_gvt_restore_fence(struct intel_gvt *gvt) argument
3214 mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data) argument
3225 intel_gvt_restore_mmio(struct intel_gvt *gvt) argument
[all...]
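
handlers.c keeps per-register metadata in a hash table keyed by MMIO offset; intel_gvt_find_mmio_info() hashes the offset, walks the matching bucket with hash_for_each_possible() and compares offsets. The sketch below shows the same lookup idea with a hand-rolled chained table instead of the kernel's hashtable.h; struct mmio_info and the bucket() hash are illustrative stand-ins.

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-in for per-register metadata: offset plus a payload. */
    struct mmio_info {
        unsigned int offset;
        unsigned int size;
        struct mmio_info *next;               /* bucket chain */
    };

    #define NBUCKETS 64
    static struct mmio_info *table[NBUCKETS];

    static unsigned bucket(unsigned int offset) { return (offset >> 2) % NBUCKETS; }

    static void add_mmio_info(unsigned int offset, unsigned int size)
    {
        struct mmio_info *e = calloc(1, sizeof(*e));

        e->offset = offset;
        e->size = size;
        e->next = table[bucket(offset)];
        table[bucket(offset)] = e;
    }

    /* Same idea as the lookup above: hash the offset, then compare exactly. */
    static struct mmio_info *find_mmio_info(unsigned int offset)
    {
        for (struct mmio_info *e = table[bucket(offset)]; e; e = e->next)
            if (e->offset == offset)
                return e;
        return NULL;
    }

    int main(void)
    {
        add_mmio_info(0x2030, 4);
        add_mmio_info(0x12030, 4);

        printf("0x2030 %s\n", find_mmio_info(0x2030) ? "tracked" : "not tracked");
        printf("0x4000 %s\n", find_mmio_info(0x4000) ? "tracked" : "not tracked");
        return 0;
    }
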
cfg_space.c
35 #include "gvt.h"
120 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
126 offset + bytes > vgpu->gvt->device_info.cfg_space_size))
259 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
266 offset + bytes > vgpu->gvt->device_info.cfg_space_size))
322 struct intel_gvt *gvt = vgpu->gvt; local
323 struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
324 const struct intel_gvt_device_info *info = &gvt->device_info;
328 memcpy(vgpu_cfg_space(vgpu), gvt
[all...]
scheduler.c
48 #include "gvt.h"
87 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
130 struct intel_gvt *gvt = vgpu->gvt; local
218 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
290 struct intel_gvt *gvt = container_of(nb, struct intel_gvt, local
292 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
379 * requests from gvt always set the has_init_breadcrumb flag, here
524 struct intel_gvt *gvt = workload->vgpu->gvt; local
848 pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine) argument
1066 complete_current_workload(struct intel_gvt *gvt, int ring_id) argument
1154 struct intel_gvt *gvt = engine->i915->gvt; local
1231 struct intel_gvt *gvt = vgpu->gvt; local
1242 intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) argument
1258 intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) argument
[all...]
scheduler.h
139 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
141 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
cmd_parser.c
48 #include "gvt.h"
519 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
665 find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode, argument
670 hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
679 get_cmd_info(struct intel_gvt *gvt, u32 cmd, argument
688 return find_cmd_entry(gvt, opcode, engine);
895 struct intel_gvt *gvt = vgpu->gvt; local
899 if (offset + 4 > gvt->device_info.mmio_size) {
908 intel_gvt_mmio_set_cmd_accessible(gvt, offse
1094 struct intel_gvt *gvt = s->vgpu->gvt; local
2716 add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e) argument
3103 struct intel_gvt *gvt = vgpu->gvt; local
3207 init_cmd_table(struct intel_gvt *gvt) argument
3236 clean_cmd_table(struct intel_gvt *gvt) argument
3248 intel_gvt_clean_cmd_parser(struct intel_gvt *gvt) argument
3253 intel_gvt_init_cmd_parser(struct intel_gvt *gvt) argument
[all...]
gtt.h
223 int intel_gvt_init_gtt(struct intel_gvt *gvt);
224 void intel_gvt_clean_gtt(struct intel_gvt *gvt);
291 void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
edid.c
37 #include "gvt.h"
141 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
282 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
379 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
409 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
481 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
display.c
37 #include "gvt.h"
64 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
76 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
176 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
532 intel_gvt_request_service(vgpu->gvt,
541 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
624 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
656 for_each_pipe(vgpu->gvt->gt->i915, pipe)
671 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
760 struct drm_i915_private *dev_priv = vgpu->gvt
[all...]
fb_decoder.c
38 #include "gvt.h"
150 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
206 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
336 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

Completed in 236 milliseconds
