Lines matching refs:mm — references to the struct mqd_manager *mm parameter in the AMD KFD v9 MQD manager (each entry: source line number, then the matching line)

37 static void update_mqd(struct mqd_manager *mm, void *mqd,
41 static uint64_t mqd_stride_v9(struct mqd_manager *mm,
44 if (mm->dev->kfd->cwsr_enabled &&
49 return mm->mqd_size;
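
The fragments above (lines 41-49) show the per-queue stride choice: with CWSR enabled the stride has to cover more than the bare MQD, otherwise mm->mqd_size is returned unchanged. Below is a minimal, self-contained sketch of that selection. The ALIGN-based layout, the stand-in struct fields (cwsr_enabled, ctx_save_restore_area_size), and the page-sized rounding are assumptions for illustration, not the driver's definitions.

	/* Sketch of the CWSR-aware stride selection; all types are stand-ins. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	struct queue_properties { uint32_t ctx_save_restore_area_size; };
	struct mqd_manager { bool cwsr_enabled; uint64_t mqd_size; };

	static uint64_t mqd_stride_sketch(struct mqd_manager *mm,
					  struct queue_properties *q)
	{
		if (mm->cwsr_enabled && q->ctx_save_restore_area_size)
			/* assumed layout: page-aligned MQD followed by the CWSR area */
			return ALIGN(mm->mqd_size, PAGE_SIZE) +
			       ALIGN(q->ctx_save_restore_area_size, PAGE_SIZE);
		return mm->mqd_size;
	}

	int main(void)
	{
		struct mqd_manager mm = { .cwsr_enabled = true, .mqd_size = 512 };
		struct queue_properties q = { .ctx_save_restore_area_size = 8192 };

		printf("stride = %llu\n",
		       (unsigned long long)mqd_stride_sketch(&mm, &q));
		return 0;
	}
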
62 static void update_cu_mask(struct mqd_manager *mm, void *mqd,
71 mqd_symmetrically_map_cu_mask(mm,
80 if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
158 static void init_mqd(struct mqd_manager *mm, void **mqd,
207 if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
223 update_mqd(mm, m, q, NULL);
226 static int load_mqd(struct mqd_manager *mm, void *mqd,
233 return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
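
load_mqd (lines 226-233) is a thin wrapper: the MQD manager never touches hardware directly, it dispatches through the per-device kfd2kgd function table. A sketch of that indirection follows; the one-entry ops table, the device_sketch type, and the stub are invented for illustration.

	/* Sketch of the kfd2kgd dispatch; only one hook is modeled. */
	#include <stdint.h>
	#include <stdio.h>

	struct kfd2kgd_ops {
		int (*hqd_load)(void *adev, void *mqd, uint32_t pipe_id,
				uint32_t queue_id);
	};

	struct device_sketch {
		void *adev;				/* hardware handle */
		const struct kfd2kgd_ops *kfd2kgd;	/* per-device ops table */
	};

	static int hqd_load_stub(void *adev, void *mqd, uint32_t pipe_id,
				 uint32_t queue_id)
	{
		(void)adev; (void)mqd;
		printf("loading HQD pipe %u queue %u\n", pipe_id, queue_id);
		return 0;	/* 0 on success, negative errno otherwise */
	}

	static const struct kfd2kgd_ops ops = { .hqd_load = hqd_load_stub };

	int main(void)
	{
		struct device_sketch dev = { .adev = 0, .kfd2kgd = &ops };

		return dev.kfd2kgd->hqd_load(dev.adev, 0, 0, 1);
	}
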
238 static void update_mqd(struct mqd_manager *mm, void *mqd,
299 if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
302 if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
303 update_cu_mask(mm, mqd, minfo, 0);
306 if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
326 static int get_wave_state(struct mqd_manager *mm, void *mqd,
362 static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
369 static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
381 static void restore_mqd(struct mqd_manager *mm, void **mqd,
413 static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
419 init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
427 static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
439 err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
446 static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
460 mm->update_mqd(mm, m, q, NULL);
465 static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
492 static void checkpoint_mqd_sdma(struct mqd_manager *mm,
504 static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
528 static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
539 for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
540 kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);
542 init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
547 m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
559 static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
563 uint32_t xcc_mask = mm->dev->xcc_mask;
566 uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
570 err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
583 static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
587 uint32_t xcc_mask = mm->dev->xcc_mask;
589 uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
599 err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id);
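
destroy_hiq_mqd_v9_4_3 (lines 583-599) unmaps the HIQ on every instance named in xcc_mask, passing each xcc_id to amdgpu_amdkfd_unmap_hiq. Only parts of the loop are visible, so the bit-walk below is one plausible reading, with unmap_hiq_stub standing in for the real call.

	/* Sketch: walk the set bits of the XCC mask, stop on first failure. */
	#include <stdint.h>
	#include <stdio.h>

	static int unmap_hiq_stub(uint32_t doorbell_off, int xcc_id)
	{
		printf("unmapping HIQ on XCC %d (doorbell 0x%x)\n",
		       xcc_id, doorbell_off);
		return 0;	/* 0 on success, negative errno otherwise */
	}

	static int destroy_hiq_sketch(uint32_t xcc_mask, uint32_t doorbell_off)
	{
		int err = 0;

		for (uint32_t m = xcc_mask; m; m &= m - 1) {
			int xcc_id = __builtin_ctz(m);	/* lowest set bit = instance id */

			err = unmap_hiq_stub(doorbell_off, xcc_id);
			if (err)
				break;	/* report the first failing instance */
		}
		return err;
	}

	int main(void)
	{
		return destroy_hiq_sketch(0xb /* XCCs 0, 1, 3 */, 0x40);
	}
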
621 static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
630 uint64_t offset = mm->mqd_stride(mm, q);
631 uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;
634 for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
637 init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
645 if (mm->dev->kfd->cwsr_enabled &&
661 NUM_XCC(mm->dev->xcc_mask);
687 static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
692 uint64_t size = mm->mqd_stride(mm, q);
694 for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
696 update_mqd(mm, m, q, minfo);
698 update_cu_mask(mm, mqd, minfo, xcc);
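
update_mqd_v9_4_3 (lines 687-698) updates one MQD image per XCC instance, laid out back to back at mm->mqd_stride() byte intervals. A minimal sketch of that walk; NUM_XCC is modeled here as a popcount of the instance mask, and update_one_mqd stands in for the update_mqd/update_cu_mask pair.

	/* Sketch: step a base pointer through per-XCC MQD copies by stride. */
	#include <stdint.h>
	#include <stdio.h>

	#define NUM_XCC(mask) __builtin_popcount(mask)

	static void update_one_mqd(void *m, int xcc)
	{
		printf("updating MQD copy %d at %p\n", xcc, m);
	}

	static void update_all_xcc(uint8_t *mqd_base, uint32_t xcc_mask,
				   uint64_t stride)
	{
		for (int xcc = 0; xcc < NUM_XCC(xcc_mask); xcc++)
			update_one_mqd(mqd_base + (uint64_t)xcc * stride, xcc);
	}

	int main(void)
	{
		static uint8_t mqds[4 * 4096];

		update_all_xcc(mqds, 0xf, 4096);	/* 4 XCCs, page-sized stride */
		return 0;
	}
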
720 static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
724 uint32_t xcc_mask = mm->dev->xcc_mask;
735 err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
748 static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
754 uint32_t xcc_mask = mm->dev->xcc_mask;
757 uint64_t mqd_stride_size = mm->mqd_stride(mm, p);
761 err = mm->dev->kfd2kgd->hqd_load(
762 mm->dev->adev, xcc_mqd, pipe_id, queue_id,
775 static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
784 uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
787 for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
792 err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
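
get_wave_state_v9_4_3 (lines 775-792) applies the same stride walk to wave-state capture: each XCC's MQD sits one mqd_stride apart, and each instance deposits its control stack into the caller's buffer in turn. In the sketch below the per-instance stack size and the stub are invented; the driver's actual sizing comes from the MQD contents.

	/* Sketch: gather per-XCC control stacks into one contiguous buffer. */
	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define NUM_XCC(mask) __builtin_popcount(mask)

	/* stand-in: pretend each XCC produces a 16-byte control stack */
	static int get_wave_state_stub(void *xcc_mqd, void *ctl_stack_dst,
				       uint32_t *ctl_stack_used)
	{
		(void)xcc_mqd;
		memset(ctl_stack_dst, 0xab, 16);
		*ctl_stack_used = 16;
		return 0;
	}

	static int gather_wave_state(uint8_t *mqd, uint32_t xcc_mask,
				     uint64_t mqd_stride, uint8_t *ctl_stack)
	{
		uint32_t used, off = 0;
		int err;

		for (int xcc = 0; xcc < NUM_XCC(xcc_mask); xcc++) {
			err = get_wave_state_stub(mqd + xcc * mqd_stride,
						  ctl_stack + off, &used);
			if (err)
				return err;
			off += used;	/* next XCC appends after this one */
		}
		printf("gathered %u bytes of control stack\n", off);
		return 0;
	}

	int main(void)
	{
		static uint8_t mqds[4 * 4096], stacks[4 * 16];

		return gather_wave_state(mqds, 0xf, 4096, stacks);
	}
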