Lines Matching defs:fman (references to the vmw_fence_manager pointer fman)

124 struct vmw_fence_manager *fman = fman_from_fence(fence);
126 spin_lock(&fman->lock);
128 --fman->num_fence_objects;
129 spin_unlock(&fman->lock);
148 struct vmw_fence_manager *fman = fman_from_fence(fence);
149 struct vmw_private *dev_priv = fman->dev_priv;
183 static void __vmw_fences_update(struct vmw_fence_manager *fman);
190 struct vmw_fence_manager *fman = fman_from_fence(fence);
191 struct vmw_private *dev_priv = fman->dev_priv;
223 #define C (__vmw_fences_update(fman), dma_fence_is_signaled_locked(f))
232 __vmw_fences_update(fman);
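
The macro at line 223 chains a state refresh and the signaled test with the comma operator, so every re-evaluation of the wait condition first pulls fresh seqno state from the device before testing dma_fence_is_signaled_locked(). A minimal sketch of the same idiom; my_refresh()/my_signaled() and struct my_dev are hypothetical stand-ins, not driver API:

#include <linux/wait.h>

struct my_dev {
	wait_queue_head_t wq;
	bool done;			/* cached completion state */
};

static void my_refresh(struct my_dev *dev)
{
	/* The driver re-reads the hardware seqno here; stubbed in this
	 * sketch. */
}

static bool my_signaled(struct my_dev *dev)
{
	return READ_ONCE(dev->done);
}

/* Comma operator: refresh first, then test, on every evaluation. */
#define MY_COND(dev) (my_refresh(dev), my_signaled(dev))

static long my_wait(struct my_dev *dev, long timeout)
{
	return wait_event_timeout(dev->wq, MY_COND(dev), timeout);
}

wait_event_timeout() re-evaluates its condition after every wakeup, which is what makes the refresh-then-test pairing effective.
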
300 struct vmw_fence_manager *fman =
308 mutex_lock(&fman->goal_irq_mutex);
310 spin_lock(&fman->lock);
311 list_splice_init(&fman->cleanup_list, &list);
312 seqno_valid = fman->seqno_valid;
313 spin_unlock(&fman->lock);
315 if (!seqno_valid && fman->goal_irq_on) {
316 fman->goal_irq_on = false;
317 vmw_goal_waiter_remove(fman->dev_priv);
319 mutex_unlock(&fman->goal_irq_mutex);
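
Lines 300-319 show the standard worker drain: the shared cleanup list is spliced onto a private list under the spinlock, then processed with the lock dropped, while the sleepable goal-IRQ teardown is serialized by the mutex instead. A condensed sketch of the splice step alone; struct my_worker and its fields are hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_worker {
	spinlock_t lock;
	struct list_head cleanup_list;	/* shared, lock-protected */
	struct work_struct work;
};

static void my_work_func(struct work_struct *work)
{
	struct my_worker *w = container_of(work, struct my_worker, work);
	LIST_HEAD(list);

	/* Steal the whole list in O(1); the shared head is reinitialized
	 * so producers can keep appending while we process. */
	spin_lock(&w->lock);
	list_splice_init(&w->cleanup_list, &list);
	spin_unlock(&w->lock);

	/* Entries on 'list' are now private; no lock needed to walk it. */
}
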
327 * hence fman::lock not held.
340 struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
342 if (unlikely(!fman))
345 fman->dev_priv = dev_priv;
346 spin_lock_init(&fman->lock);
347 INIT_LIST_HEAD(&fman->fence_list);
348 INIT_LIST_HEAD(&fman->cleanup_list);
349 INIT_WORK(&fman->work, &vmw_fence_work_func);
350 fman->fifo_down = true;
351 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
353 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
354 fman->event_fence_action_size =
356 mutex_init(&fman->goal_irq_mutex);
357 fman->ctx = dma_fence_context_alloc(1);
359 return fman;
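
Lines 340-359 are the usual kzalloc-then-initialize constructor. A cut-down sketch of the same shape; struct my_fence_manager is a hypothetical subset of the real vmw_fence_manager, and the size bookkeeping at lines 351-355 is omitted because its continuation lines are not shown above:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/dma-fence.h>

struct my_fence_manager {
	spinlock_t lock;
	struct list_head fence_list;
	struct list_head cleanup_list;
	struct work_struct work;
	struct mutex goal_irq_mutex;
	bool goal_irq_on;
	bool fifo_down;
	u64 ctx;
};

static void my_fence_work_func(struct work_struct *work)
{
	/* Deferred action cleanup, as sketched after line 319 above. */
}

static struct my_fence_manager *my_fence_manager_init(void)
{
	struct my_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, my_fence_work_func);
	mutex_init(&fman->goal_irq_mutex);
	/* New fences are refused until the device FIFO is up. */
	fman->fifo_down = true;
	/* One dma-fence context shared by every fence from this manager. */
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}
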
362 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
366 (void) cancel_work_sync(&fman->work);
368 spin_lock(&fman->lock);
369 lists_empty = list_empty(&fman->fence_list) &&
370 list_empty(&fman->cleanup_list);
371 spin_unlock(&fman->lock);
374 kfree(fman);
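
The takedown at lines 362-374 flushes the deferred work before freeing, so the worker can never dereference a freed manager. A sketch of that ordering, reusing the hypothetical my_fence_manager above:

static void my_fence_manager_takedown(struct my_fence_manager *fman)
{
	bool lists_empty;

	/* Must complete before kfree(): the worker touches fman. */
	cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	WARN_ON(!lists_empty);
	kfree(fman);
}
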
377 static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
383 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
384 fman->ctx, seqno);
388 spin_lock(&fman->lock);
389 if (unlikely(fman->fifo_down)) {
393 list_add_tail(&fence->head, &fman->fence_list);
394 ++fman->num_fence_objects;
397 spin_unlock(&fman->lock);
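
Lines 377-397 initialize each fence against the manager's shared spinlock and dma-fence context, then refuse registration if the FIFO is down. A sketch built around the same dma_fence_init() call, reusing the my_fence_manager type above; my_fence_ops supplies only the two name callbacks, which assumes a kernel where .wait and .enable_signaling have core-provided defaults:

struct my_fence {
	struct dma_fence base;
	struct list_head head;		/* entry on fman->fence_list */
};

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my_drv";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my_timeline";
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
};

static int my_fence_obj_init(struct my_fence_manager *fman,
			     struct my_fence *fence, u64 seqno)
{
	/* Every fence shares the manager's lock and context; the
	 * dma-fence core takes &fman->lock when signaling. */
	dma_fence_init(&fence->base, &my_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->head);

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		spin_unlock(&fman->lock);
		return -EBUSY;	/* mirrors the rejection at line 389 */
	}
	list_add_tail(&fence->head, &fman->fence_list);
	spin_unlock(&fman->lock);
	return 0;
}
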
402 static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
409 fman->pending_actions[action->type]--;
418 list_add_tail(&action->head, &fman->cleanup_list);
426 * @fman: Pointer to a fence manager.
438 static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
445 if (likely(!fman->seqno_valid))
448 fifo_mem = fman->dev_priv->mmio_virt;
453 fman->seqno_valid = false;
454 list_for_each_entry(fence, &fman->fence_list, head) {
456 fman->seqno_valid = true;
484 struct vmw_fence_manager *fman = fman_from_fence(fence);
491 fifo_mem = fman->dev_priv->mmio_virt;
493 if (likely(fman->seqno_valid &&
498 fman->seqno_valid = true;
503 static void __vmw_fences_update(struct vmw_fence_manager *fman)
509 u32 *fifo_mem = fman->dev_priv->mmio_virt;
513 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
520 vmw_fences_perform_actions(fman, &action_list);
531 needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
540 if (!list_empty(&fman->cleanup_list))
541 (void) schedule_work(&fman->work);
544 void vmw_fences_update(struct vmw_fence_manager *fman)
546 spin_lock(&fman->lock);
547 __vmw_fences_update(fman);
548 spin_unlock(&fman->lock);
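
__vmw_fences_update() (lines 503-541) walks the fence list with the lock held, signals everything the hardware seqno has passed, and defers action teardown to the worker; vmw_fences_update() at lines 544-548 is the locked wrapper. A simplified sketch of the walk and the deferral, reusing the types above; the signed-difference test is a generic wrap-safe comparison standing in for the driver's VMW_FENCE_WRAP check:

static void __my_fences_update(struct my_fence_manager *fman, u32 hw_seqno)
{
	struct my_fence *fence, *next;

	lockdep_assert_held(&fman->lock);

	list_for_each_entry_safe(fence, next, &fman->fence_list, head) {
		/* Signed difference is wrap-safe for a free-running
		 * 32-bit hardware seqno. */
		if ((s32)(hw_seqno - (u32)fence->base.seqno) < 0)
			continue;

		list_del_init(&fence->head);
		dma_fence_signal_locked(&fence->base);
	}

	/* Freeing and action teardown happen in process context. */
	if (!list_empty(&fman->cleanup_list))
		schedule_work(&fman->work);
}

static void my_fences_update(struct my_fence_manager *fman, u32 hw_seqno)
{
	spin_lock(&fman->lock);
	__my_fences_update(fman, hw_seqno);
	spin_unlock(&fman->lock);
}
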
553 struct vmw_fence_manager *fman = fman_from_fence(fence);
558 vmw_fences_update(fman);
588 int vmw_fence_create(struct vmw_fence_manager *fman,
599 ret = vmw_fence_obj_init(fman, fence, seqno,
617 struct vmw_fence_manager *fman = fman_from_fence(fence);
623 ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
624 fman->user_fence_size);
639 struct vmw_fence_manager *fman,
647 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
659 ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
670 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
703 ttm_mem_global_free(mem_glob, fman->user_fence_size);
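
The user-fence constructor (lines 639-703) reserves accounting space from the TTM global allocator before creating anything, and every later failure path unwinds that reservation. A generic sketch of the reserve-then-unwind shape; my_account_reserve()/my_account_release() are hypothetical stand-ins for the ttm_mem_global_alloc()/ttm_mem_global_free() pair, and the error path is simplified (the real driver drops the dma-fence reference rather than calling kfree() directly):

/* Hypothetical accounting pair, trivially stubbed for the sketch. */
static int my_account_reserve(size_t size)  { return 0; }
static void my_account_release(size_t size) { }

static int my_user_fence_create(struct my_fence_manager *fman, u64 seqno,
				size_t account_size)
{
	struct my_fence *fence;
	int ret;

	ret = my_account_reserve(account_size);
	if (ret)
		return ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = my_fence_obj_init(fman, fence, seqno);
	if (ret)
		goto out_no_init;

	return 0;

out_no_init:
	kfree(fence);
out_no_object:
	/* Unwind the accounting on any failure after the reservation. */
	my_account_release(account_size);
	return ret;
}
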
711 * @fman: pointer to a fence manager
717 int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
757 void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
764 * restart when we've released the fman->lock.
767 spin_lock(&fman->lock);
768 fman->fifo_down = true;
769 while (!list_empty(&fman->fence_list)) {
771 list_entry(fman->fence_list.prev, struct vmw_fence_obj,
774 spin_unlock(&fman->lock);
785 vmw_fences_perform_actions(fman, &action_list);
790 spin_lock(&fman->lock);
792 spin_unlock(&fman->lock);
795 void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
797 spin_lock(&fman->lock);
798 fman->fifo_down = false;
799 spin_unlock(&fman->lock);
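
vmw_fence_fifo_down() (lines 757-792) empties the fence list while repeatedly dropping the lock to wait; the comment at line 764 explains why the loop restarts from the list after every unlock instead of keeping a cursor. A sketch of that drain pattern, reusing the types above; unlike the driver, where the update path unlinks signaled fences, this sketch unlinks explicitly so the loop is guaranteed to make progress:

static void my_fence_fifo_down(struct my_fence_manager *fman)
{
	long ret;

	spin_lock(&fman->lock);
	fman->fifo_down = true;		/* refuse new fences from now on */

	while (!list_empty(&fman->fence_list)) {
		struct my_fence *fence =
			list_last_entry(&fman->fence_list,
					struct my_fence, head);

		/* Pin the fence across the unlock. */
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		/* Blocking wait must not hold the spinlock. */
		ret = dma_fence_wait_timeout(&fence->base, false, HZ);

		spin_lock(&fman->lock);
		list_del_init(&fence->head);	/* off the list either way */
		spin_unlock(&fman->lock);

		if (ret <= 0)
			dma_fence_signal(&fence->base);	/* force-signal */

		dma_fence_put(&fence->base);

		/* List may have changed meanwhile: retake and restart. */
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}
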
900 struct vmw_fence_manager *fman;
909 fman = fman_from_fence(fence);
917 spin_lock(&fman->lock);
919 spin_unlock(&fman->lock);
1005 struct vmw_fence_manager *fman = fman_from_fence(fence);
1008 mutex_lock(&fman->goal_irq_mutex);
1009 spin_lock(&fman->lock);
1011 fman->pending_actions[action->type]++;
1017 vmw_fences_perform_actions(fman, &action_list);
1022 * This function may set fman::seqno_valid, so it must
1028 spin_unlock(&fman->lock);
1031 if (!fman->goal_irq_on) {
1032 fman->goal_irq_on = true;
1033 vmw_goal_waiter_add(fman->dev_priv);
1035 vmw_fences_update(fman);
1037 mutex_unlock(&fman->goal_irq_mutex);
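
The attach path at lines 1005-1037 nests the spinlock inside goal_irq_mutex: fast per-fence state changes happen under the spinlock, while the sleepable goal-IRQ enable and the follow-up vmw_fences_update() rescan happen under the mutex only. The rescan closes the race where a fence signals after the locked check but before the interrupt is armed. A sketch of that lock nesting and rescan, reusing the types above; my_goal_irq_enable() is a hypothetical stand-in for vmw_goal_waiter_add():

static void my_goal_irq_enable(struct my_fence_manager *fman)
{
	/* Driver-specific IRQ plumbing; stubbed for the sketch. */
}

static void my_fence_action_attach(struct my_fence_manager *fman,
				   struct my_fence *fence, u32 hw_seqno)
{
	bool need_irq = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	/* Only unsignaled fences need the goal interrupt armed. */
	if (!dma_fence_is_signaled_locked(&fence->base))
		need_irq = true;

	spin_unlock(&fman->lock);

	if (need_irq) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			my_goal_irq_enable(fman);
		}
		/* Rescan after arming: the fence may have signaled in
		 * the window before the IRQ was on. */
		my_fences_update(fman, hw_seqno);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}
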
1064 struct vmw_fence_manager *fman = fman_from_fence(fence);
1077 eaction->dev = fman->dev_priv->dev;
1098 struct vmw_fence_manager *fman = fman_from_fence(fence);
1099 struct drm_device *dev = fman->dev_priv->dev;