Lines Matching refs:man (references to the identifier "man"; all matches are from drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c)

141  * @man: The command buffer manager.
154 struct vmw_cmdbuf_man *man;
196 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
198 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
203 * @man: The command buffer manager.
206 static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
209 if (mutex_lock_interruptible(&man->cur_mutex))
212 mutex_lock(&man->cur_mutex);
221 * @man: The command buffer manager.
223 static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
225 mutex_unlock(&man->cur_mutex);
244 dma_pool_free(header->man->dheaders, dheader, header->handle);
254 * For internal use. Must be called with man::lock held.
258 struct vmw_cmdbuf_man *man = header->man;
260 lockdep_assert_held_once(&man->lock);
268 wake_up_all(&man->alloc_queue);
270 dma_pool_free(man->headers, header->cb_header,
283 struct vmw_cmdbuf_man *man = header->man;
290 spin_lock(&man->lock);
292 spin_unlock(&man->lock);
303 struct vmw_cmdbuf_man *man = header->man;
307 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
311 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
333 * @man: The command buffer manager.
339 static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
342 while (ctx->num_hw_submitted < man->max_hw_submitted &&
369 * @man: The command buffer manager.
377 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
383 vmw_cmdbuf_ctx_submit(man, ctx);
392 wake_up_all(&man->idle_queue);
401 list_add_tail(&entry->list, &man->error);
402 schedule_work(&man->work);
419 vmw_cmdbuf_ctx_submit(man, ctx);
428 * @man: The command buffer manager.
434 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
442 for_each_cmdbuf_ctx(man, i, ctx)
443 vmw_cmdbuf_ctx_process(man, ctx, &notempty);
445 if (man->irq_on && !notempty) {
446 vmw_generic_waiter_remove(man->dev_priv,
448 &man->dev_priv->cmdbuf_waiters);
449 man->irq_on = false;
450 } else if (!man->irq_on && notempty) {
451 vmw_generic_waiter_add(man->dev_priv,
453 &man->dev_priv->cmdbuf_waiters);
454 man->irq_on = true;
465 * @man: The command buffer manager.
472 * @man->lock needs to be held when calling this function.
474 static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
481 list_add_tail(&header->list, &man->ctx[cb_context].submitted);
483 vmw_cmdbuf_man_process(man);
490 * @man: Pointer to the command buffer manager.
496 void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
498 spin_lock(&man->lock);
499 vmw_cmdbuf_man_process(man);
500 spin_unlock(&man->lock);
514 struct vmw_cmdbuf_man *man =
524 for_each_cmdbuf_ctx(man, i, ctx)
527 mutex_lock(&man->error_mutex);
528 spin_lock(&man->lock);
529 list_for_each_entry_safe(entry, next, &man->error, list) {
563 if (man->using_mob)
576 for_each_cmdbuf_ctx(man, i, ctx)
577 man->ctx[i].block_submission = true;
579 spin_unlock(&man->lock);
582 if (global_block && vmw_cmdbuf_preempt(man, 0))
585 spin_lock(&man->lock);
586 for_each_cmdbuf_ctx(man, i, ctx) {
588 vmw_cmdbuf_ctx_process(man, ctx, &dummy);
605 vmw_cmdbuf_man_process(man);
606 spin_unlock(&man->lock);
608 if (global_block && vmw_cmdbuf_startstop(man, 0, true))
613 vmw_cmd_send_fence(man->dev_priv, &dummy);
614 wake_up_all(&man->idle_queue);
617 mutex_unlock(&man->error_mutex);
623 * @man: The command buffer manager.
627 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
634 spin_lock(&man->lock);
635 vmw_cmdbuf_man_process(man);
636 for_each_cmdbuf_ctx(man, i, ctx) {
643 idle = list_empty(&man->error);
646 spin_unlock(&man->lock);
655 * @man: The command buffer manager.
658 * is automatically allocated when needed. Call with @man->cur_mutex held.
660 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
662 struct vmw_cmdbuf_header *cur = man->cur;
664 lockdep_assert_held_once(&man->cur_mutex);
669 spin_lock(&man->lock);
670 if (man->cur_pos == 0) {
675 man->cur->cb_header->length = man->cur_pos;
676 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
678 spin_unlock(&man->lock);
679 man->cur = NULL;
680 man->cur_pos = 0;
687 * @man: The command buffer manager.
693 int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
696 int ret = vmw_cmdbuf_cur_lock(man, interruptible);
701 __vmw_cmdbuf_cur_flush(man);
702 vmw_cmdbuf_cur_unlock(man);
710 * @man: The command buffer manager.
718 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
723 ret = vmw_cmdbuf_cur_flush(man, interruptible);
724 vmw_generic_waiter_add(man->dev_priv,
726 &man->dev_priv->cmdbuf_waiters);
730 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
734 (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
737 vmw_generic_waiter_remove(man->dev_priv,
739 &man->dev_priv->cmdbuf_waiters);
741 if (!vmw_cmdbuf_man_idle(man, true))
755 * @man: The command buffer manager.
762 static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
771 spin_lock(&man->lock);
772 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
774 vmw_cmdbuf_man_process(man);
775 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
778 spin_unlock(&man->lock);
787 * @man: The command buffer manager.
796 static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
812 if (mutex_lock_interruptible(&man->space_mutex))
815 mutex_lock(&man->space_mutex);
819 if (vmw_cmdbuf_try_alloc(man, &info))
822 vmw_generic_waiter_add(man->dev_priv,
824 &man->dev_priv->cmdbuf_waiters);
830 (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
833 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
834 &man->dev_priv->cmdbuf_waiters);
835 mutex_unlock(&man->space_mutex);
839 wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
841 vmw_generic_waiter_remove(man->dev_priv,
843 &man->dev_priv->cmdbuf_waiters);
846 mutex_unlock(&man->space_mutex);
855 * @man: The command buffer manager.
860 static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
869 if (!man->has_pool)
872 ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
877 header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
887 header->cmd = man->map + offset;
888 if (man->using_mob) {
890 cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
893 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
899 spin_lock(&man->lock);
901 spin_unlock(&man->lock);
910 * @man: The command buffer manager.
914 static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
924 dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
946 * @man: The command buffer manager.
955 void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
969 ret = vmw_cmdbuf_space_inline(man, header, size);
971 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
978 header->man = man;
990 * @man: The command buffer manager.
998 static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
1006 if (vmw_cmdbuf_cur_lock(man, interruptible))
1009 cur = man->cur;
1010 if (cur && (size + man->cur_pos > cur->size ||
1013 __vmw_cmdbuf_cur_flush(man);
1015 if (!man->cur) {
1016 ret = vmw_cmdbuf_alloc(man,
1017 max_t(size_t, size, man->default_size),
1018 interruptible, &man->cur);
1020 vmw_cmdbuf_cur_unlock(man);
1024 cur = man->cur;
1034 return (void *) (man->cur->cmd + man->cur_pos);
1040 * @man: The command buffer manager.
1044 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1047 struct vmw_cmdbuf_header *cur = man->cur;
1049 lockdep_assert_held_once(&man->cur_mutex);
1052 man->cur_pos += size;
1056 __vmw_cmdbuf_cur_flush(man);
1057 vmw_cmdbuf_cur_unlock(man);
1063 * @man: The command buffer manager.
1073 void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1078 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1095 * @man: The command buffer manager.
1101 void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1105 vmw_cmdbuf_commit_cur(man, size, flush);
1109 (void) vmw_cmdbuf_cur_lock(man, false);
1110 __vmw_cmdbuf_cur_flush(man);
1112 man->cur = header;
1113 man->cur_pos = size;
1117 __vmw_cmdbuf_cur_flush(man);
1118 vmw_cmdbuf_cur_unlock(man);
1125 * @man: The command buffer manager.
1131 static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1137 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1145 spin_lock(&man->lock);
1147 spin_unlock(&man->lock);
1163 * @man: The command buffer manager.
1168 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1179 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1187 * @man: The command buffer manager.
1193 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1205 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1211 * @man: The command buffer manager.
1220 int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
1222 struct vmw_private *dev_priv = man->dev_priv;
1225 if (man->has_pool)
1230 man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
1231 &man->handle, GFP_KERNEL);
1232 if (man->map) {
1233 man->using_mob = false;
1252 ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
1256 man->map = vmw_bo_map_and_cache(man->cmd_space);
1257 man->using_mob = man->map;
1260 man->size = size;
1261 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1263 man->has_pool = true;
1271 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1274 (man->using_mob) ? "MOB" : "DMA");
1291 struct vmw_cmdbuf_man *man;
1299 man = kzalloc(sizeof(*man), GFP_KERNEL);
1300 if (!man)
1303 man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1305 man->headers = dma_pool_create("vmwgfx cmdbuf",
1309 if (!man->headers) {
1314 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1318 if (!man->dheaders) {
1323 for_each_cmdbuf_ctx(man, i, ctx)
1326 INIT_LIST_HEAD(&man->error);
1327 spin_lock_init(&man->lock);
1328 mutex_init(&man->cur_mutex);
1329 mutex_init(&man->space_mutex);
1330 mutex_init(&man->error_mutex);
1331 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1332 init_waitqueue_head(&man->alloc_queue);
1333 init_waitqueue_head(&man->idle_queue);
1334 man->dev_priv = dev_priv;
1335 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1336 INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1339 ret = vmw_cmdbuf_startstop(man, 0, true);
1342 vmw_cmdbuf_man_destroy(man);
1346 return man;
1349 dma_pool_destroy(man->headers);
1351 kfree(man);
1359 * @man: Pointer to a command buffer manager.
1367 void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1369 if (!man->has_pool)
1372 man->has_pool = false;
1373 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1374 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1375 if (man->using_mob)
1376 vmw_bo_unreference(&man->cmd_space);
1378 dma_free_coherent(man->dev_priv->drm.dev,
1379 man->size, man->map, man->handle);
1385 * @man: Pointer to a command buffer manager.
1389 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1391 WARN_ON_ONCE(man->has_pool);
1392 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1394 if (vmw_cmdbuf_startstop(man, 0, false))
1397 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1398 &man->dev_priv->error_waiters);
1399 (void) cancel_work_sync(&man->work);
1400 dma_pool_destroy(man->dheaders);
1401 dma_pool_destroy(man->headers);
1402 mutex_destroy(&man->cur_mutex);
1403 mutex_destroy(&man->space_mutex);
1404 mutex_destroy(&man->error_mutex);
1405 kfree(man);
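
The matches around lines 1073 (vmw_cmdbuf_reserve) and 1101 (vmw_cmdbuf_commit) outline the reserve/commit pattern that callers elsewhere in the vmwgfx driver use against the manager. The sketch below is not code from the file above; it is a hypothetical in-driver helper, assuming the vmwgfx-internal headers, the SVGA3D_INVALID_ID constant for "no DX context", and the ERR_PTR() error convention that the reserve path at line 998 suggests.

/*
 * Illustrative sketch only, not part of vmwgfx_cmdbuf.c. Shows how driver
 * code would reserve space from the command buffer manager, write one
 * command, and commit it. Assumes vmwgfx-internal headers are included and
 * that vmw_cmdbuf_reserve() reports failure via ERR_PTR().
 */
static int example_send_simple_cmd(struct vmw_cmdbuf_man *man, u32 cmd_id)
{
	u32 *cmd;

	/* Reserve space in the shared "cur" buffer; no DX context bound. */
	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
				 true, NULL);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = cmd_id;	/* hypothetical single-dword command */

	/* Commit the reserved bytes; flush=false lets the manager batch. */
	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);

	return 0;
}

A caller can instead allocate a dedicated buffer with vmw_cmdbuf_alloc() (line 955) and pass its header to both calls; the commit path around lines 1109-1118 then flushes the shared buffer and adopts or submits the dedicated one.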