Lines Matching refs:man
155 struct vmw_cmdbuf_man *man; member
197 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
199 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
207 static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible) in vmw_cmdbuf_cur_lock() argument
210 if (mutex_lock_interruptible(&man->cur_mutex)) in vmw_cmdbuf_cur_lock()
213 mutex_lock(&man->cur_mutex); in vmw_cmdbuf_cur_lock()
224 static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_cur_unlock() argument
226 mutex_unlock(&man->cur_mutex); in vmw_cmdbuf_cur_unlock()
245 dma_pool_free(header->man->dheaders, dheader, header->handle); in vmw_cmdbuf_header_inline_free()
259 struct vmw_cmdbuf_man *man = header->man; in __vmw_cmdbuf_header_free() local
261 lockdep_assert_held_once(&man->lock); in __vmw_cmdbuf_header_free()
269 wake_up_all(&man->alloc_queue); in __vmw_cmdbuf_header_free()
271 dma_pool_free(man->headers, header->cb_header, in __vmw_cmdbuf_header_free()
284 struct vmw_cmdbuf_man *man = header->man; in vmw_cmdbuf_header_free() local
291 spin_lock(&man->lock); in vmw_cmdbuf_header_free()
293 spin_unlock(&man->lock); in vmw_cmdbuf_header_free()
304 struct vmw_cmdbuf_man *man = header->man; in vmw_cmdbuf_header_submit() local
308 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); in vmw_cmdbuf_header_submit()
312 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); in vmw_cmdbuf_header_submit()
340 static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_submit() argument
343 while (ctx->num_hw_submitted < man->max_hw_submitted && in vmw_cmdbuf_ctx_submit()
378 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_process() argument
384 vmw_cmdbuf_ctx_submit(man, ctx); in vmw_cmdbuf_ctx_process()
393 wake_up_all(&man->idle_queue); in vmw_cmdbuf_ctx_process()
402 list_add_tail(&entry->list, &man->error); in vmw_cmdbuf_ctx_process()
403 schedule_work(&man->work); in vmw_cmdbuf_ctx_process()
420 vmw_cmdbuf_ctx_submit(man, ctx); in vmw_cmdbuf_ctx_process()
435 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_man_process() argument
443 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_man_process()
444 vmw_cmdbuf_ctx_process(man, ctx, &notempty); in vmw_cmdbuf_man_process()
446 if (man->irq_on && !notempty) { in vmw_cmdbuf_man_process()
447 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_man_process()
449 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_man_process()
450 man->irq_on = false; in vmw_cmdbuf_man_process()
451 } else if (!man->irq_on && notempty) { in vmw_cmdbuf_man_process()
452 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_man_process()
454 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_man_process()
455 man->irq_on = true; in vmw_cmdbuf_man_process()
475 static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_add() argument
482 list_add_tail(&header->list, &man->ctx[cb_context].submitted); in vmw_cmdbuf_ctx_add()
484 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_ctx_add()
497 void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_irqthread() argument
499 spin_lock(&man->lock); in vmw_cmdbuf_irqthread()
500 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_irqthread()
501 spin_unlock(&man->lock); in vmw_cmdbuf_irqthread()
515 struct vmw_cmdbuf_man *man = in vmw_cmdbuf_work_func() local
525 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_work_func()
528 mutex_lock(&man->error_mutex); in vmw_cmdbuf_work_func()
529 spin_lock(&man->lock); in vmw_cmdbuf_work_func()
530 list_for_each_entry_safe(entry, next, &man->error, list) { in vmw_cmdbuf_work_func()
564 if (man->using_mob) in vmw_cmdbuf_work_func()
577 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_work_func()
578 man->ctx[i].block_submission = true; in vmw_cmdbuf_work_func()
580 spin_unlock(&man->lock); in vmw_cmdbuf_work_func()
583 if (global_block && vmw_cmdbuf_preempt(man, 0)) in vmw_cmdbuf_work_func()
586 spin_lock(&man->lock); in vmw_cmdbuf_work_func()
587 for_each_cmdbuf_ctx(man, i, ctx) { in vmw_cmdbuf_work_func()
589 vmw_cmdbuf_ctx_process(man, ctx, &dummy); in vmw_cmdbuf_work_func()
606 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_work_func()
607 spin_unlock(&man->lock); in vmw_cmdbuf_work_func()
609 if (global_block && vmw_cmdbuf_startstop(man, 0, true)) in vmw_cmdbuf_work_func()
614 vmw_cmd_send_fence(man->dev_priv, &dummy); in vmw_cmdbuf_work_func()
615 wake_up_all(&man->idle_queue); in vmw_cmdbuf_work_func()
618 mutex_unlock(&man->error_mutex); in vmw_cmdbuf_work_func()
628 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_man_idle() argument
635 spin_lock(&man->lock); in vmw_cmdbuf_man_idle()
636 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_man_idle()
637 for_each_cmdbuf_ctx(man, i, ctx) { in vmw_cmdbuf_man_idle()
644 idle = list_empty(&man->error); in vmw_cmdbuf_man_idle()
647 spin_unlock(&man->lock); in vmw_cmdbuf_man_idle()
661 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) in __vmw_cmdbuf_cur_flush() argument
663 struct vmw_cmdbuf_header *cur = man->cur; in __vmw_cmdbuf_cur_flush()
665 lockdep_assert_held_once(&man->cur_mutex); in __vmw_cmdbuf_cur_flush()
670 spin_lock(&man->lock); in __vmw_cmdbuf_cur_flush()
671 if (man->cur_pos == 0) { in __vmw_cmdbuf_cur_flush()
676 man->cur->cb_header->length = man->cur_pos; in __vmw_cmdbuf_cur_flush()
677 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); in __vmw_cmdbuf_cur_flush()
679 spin_unlock(&man->lock); in __vmw_cmdbuf_cur_flush()
680 man->cur = NULL; in __vmw_cmdbuf_cur_flush()
681 man->cur_pos = 0; in __vmw_cmdbuf_cur_flush()
694 int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_cur_flush() argument
697 int ret = vmw_cmdbuf_cur_lock(man, interruptible); in vmw_cmdbuf_cur_flush()
702 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_cur_flush()
703 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_cur_flush()
719 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, in vmw_cmdbuf_idle() argument
724 ret = vmw_cmdbuf_cur_flush(man, interruptible); in vmw_cmdbuf_idle()
725 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_idle()
727 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_idle()
731 (man->idle_queue, vmw_cmdbuf_man_idle(man, true), in vmw_cmdbuf_idle()
735 (man->idle_queue, vmw_cmdbuf_man_idle(man, true), in vmw_cmdbuf_idle()
738 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_idle()
740 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_idle()
742 if (!vmw_cmdbuf_man_idle(man, true)) in vmw_cmdbuf_idle()
763 static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_try_alloc() argument
772 spin_lock(&man->lock); in vmw_cmdbuf_try_alloc()
773 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); in vmw_cmdbuf_try_alloc()
775 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_try_alloc()
776 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); in vmw_cmdbuf_try_alloc()
779 spin_unlock(&man->lock); in vmw_cmdbuf_try_alloc()
797 static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_alloc_space() argument
813 if (mutex_lock_interruptible(&man->space_mutex)) in vmw_cmdbuf_alloc_space()
816 mutex_lock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
820 if (vmw_cmdbuf_try_alloc(man, &info)) in vmw_cmdbuf_alloc_space()
823 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_alloc_space()
825 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
831 (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); in vmw_cmdbuf_alloc_space()
834 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER, in vmw_cmdbuf_alloc_space()
835 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
836 mutex_unlock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
840 wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); in vmw_cmdbuf_alloc_space()
842 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_alloc_space()
844 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
847 mutex_unlock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
861 static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_space_pool() argument
870 if (!man->has_pool) in vmw_cmdbuf_space_pool()
873 ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible); in vmw_cmdbuf_space_pool()
878 header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, in vmw_cmdbuf_space_pool()
888 header->cmd = man->map + offset; in vmw_cmdbuf_space_pool()
889 if (man->using_mob) { in vmw_cmdbuf_space_pool()
891 cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start; in vmw_cmdbuf_space_pool()
894 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; in vmw_cmdbuf_space_pool()
900 spin_lock(&man->lock); in vmw_cmdbuf_space_pool()
902 spin_unlock(&man->lock); in vmw_cmdbuf_space_pool()
915 static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_space_inline() argument
925 dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, in vmw_cmdbuf_space_inline()
956 void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_alloc() argument
970 ret = vmw_cmdbuf_space_inline(man, header, size); in vmw_cmdbuf_alloc()
972 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible); in vmw_cmdbuf_alloc()
979 header->man = man; in vmw_cmdbuf_alloc()
999 static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_reserve_cur() argument
1007 if (vmw_cmdbuf_cur_lock(man, interruptible)) in vmw_cmdbuf_reserve_cur()
1010 cur = man->cur; in vmw_cmdbuf_reserve_cur()
1011 if (cur && (size + man->cur_pos > cur->size || in vmw_cmdbuf_reserve_cur()
1014 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_reserve_cur()
1016 if (!man->cur) { in vmw_cmdbuf_reserve_cur()
1017 ret = vmw_cmdbuf_alloc(man, in vmw_cmdbuf_reserve_cur()
1018 max_t(size_t, size, man->default_size), in vmw_cmdbuf_reserve_cur()
1019 interruptible, &man->cur); in vmw_cmdbuf_reserve_cur()
1021 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_reserve_cur()
1025 cur = man->cur; in vmw_cmdbuf_reserve_cur()
1035 return (void *) (man->cur->cmd + man->cur_pos); in vmw_cmdbuf_reserve_cur()
1045 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_commit_cur() argument
1048 struct vmw_cmdbuf_header *cur = man->cur; in vmw_cmdbuf_commit_cur()
1050 lockdep_assert_held_once(&man->cur_mutex); in vmw_cmdbuf_commit_cur()
1053 man->cur_pos += size; in vmw_cmdbuf_commit_cur()
1057 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit_cur()
1058 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_commit_cur()
1074 void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, in vmw_cmdbuf_reserve() argument
1079 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible); in vmw_cmdbuf_reserve()
1102 void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, in vmw_cmdbuf_commit() argument
1106 vmw_cmdbuf_commit_cur(man, size, flush); in vmw_cmdbuf_commit()
1110 (void) vmw_cmdbuf_cur_lock(man, false); in vmw_cmdbuf_commit()
1111 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit()
1113 man->cur = header; in vmw_cmdbuf_commit()
1114 man->cur_pos = size; in vmw_cmdbuf_commit()
1118 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit()
1119 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_commit()
1132 static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_send_device_command() argument
1138 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header); in vmw_cmdbuf_send_device_command()
1146 spin_lock(&man->lock); in vmw_cmdbuf_send_device_command()
1148 spin_unlock(&man->lock); in vmw_cmdbuf_send_device_command()
1169 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context) in vmw_cmdbuf_preempt() argument
1180 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); in vmw_cmdbuf_preempt()
1194 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, in vmw_cmdbuf_startstop() argument
1206 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); in vmw_cmdbuf_startstop()
1221 int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) in vmw_cmdbuf_set_pool_size() argument
1223 struct vmw_private *dev_priv = man->dev_priv; in vmw_cmdbuf_set_pool_size()
1227 if (man->has_pool) in vmw_cmdbuf_set_pool_size()
1232 man->map = dma_alloc_coherent(dev_priv->drm.dev, size, in vmw_cmdbuf_set_pool_size()
1233 &man->handle, GFP_KERNEL); in vmw_cmdbuf_set_pool_size()
1234 if (man->map) { in vmw_cmdbuf_set_pool_size()
1235 man->using_mob = false; in vmw_cmdbuf_set_pool_size()
1249 &man->cmd_space); in vmw_cmdbuf_set_pool_size()
1253 man->using_mob = true; in vmw_cmdbuf_set_pool_size()
1254 ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT, in vmw_cmdbuf_set_pool_size()
1255 &man->map_obj); in vmw_cmdbuf_set_pool_size()
1259 man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy); in vmw_cmdbuf_set_pool_size()
1262 man->size = size; in vmw_cmdbuf_set_pool_size()
1263 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); in vmw_cmdbuf_set_pool_size()
1265 man->has_pool = true; in vmw_cmdbuf_set_pool_size()
1273 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_set_pool_size()
1276 (man->using_mob) ? "MOB" : "DMA"); in vmw_cmdbuf_set_pool_size()
1281 if (man->using_mob) { in vmw_cmdbuf_set_pool_size()
1282 ttm_bo_put(man->cmd_space); in vmw_cmdbuf_set_pool_size()
1283 man->cmd_space = NULL; in vmw_cmdbuf_set_pool_size()
1301 struct vmw_cmdbuf_man *man; in vmw_cmdbuf_man_create() local
1309 man = kzalloc(sizeof(*man), GFP_KERNEL); in vmw_cmdbuf_man_create()
1310 if (!man) in vmw_cmdbuf_man_create()
1313 man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ? in vmw_cmdbuf_man_create()
1315 man->headers = dma_pool_create("vmwgfx cmdbuf", in vmw_cmdbuf_man_create()
1319 if (!man->headers) { in vmw_cmdbuf_man_create()
1324 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf", in vmw_cmdbuf_man_create()
1328 if (!man->dheaders) { in vmw_cmdbuf_man_create()
1333 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_man_create()
1336 INIT_LIST_HEAD(&man->error); in vmw_cmdbuf_man_create()
1337 spin_lock_init(&man->lock); in vmw_cmdbuf_man_create()
1338 mutex_init(&man->cur_mutex); in vmw_cmdbuf_man_create()
1339 mutex_init(&man->space_mutex); in vmw_cmdbuf_man_create()
1340 mutex_init(&man->error_mutex); in vmw_cmdbuf_man_create()
1341 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_man_create()
1342 init_waitqueue_head(&man->alloc_queue); in vmw_cmdbuf_man_create()
1343 init_waitqueue_head(&man->idle_queue); in vmw_cmdbuf_man_create()
1344 man->dev_priv = dev_priv; in vmw_cmdbuf_man_create()
1345 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1; in vmw_cmdbuf_man_create()
1346 INIT_WORK(&man->work, &vmw_cmdbuf_work_func); in vmw_cmdbuf_man_create()
1349 ret = vmw_cmdbuf_startstop(man, 0, true); in vmw_cmdbuf_man_create()
1352 vmw_cmdbuf_man_destroy(man); in vmw_cmdbuf_man_create()
1356 return man; in vmw_cmdbuf_man_create()
1359 dma_pool_destroy(man->headers); in vmw_cmdbuf_man_create()
1361 kfree(man); in vmw_cmdbuf_man_create()
1377 void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_remove_pool() argument
1379 if (!man->has_pool) in vmw_cmdbuf_remove_pool()
1382 man->has_pool = false; in vmw_cmdbuf_remove_pool()
1383 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_remove_pool()
1384 (void) vmw_cmdbuf_idle(man, false, 10*HZ); in vmw_cmdbuf_remove_pool()
1385 if (man->using_mob) { in vmw_cmdbuf_remove_pool()
1386 (void) ttm_bo_kunmap(&man->map_obj); in vmw_cmdbuf_remove_pool()
1387 ttm_bo_put(man->cmd_space); in vmw_cmdbuf_remove_pool()
1388 man->cmd_space = NULL; in vmw_cmdbuf_remove_pool()
1390 dma_free_coherent(man->dev_priv->drm.dev, in vmw_cmdbuf_remove_pool()
1391 man->size, man->map, man->handle); in vmw_cmdbuf_remove_pool()
1402 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_man_destroy() argument
1404 WARN_ON_ONCE(man->has_pool); in vmw_cmdbuf_man_destroy()
1405 (void) vmw_cmdbuf_idle(man, false, 10*HZ); in vmw_cmdbuf_man_destroy()
1407 if (vmw_cmdbuf_startstop(man, 0, false)) in vmw_cmdbuf_man_destroy()
1410 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, in vmw_cmdbuf_man_destroy()
1411 &man->dev_priv->error_waiters); in vmw_cmdbuf_man_destroy()
1412 (void) cancel_work_sync(&man->work); in vmw_cmdbuf_man_destroy()
1413 dma_pool_destroy(man->dheaders); in vmw_cmdbuf_man_destroy()
1414 dma_pool_destroy(man->headers); in vmw_cmdbuf_man_destroy()
1415 mutex_destroy(&man->cur_mutex); in vmw_cmdbuf_man_destroy()
1416 mutex_destroy(&man->space_mutex); in vmw_cmdbuf_man_destroy()
1417 mutex_destroy(&man->error_mutex); in vmw_cmdbuf_man_destroy()
1418 kfree(man); in vmw_cmdbuf_man_destroy()
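
The references above trace `man` through the producer-side flow of the command buffer manager: reserve space in the current buffer (lines 999-1079), write the command, then commit and optionally flush (lines 1045-1119). Below is a minimal usage sketch of that pair. The matched lines only show the `man` arguments, so the remaining parameters (`ctx_id`, `interruptible`, `header`, `flush`) and the `SVGA3D_INVALID_ID` default context are assumptions based on how the listed callers use these functions, not a verbatim copy of any caller in the driver.

	/* Hypothetical caller; vmwgfx_drv.h declares the reserve/commit pair. */
	#include <linux/err.h>
	#include <linux/string.h>
	#include "vmwgfx_drv.h"

	static int example_send_cmd(struct vmw_cmdbuf_man *man,
				    const void *cmd, size_t size)
	{
		/*
		 * Reserve @size bytes in the current command buffer; with a
		 * NULL header this takes the "cur" path (vmw_cmdbuf_reserve_cur),
		 * allocating a new buffer if none is open or if @size no
		 * longer fits.
		 */
		void *buf = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
					       true /* interruptible */, NULL);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		memcpy(buf, cmd, size);

		/*
		 * Publish the command. flush == false leaves the buffer open
		 * so later commands can be batched; a later
		 * vmw_cmdbuf_cur_flush() (line 694) submits it to the device.
		 */
		vmw_cmdbuf_commit(man, size, NULL, false);

		return 0;
	}

Note the locking discipline the listing exposes: the reserve/commit path serializes on `man->cur_mutex` (lines 207-226), while submission and completion processing share the `man->lock` spinlock (e.g. lines 499-501), which is why `__vmw_cmdbuf_cur_flush()` takes the spinlock only around the brief hand-off to `vmw_cmdbuf_ctx_add()`.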