// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
/*
 * Currently, the only reason we need to keep track of views is that if we
 * destroy a hardware surface, all views pointing to it must also be
 * destroyed, otherwise the device will error.
 * So in particular, if a surface is evicted we must destroy all views
 * pointing to it, and all context bindings of those views. Similarly, we
 * must restore the view bindings, views and surfaces pointed to by the
 * views when a context is referenced in the command stream.
 */

/**
 * struct vmw_view - view metadata
 *
 * @rcu: RCU callback head
 * @res: The struct vmw_resource we derive from
 * @ctx: Non-refcounted pointer to the context this view belongs to.
 * @srf: Refcounted pointer to the surface pointed to by this view.
 * @cotable: Refcounted pointer to the cotable holding this view.
 * @srf_head: List head for the surface-to-view list.
 * @cotable_head: List head for the cotable-to-view list.
 * @view_type: View type.
 * @view_id: User-space per context view id. Currently used also as per
 * context device view id.
 * @cmd_size: Size of the SVGA3D define view command that we've copied from the
 * command stream.
 * @committed: Whether the view is actually created or pending creation at the
 * device level.
 * @cmd: The SVGA3D define view command copied from the command stream.
 */
struct vmw_view {
	struct rcu_head rcu;
	struct vmw_resource res;
	struct vmw_resource *ctx;      /* Immutable */
	struct vmw_resource *srf;      /* Immutable */
	struct vmw_resource *cotable;  /* Immutable */
	struct list_head srf_head;     /* Protected by binding_mutex */
	struct list_head cotable_head; /* Protected by binding_mutex */
	unsigned view_type;            /* Immutable */
	unsigned view_id;              /* Immutable */
	u32 cmd_size;                  /* Immutable */
	bool committed;                /* Protected by binding_mutex */
	u32 cmd[1];                    /* Immutable */
};
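
/*
 * Note: the full SVGA3D view define command is stored inline at @cmd
 * (allocated in vmw_view_add()) so that an evicted view can later be
 * redefined simply by replaying it; see vmw_view_create().
 */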

static int vmw_view_create(struct vmw_resource *res);
static int vmw_view_destroy(struct vmw_resource *res);
static void vmw_hw_view_destroy(struct vmw_resource *res);
static void vmw_view_commit_notify(struct vmw_resource *res,
				   enum vmw_cmdbuf_res_state state);

static const struct vmw_res_func vmw_view_func = {
	.res_type = vmw_res_view,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "DX view",
	.backup_placement = NULL,
	.create = vmw_view_create,
	.commit_notify = vmw_view_commit_notify,
};

/**
 * struct vmw_view_define - view define command body stub
 *
 * @view_id: The device id of the view being defined
 * @sid: The surface id of the view being defined
 *
 * This generic struct is used by the code to change @view_id and @sid of a
 * saved view define command.
 */
struct vmw_view_define {
	uint32 view_id;
	uint32 sid;
};

/**
 * vmw_view - Convert a struct vmw_resource to a struct vmw_view
 *
 * @res: Pointer to the resource to convert.
 *
 * Returns a pointer to a struct vmw_view.
 */
static struct vmw_view *vmw_view(struct vmw_resource *res)
{
	return container_of(res, struct vmw_view, res);
}

/**
 * vmw_view_commit_notify - Notify that a view operation has been committed to
 * hardware from a user-supplied command stream.
 *
 * @res: Pointer to the view resource.
 * @state: Indicating whether a creation or removal has been committed.
 *
 */
static void vmw_view_commit_notify(struct vmw_resource *res,
				   enum vmw_cmdbuf_res_state state)
{
	struct vmw_view *view = vmw_view(res);
	struct vmw_private *dev_priv = res->dev_priv;

	mutex_lock(&dev_priv->binding_mutex);
	if (state == VMW_CMDBUF_RES_ADD) {
		struct vmw_surface *srf = vmw_res_to_srf(view->srf);

		list_add_tail(&view->srf_head, &srf->view_list);
		vmw_cotable_add_resource(view->cotable, &view->cotable_head);
		view->committed = true;
		res->id = view->view_id;

	} else {
		list_del_init(&view->cotable_head);
		list_del_init(&view->srf_head);
		view->committed = false;
		res->id = -1;
	}
	mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_view_create - Create a hardware view.
 *
 * @res: Pointer to the view resource.
 *
 * Create a hardware view. Typically used if that view has previously been
 * destroyed by an eviction operation.
 */
static int vmw_view_create(struct vmw_resource *res)
{
	struct vmw_view *view = vmw_view(res);
	struct vmw_surface *srf = vmw_res_to_srf(view->srf);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct vmw_view_define body;
	} *cmd;

	mutex_lock(&dev_priv->binding_mutex);
	if (!view->committed) {
		mutex_unlock(&dev_priv->binding_mutex);
		return 0;
	}

	cmd = VMW_CMD_CTX_RESERVE(res->dev_priv, view->cmd_size, view->ctx->id);
	if (!cmd) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	memcpy(cmd, &view->cmd, view->cmd_size);
	WARN_ON(cmd->body.view_id != view->view_id);
	/* Sid may have changed due to surface eviction. */
	WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
	cmd->body.sid = view->srf->id;
	vmw_cmd_commit(res->dev_priv, view->cmd_size);
	res->id = view->view_id;
	list_add_tail(&view->srf_head, &srf->view_list);
	vmw_cotable_add_resource(view->cotable, &view->cotable_head);
	mutex_unlock(&dev_priv->binding_mutex);

	return 0;
}

/**
 * vmw_view_destroy - Destroy a hardware view.
 *
 * @res: Pointer to the view resource.
 *
 * Destroy a hardware view. Typically used on unexpected termination of the
 * owning process or if the surface the view is pointing to is destroyed.
 */
static int vmw_view_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_view *view = vmw_view(res);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd;

	lockdep_assert_held_once(&dev_priv->binding_mutex);
	vmw_binding_res_list_scrub(&res->binding_head);

	if (!view->committed || res->id == -1)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), view->ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = vmw_view_destroy_cmds[view->view_type];
	cmd->header.size = sizeof(cmd->body);
	cmd->body.view_id = view->view_id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	res->id = -1;
	list_del_init(&view->cotable_head);
	list_del_init(&view->srf_head);

	return 0;
}

/**
 * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
 *
 * @res: Pointer to the view resource.
 *
 * Destroy a hardware view if it's still present.
 */
static void vmw_hw_view_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	mutex_lock(&dev_priv->binding_mutex);
	WARN_ON(vmw_view_destroy(res));
	res->id = -1;
	mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
 *
 * @user_key: The user-space id used for the view.
 * @view_type: The view type.
 *
 * Returns a key that uniquely identifies the view within its context by
 * combining the user-space id with the view type.
 */
static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
{
	return user_key | (view_type << 20);
}
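
/*
 * Example (illustrative, assuming the enum vmw_view_type ordering in
 * vmwgfx_so.h where vmw_view_rt == 1): user_key 5 for a render-target view
 * yields the key (1 << 20) | 5, so the per-type id spaces never overlap.
 * vmw_so_build_asserts() below checks that SVGA_COTABLE_MAX_IDS fits in the
 * low 20 bits.
 */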

/**
 * vmw_view_id_ok - Basic view id and type range checks.
 *
 * @user_key: The user-space id used for the view.
 * @view_type: The view type.
 *
 * Checks that the view id and type (typically provided by user-space) are
 * valid.
 */
static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
{
	return (user_key < SVGA_COTABLE_MAX_IDS &&
		view_type < vmw_view_max);
}

/**
 * vmw_view_res_free - resource res_free callback for view resources
 *
 * @res: Pointer to a struct vmw_resource
 *
 * Frees memory held by the struct vmw_view.
 */
static void vmw_view_res_free(struct vmw_resource *res)
{
	struct vmw_view *view = vmw_view(res);

	vmw_resource_unreference(&view->cotable);
	vmw_resource_unreference(&view->srf);
	kfree_rcu(view, rcu);
}

/**
 * vmw_view_add - Create a view resource and stage it for addition
 * as a command buffer managed resource.
 *
 * @man: Pointer to the cmdbuf resource manager identifying the view namespace.
 * @ctx: Pointer to a struct vmw_resource identifying the active context.
 * @srf: Pointer to a struct vmw_resource identifying the surface the view
 * points to.
 * @view_type: The view type deduced from the view create command.
 * @user_key: The key that is used to identify the view. The key is
 * unique to the view type and to the context.
 * @cmd: Pointer to the view create command in the command stream.
 * @cmd_size: Size of the view create command in the command stream.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
		 struct vmw_resource *ctx,
		 struct vmw_resource *srf,
		 enum vmw_view_type view_type,
		 u32 user_key,
		 const void *cmd,
		 size_t cmd_size,
		 struct list_head *list)
{
	static const size_t vmw_view_define_sizes[] = {
		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView),
		[vmw_view_ua] = sizeof(SVGA3dCmdDXDefineUAView)
	};

	struct vmw_private *dev_priv = ctx->dev_priv;
	struct vmw_resource *res;
	struct vmw_view *view;
	size_t size;
	int ret;

	if (cmd_size != vmw_view_define_sizes[view_type] +
	    sizeof(SVGA3dCmdHeader)) {
		VMW_DEBUG_USER("Illegal view create command size.\n");
		return -EINVAL;
	}

	if (!vmw_view_id_ok(user_key, view_type)) {
		VMW_DEBUG_USER("Illegal view add view id.\n");
		return -EINVAL;
	}

	size = offsetof(struct vmw_view, cmd) + cmd_size;

	view = kmalloc(size, GFP_KERNEL);
	if (!view)
		return -ENOMEM;

	res = &view->res;
	view->ctx = ctx;
	view->srf = vmw_resource_reference(srf);
	view->cotable = vmw_resource_reference
		(vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
	view->view_type = view_type;
	view->view_id = user_key;
	view->cmd_size = cmd_size;
	view->committed = false;
	INIT_LIST_HEAD(&view->srf_head);
	INIT_LIST_HEAD(&view->cotable_head);
	memcpy(&view->cmd, cmd, cmd_size);
	ret = vmw_resource_init(dev_priv, res, true,
				vmw_view_res_free, &vmw_view_func);
	if (ret)
		goto out_resource_init;

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
				 vmw_view_key(user_key, view_type),
				 res, list);
	if (ret)
		goto out_resource_init;

	res->id = view->view_id;
	res->hw_destroy = vmw_hw_view_destroy;

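	/*
	 * On success the staged cmdbuf resource entry takes its own reference
	 * to the view in vmw_cmdbuf_res_add(), so both the success and the
	 * error path drop the initial reference from vmw_resource_init() here.
	 */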
out_resource_init:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_view_remove - Stage a view for removal.
 *
 * @man: Pointer to the cmdbuf resource manager identifying the view namespace.
 * @user_key: The key that is used to identify the view. The key is
 * unique to the view type.
 * @view_type: View type
 * @list: Caller's list of staged command buffer resource actions.
 * @res_p: If the resource is in an already committed state, points to the
 * struct vmw_resource on successful return. The pointer will be
 * non ref-counted.
 */
int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
		    u32 user_key, enum vmw_view_type view_type,
		    struct list_head *list,
		    struct vmw_resource **res_p)
{
	if (!vmw_view_id_ok(user_key, view_type)) {
		VMW_DEBUG_USER("Illegal view remove view id.\n");
		return -EINVAL;
	}

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
				     vmw_view_key(user_key, view_type),
				     list, res_p);
}

/**
 * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
 *
 * @dev_priv: Pointer to a device private struct.
 * @list: List of views belonging to a cotable.
 * @readback: Unused. Needed for function interface only.
 *
 * This function evicts all views belonging to a cotable.
 * It must be called with the binding_mutex held, and the caller must hold
 * a reference to the view resource. This is typically called before the
 * cotable is paged out.
 */
void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
				   struct list_head *list,
				   bool readback)
{
	struct vmw_view *entry, *next;

	lockdep_assert_held_once(&dev_priv->binding_mutex);

	list_for_each_entry_safe(entry, next, list, cotable_head)
		WARN_ON(vmw_view_destroy(&entry->res));
}

/**
 * vmw_view_surface_list_destroy - Evict all views pointing to a surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @list: List of views pointing to a surface.
 *
 * This function evicts all views pointing to a surface. This is typically
 * called before the surface is evicted.
 */
void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
				   struct list_head *list)
{
	struct vmw_view *entry, *next;

	lockdep_assert_held_once(&dev_priv->binding_mutex);

	list_for_each_entry_safe(entry, next, list, srf_head)
		WARN_ON(vmw_view_destroy(&entry->res));
}

/**
 * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
 * pointing to.
 *
 * @res: pointer to a view resource.
 *
 * Note that the view itself is holding a reference, so as long as the
 * view resource is alive, the surface resource will be.
 */
struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
{
	return vmw_view(res)->srf;
}

/**
 * vmw_view_lookup - Look up a view.
 *
 * @man: The context's cmdbuf ref manager.
 * @view_type: The view type.
 * @user_key: The view user id.
 *
 * Returns a refcounted pointer to a view, or an error pointer if not found.
 */
struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
				     enum vmw_view_type view_type,
				     u32 user_key)
{
	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
				     vmw_view_key(user_key, view_type));
}

/**
 * vmw_view_dirtying - Return whether a view type is dirtying its resource
 * @res: Pointer to the view
 *
 * Each time a resource is put on the validation list as the result of a
 * view pointing to it, we need to determine whether that resource will
 * be dirtied (written to by the GPU) as a result of the corresponding
 * GPU operation. Currently only rendertarget-, depth-stencil and unordered
 * access views are capable of dirtying their resource.
 *
 * Return: Whether the view type of @res dirties the resource it points to.
 */
u32 vmw_view_dirtying(struct vmw_resource *res)
{
	static u32 view_is_dirtying[vmw_view_max] = {
		[vmw_view_rt] = VMW_RES_DIRTY_SET,
		[vmw_view_ds] = VMW_RES_DIRTY_SET,
		[vmw_view_ua] = VMW_RES_DIRTY_SET,
	};

	/* Update this function as we add more view types */
	BUILD_BUG_ON(vmw_view_max != 4);
	return view_is_dirtying[vmw_view(res)->view_type];
}

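/* Device command ids used to destroy a view, indexed by view type. */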
const u32 vmw_view_destroy_cmds[] = {
	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
	[vmw_view_ua] = SVGA_3D_CMD_DX_DESTROY_UA_VIEW,
};

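/* The cotable each view type is entered into, indexed by view type. */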
const SVGACOTableType vmw_view_cotables[] = {
	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
	[vmw_view_ua] = SVGA_COTABLE_UAVIEW,
};

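/* The cotable used by each state-object (so) type. */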
const SVGACOTableType vmw_so_cotables[] = {
	[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
	[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
	[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
	[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
	[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
	[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT,
	[vmw_so_max] = SVGA_COTABLE_MAX
};


/* To remove unused function warning */
static void vmw_so_build_asserts(void) __attribute__((used));


/*
 * This function is unused at run-time, and only used to dump various build
 * asserts important for code optimization assumptions.
 */
static void vmw_so_build_asserts(void)
{
	/* Assert that our vmw_view_cmd_to_type() function is correct. */
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);

	/* Assert that our "one body fits all" assumption is valid */
	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));

	/* Assert that the view key space can hold all view ids. */
	BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));

	/*
	 * Assert that the offset of sid in all view define commands
	 * is what we assume it to be.
	 */
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineUAView, sid));
	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
		     offsetof(SVGA3dCmdDXDefineDepthStencilView_v2, sid));
}