1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2024 Intel Corporation
4 */
5
6 #include <linux/cleanup.h>
7 #include <drm/drm_managed.h>
8
9 #include "xe_assert.h"
10 #include "xe_bo.h"
11 #include "xe_gt_printk.h"
12 #include "xe_guc.h"
13 #include "xe_guc_buf.h"
14 #include "xe_sa.h"
15
cache_to_guc(struct xe_guc_buf_cache * cache)16 static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
17 {
18 return container_of(cache, struct xe_guc, buf);
19 }
20
/* Resolve the GT that owns the GuC embedding this buffer cache. */
static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
{
	struct xe_guc *guc = cache_to_guc(cache);

	return guc_to_gt(guc);
}
25
26 /**
27 * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
28 * @cache: the &xe_guc_buf_cache to initialize
29 *
30 * The Buffer Cache allows to obtain a reusable buffer that can be used to pass
31 * indirect H2G data to GuC without a need to create a ad-hoc allocation.
32 *
33 * Return: 0 on success or a negative error code on failure.
34 */
xe_guc_buf_cache_init(struct xe_guc_buf_cache * cache)35 int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
36 {
37 struct xe_gt *gt = cache_to_gt(cache);
38 struct xe_sa_manager *sam;
39
40 sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
41 if (IS_ERR(sam))
42 return PTR_ERR(sam);
43 cache->sam = sam;
44
45 xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n",
46 xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo),
47 __builtin_return_address(0));
48 return 0;
49 }
50
51 /**
52 * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
53 * @cache: the &xe_guc_buf_cache to query
54 *
55 * Return: a size of the largest reusable buffer (in dwords)
56 */
xe_guc_buf_cache_dwords(struct xe_guc_buf_cache * cache)57 u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache)
58 {
59 return cache->sam ? cache->sam->base.size / sizeof(u32) : 0;
60 }
61
62 /**
63 * xe_guc_buf_reserve() - Reserve a new sub-allocation.
64 * @cache: the &xe_guc_buf_cache where reserve sub-allocation
65 * @dwords: the requested size of the buffer in dwords
66 *
67 * Use xe_guc_buf_is_valid() to check if returned buffer reference is valid.
68 * Must use xe_guc_buf_release() to release a sub-allocation.
69 *
70 * Return: a &xe_guc_buf of new sub-allocation.
71 */
xe_guc_buf_reserve(struct xe_guc_buf_cache * cache,u32 dwords)72 struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords)
73 {
74 struct drm_suballoc *sa;
75
76 if (cache->sam)
77 sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(u32), GFP_ATOMIC);
78 else
79 sa = ERR_PTR(-EOPNOTSUPP);
80
81 return (struct xe_guc_buf){ .sa = sa };
82 }
83
84 /**
85 * xe_guc_buf_from_data() - Reserve a new sub-allocation using data.
86 * @cache: the &xe_guc_buf_cache where reserve sub-allocation
87 * @data: the data to flush the sub-allocation
88 * @size: the size of the data
89 *
90 * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory.
91 *
92 * Return: a &xe_guc_buf of new sub-allocation.
93 */
xe_guc_buf_from_data(struct xe_guc_buf_cache * cache,const void * data,size_t size)94 struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
95 const void *data, size_t size)
96 {
97 struct drm_suballoc *sa;
98
99 sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC);
100 if (!IS_ERR(sa))
101 memcpy(xe_sa_bo_cpu_addr(sa), data, size);
102
103 return (struct xe_guc_buf){ .sa = sa };
104 }
105
106 /**
107 * xe_guc_buf_release() - Release a sub-allocation.
108 * @buf: the &xe_guc_buf to release
109 *
110 * Releases a sub-allocation reserved by the xe_guc_buf_reserve().
111 */
xe_guc_buf_release(const struct xe_guc_buf buf)112 void xe_guc_buf_release(const struct xe_guc_buf buf)
113 {
114 if (xe_guc_buf_is_valid(buf))
115 xe_sa_bo_free(buf.sa, NULL);
116 }
117
118 /**
119 * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
120 * @buf: the &xe_guc_buf to flush
121 *
122 * Return: a GPU address of the sub-allocation.
123 */
xe_guc_buf_flush(const struct xe_guc_buf buf)124 u64 xe_guc_buf_flush(const struct xe_guc_buf buf)
125 {
126 xe_sa_bo_flush_write(buf.sa);
127 return xe_sa_bo_gpu_addr(buf.sa);
128 }
129
130 /**
131 * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation.
132 * @buf: the &xe_guc_buf to query
133 *
134 * Return: a CPU pointer of the sub-allocation.
135 */
xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)136 void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)
137 {
138 return xe_sa_bo_cpu_addr(buf.sa);
139 }
140
141 /**
142 * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation.
143 * @buf: the &xe_guc_buf to query
144 *
145 * Return: a GPU address of the sub-allocation.
146 */
xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)147 u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)
148 {
149 return xe_sa_bo_gpu_addr(buf.sa);
150 }
151
152 /**
153 * xe_guc_cache_gpu_addr_from_ptr() - Lookup a GPU address using the pointer.
154 * @cache: the &xe_guc_buf_cache with sub-allocations
155 * @ptr: the CPU pointer of the sub-allocation
156 * @size: the size of the data
157 *
158 * Return: a GPU address on success or 0 if the pointer was unrelated.
159 */
xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache * cache,const void * ptr,u32 size)160 u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size)
161 {
162 ptrdiff_t offset = ptr - cache->sam->cpu_ptr;
163
164 if (offset < 0 || offset + size > cache->sam->base.size)
165 return 0;
166
167 return cache->sam->gpu_addr + offset;
168 }
169
170 #if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
171 #include "tests/xe_guc_buf_kunit.c"
172 #endif
173