// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_mqd_manager.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

/* Mapping queue priority to pipe priority, indexed by queue priority */
int pipe_priority_map[] = {
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH
};

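/*
 * HIQ MQDs are not allocated individually: they live in the per-device
 * hiq_sdma_mqd GTT buffer owned by the device queue manager.  Only the
 * kfd_mem_obj bookkeeping structure is allocated here; it aliases the
 * start of that shared buffer.
 */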
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj = NULL;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
	mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;

	return mqd_mem_obj;
}

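/*
 * SDMA MQDs are likewise carved out of the shared hiq_sdma_mqd buffer,
 * laid out after the HIQ MQD as a dense array indexed by
 * (engine_id * queues_per_engine + queue_id).
 *
 * Illustrative example (sizes are hypothetical): with 8 SDMA queues per
 * engine and a 128-byte SDMA MQD, engine 1 / queue 2 would start
 * (1 * 8 + 2) * 128 = 1280 bytes past the HIQ MQD.
 */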
struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
					struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj = NULL;
	uint64_t offset;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	offset = (q->sdma_engine_id *
		dev->device_info.num_sdma_queues_per_engine +
		q->sdma_queue_id) *
		dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;

	offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;

	mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
				+ offset);
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
	mqd_mem_obj->cpu_ptr = (uint32_t *)((uint64_t)
				dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);

	return mqd_mem_obj;
}

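/*
 * HIQ and SDMA MQDs share the device-wide hiq_sdma_mqd buffer, so only the
 * kfd_mem_obj wrapper is freed here; the backing GTT memory stays allocated
 * for the lifetime of the device queue manager.
 */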
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	WARN_ON(!mqd_mem_obj->gtt_mem);
	kfree(mqd_mem_obj);
}

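/*
 * Translate a user-supplied CU mask into per-shader-engine enable masks,
 * distributing the requested CUs symmetrically across all shader engines
 * and shader arrays.  On GFX10 and later, CUs are enabled in WGP (2-CU)
 * granularity, hence the 0x3 enable mask and the stride of 2.
 */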
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
		const uint32_t *cu_mask, uint32_t cu_mask_count,
		uint32_t *se_mask)
{
	struct kfd_cu_info cu_info;
	uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
	bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
	uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
	int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;

	amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);

	if (cu_mask_count > cu_info.cu_active_number)
		cu_mask_count = cu_info.cu_active_number;

	/* Exceeding these bounds corrupts the stack and indicates a coding error.
	 * Returning with no CUs enabled will hang the queue, which should be
	 * attention grabbing.
	 */
	if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
		pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
		return;
	}
	if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
		pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
			cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
		return;
	}

	cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&
			    KFD_GC_VERSION(mm->dev) < IP_VERSION(12, 0, 0)) ? 2 : 1;

	/* Count active CUs per SH.
	 *
	 * Some CUs in an SH may be disabled.  HW expects disabled CUs to be
	 * represented in the high bits of each SH's enable mask (the upper and lower
	 * 16 bits of se_mask) and will take care of the actual distribution of
	 * disabled CUs within each SH automatically.
	 * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
	 *
	 * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
	 * See note on GFX11 cu_bitmap layout in gfx_v11_0_get_cu_info.
	 */
	for (se = 0; se < cu_info.num_shader_engines; se++)
		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
			cu_per_sh[se][sh] = hweight32(
				cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);

	/* Symmetrically map cu_mask to all SEs & SHs:
	 * se_mask programs up to 2 SH in the upper and lower 16 bits.
	 *
	 * Examples
	 * Assuming 1 SH/SE, 4 SEs:
	 * cu_mask[0] bit0 -> se_mask[0] bit0
	 * cu_mask[0] bit1 -> se_mask[1] bit0
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit1
	 * ...
	 *
	 * Assuming 2 SH/SE, 4 SEs
	 * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
	 * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
	 * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
	 * ...
	 * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
	 * ...
	 *
	 * First ensure all CUs are disabled, then enable user-specified CUs.
	 */
	for (i = 0; i < cu_info.num_shader_engines; i++)
		se_mask[i] = 0;

	i = 0;
	for (cu = 0; cu < 16; cu += inc) {
		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
			for (se = 0; se < cu_info.num_shader_engines; se++) {
				if (cu_per_sh[se][sh] > cu) {
					if (cu_mask[i / 32] & (en_mask << (i % 32)))
						se_mask[se] |= en_mask << (cu + sh * 16);
					i += inc;
					if (i == cu_mask_count)
						return;
				}
			}
		}
	}
}

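/* Load the HIQ MQD through the KIQ via the kfd2kgd interface. */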
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
		     uint32_t pipe_id, uint32_t queue_id,
		     struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
					      queue_id, p->doorbell_off);
}

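/* Tear down a compute queue's HQD; the preempt type and timeout are passed
 * straight through to the kfd2kgd hqd_destroy hook.
 */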
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type, unsigned int timeout,
		uint32_t pipe_id, uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
						pipe_id, queue_id);
}

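/*
 * Free a compute MQD.  MQDs with a gtt_mem backing were allocated through
 * amdgpu_amdkfd and are released there; otherwise the memory came from the
 * KFD GTT sub-allocator.
 */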
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
	      struct kfd_mem_obj *mqd_mem_obj)
{
	if (mqd_mem_obj->gtt_mem) {
		amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
		kfree(mqd_mem_obj);
	} else {
		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
	}
}

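/* Check whether the HQD at (pipe_id, queue_id) is still bound to the given
 * queue address.
 */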
bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
		 uint64_t queue_address, uint32_t pipe_id,
		 uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
						pipe_id, queue_id);
}

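/* Load an SDMA MQD; the pipe_id and queue_id arguments are not used by the
 * SDMA path, only the MQD and the user-space write pointer are passed on.
 */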
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
		  uint32_t pipe_id, uint32_t queue_id,
		  struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
						(uint32_t __user *)p->write_ptr,
						mms);
}

/*
 * The preempt type is ignored here because there is only one way to
 * preempt an SDMA queue.
 */
int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
		     enum kfd_preempt_type type,
		     unsigned int timeout, uint32_t pipe_id,
		     uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}

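/* For SDMA, occupancy is determined from the MQD alone; the queue address
 * and pipe/queue IDs are not used.
 */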
bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
		      uint64_t queue_address, uint32_t pipe_id,
		      uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}