// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_free_list.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"
#include "pvr_rogue_fwif.h"
#include "pvr_vm.h"

#include <drm/drm_gem.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/drm/pvr_drm.h>

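/*
 * A free list supplies physical memory pages (PM pages) to the GPU for
 * parameter buffer allocations. The host seeds the list with
 * initial_num_pages; the firmware may asynchronously request grows of
 * grow_num_pages at a time, up to max_num_pages, when it runs low.
 */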
#define FREE_LIST_ENTRY_SIZE sizeof(u32)

#define FREE_LIST_ALIGNMENT \
	((ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE / FREE_LIST_ENTRY_SIZE) - 1)
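/*
 * FREE_LIST_ALIGNMENT is a page-count mask: the freelist base address
 * alignment expressed in entries (i.e. pages), minus one. It is used both to
 * test alignment (x & mask) and to round down (x & ~mask).
 */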

#define FREE_LIST_MIN_PAGES 50
#define FREE_LIST_MIN_PAGES_BRN66011 40
#define FREE_LIST_MIN_PAGES_ROGUEXE 25

/**
 * pvr_get_free_list_min_pages() - Get minimum free list size for this device
 * @pvr_dev: Device pointer.
 *
 * Return:
 *  * Minimum free list size, in PM physical pages.
 */
u32
pvr_get_free_list_min_pages(struct pvr_device *pvr_dev)
{
	u32 value;

	if (PVR_HAS_FEATURE(pvr_dev, roguexe)) {
		if (PVR_HAS_QUIRK(pvr_dev, 66011))
			value = FREE_LIST_MIN_PAGES_BRN66011;
		else
			value = FREE_LIST_MIN_PAGES_ROGUEXE;
	} else {
		value = FREE_LIST_MIN_PAGES;
	}

	return value;
}

static int
free_list_create_kernel_structure(struct pvr_file *pvr_file,
				  struct drm_pvr_ioctl_create_free_list_args *args,
				  struct pvr_free_list *free_list)
{
	struct pvr_gem_object *free_list_obj;
	struct pvr_vm_context *vm_ctx;
	u64 free_list_size;
	int err;

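	/*
	 * Sanity-check the creation arguments: the grow threshold is a
	 * percentage, the initial and grow sizes must fit within the maximum,
	 * and a grow size must be given if (and only if) the list can
	 * actually grow, i.e. initial_num_pages < max_num_pages.
	 */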
	if (args->grow_threshold > 100 ||
	    args->initial_num_pages > args->max_num_pages ||
	    args->grow_num_pages > args->max_num_pages ||
	    args->max_num_pages == 0 ||
	    (args->initial_num_pages < args->max_num_pages && !args->grow_num_pages) ||
	    (args->initial_num_pages == args->max_num_pages && args->grow_num_pages))
		return -EINVAL;

	if ((args->initial_num_pages & FREE_LIST_ALIGNMENT) ||
	    (args->max_num_pages & FREE_LIST_ALIGNMENT) ||
	    (args->grow_num_pages & FREE_LIST_ALIGNMENT))
		return -EINVAL;

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx)
		return -EINVAL;

	free_list_obj = pvr_vm_find_gem_object(vm_ctx, args->free_list_gpu_addr,
					       NULL, &free_list_size);
	if (!free_list_obj) {
		err = -EINVAL;
		goto err_put_vm_context;
	}

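	/*
	 * The freelist buffer is consumed by the PM hardware: reject objects
	 * that userspace can map on the CPU, require the PM/FW-protect flag,
	 * and make sure the object can hold one entry per page at the
	 * maximum freelist size.
	 */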
	if ((free_list_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS) ||
	    !(free_list_obj->flags & DRM_PVR_BO_PM_FW_PROTECT) ||
	    free_list_size < (args->max_num_pages * FREE_LIST_ENTRY_SIZE)) {
		err = -EINVAL;
		goto err_put_free_list_obj;
	}

	free_list->pvr_dev = pvr_file->pvr_dev;
	free_list->current_pages = 0;
	free_list->max_pages = args->max_num_pages;
	free_list->grow_pages = args->grow_num_pages;
	free_list->grow_threshold = args->grow_threshold;
	free_list->obj = free_list_obj;
	free_list->free_list_gpu_addr = args->free_list_gpu_addr;
	free_list->initial_num_pages = args->initial_num_pages;

	pvr_vm_context_put(vm_ctx);

	return 0;

err_put_free_list_obj:
	pvr_gem_object_put(free_list_obj);

err_put_vm_context:
	pvr_vm_context_put(vm_ctx);

	return err;
}

static void
free_list_destroy_kernel_structure(struct pvr_free_list *free_list)
{
	WARN_ON(!list_empty(&free_list->hwrt_list));

	pvr_gem_object_put(free_list->obj);
}

/**
 * calculate_free_list_ready_pages_locked() - Work out the number of free
 * list pages to reserve for growing within the FW without having to wait for
 * the host to progress a grow request
 * @free_list: Pointer to free list.
 * @pages: Total pages currently in free list.
 *
 * If the threshold or grow size would reserve fewer pages than the alignment
 * size (4 pages on Rogue), the feature is not used.
 *
 * Caller must hold &free_list->lock.
 *
 * Return: number of pages to reserve.
 */
static u32
calculate_free_list_ready_pages_locked(struct pvr_free_list *free_list, u32 pages)
{
	u32 ready_pages;

	lockdep_assert_held(&free_list->lock);

	ready_pages = ((pages * free_list->grow_threshold) / 100);

	/* The number of ready pages must not exceed the grow size. */
	ready_pages = min(ready_pages, free_list->grow_pages);

	/*
	 * The number of pages must be a multiple of the free list align size.
	 */
	ready_pages &= ~FREE_LIST_ALIGNMENT;
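	/*
	 * Example: with 1024 pages in the list, a 25% threshold and a grow
	 * size of 256 pages, this reserves min(256, 256) = 256 ready pages;
	 * the mask above rounds the result down to the 4-page alignment.
	 */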

	return ready_pages;
}

static u32
calculate_free_list_ready_pages(struct pvr_free_list *free_list, u32 pages)
{
	u32 ret;

	mutex_lock(&free_list->lock);

	ret = calculate_free_list_ready_pages_locked(free_list, pages);

	mutex_unlock(&free_list->lock);

	return ret;
}

static void
free_list_fw_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_freelist *fw_data = cpu_ptr;
	struct pvr_free_list *free_list = priv;
	u32 ready_pages;

	/* Fill out FW structure */
	ready_pages = calculate_free_list_ready_pages(free_list,
						      free_list->initial_num_pages);

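	/*
	 * Ready pages are held back from the page count the FW sees as
	 * currently available; it only gains access to them when it raises a
	 * grow request.
	 */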
	fw_data->max_pages = free_list->max_pages;
	fw_data->current_pages = free_list->initial_num_pages - ready_pages;
	fw_data->grow_pages = free_list->grow_pages;
	fw_data->ready_pages = ready_pages;
	fw_data->freelist_id = free_list->fw_id;
	fw_data->grow_pending = false;
	fw_data->current_stack_top = fw_data->current_pages - 1;
	fw_data->freelist_dev_addr = free_list->free_list_gpu_addr;
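	/*
	 * Entries are written from the end of the buffer towards the base
	 * (see pvr_free_list_insert_node_locked()), so the current device
	 * address is (max_pages - current_pages) entries in from the base,
	 * rounded down to the PM base address alignment.
	 */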
	fw_data->current_dev_addr = (fw_data->freelist_dev_addr +
				     ((fw_data->max_pages - fw_data->current_pages) *
				      FREE_LIST_ENTRY_SIZE)) &
				    ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
}

static int
free_list_create_fw_structure(struct pvr_file *pvr_file,
			      struct drm_pvr_ioctl_create_free_list_args *args,
			      struct pvr_free_list *free_list)
{
	struct pvr_device *pvr_dev = pvr_file->pvr_dev;

	/*
	 * Create and map the FW structure so we can initialise it. This is not
	 * accessed on the CPU side post-initialisation so the mapping lifetime
	 * is only for this function.
	 */
	free_list->fw_data = pvr_fw_object_create_and_map(pvr_dev, sizeof(*free_list->fw_data),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  free_list_fw_init, free_list,
							  &free_list->fw_obj);
	if (IS_ERR(free_list->fw_data))
		return PTR_ERR(free_list->fw_data);

	return 0;
}

static void
free_list_destroy_fw_structure(struct pvr_free_list *free_list)
{
	pvr_fw_object_unmap_and_destroy(free_list->fw_obj);
}

static int
pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
				  struct sg_table *sgt, u32 offset, u32 num_pages)
{
	struct sg_dma_page_iter dma_iter;
	u32 *page_list;

	lockdep_assert_held(&free_list->lock);

	page_list = pvr_gem_object_vmap(free_list->obj);
	if (IS_ERR(page_list))
		return PTR_ERR(page_list);

	offset /= FREE_LIST_ENTRY_SIZE;
	/* clang-format off */
	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
		u64 dma_pfn = dma_addr >>
			      ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;

		BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
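		/*
		 * A PM physical page is no larger than a CPU page (checked
		 * above), so each DMA-mapped CPU page may contribute several
		 * consecutive PM page entries. Entries are 32 bits wide,
		 * hence the PFN overflow check below.
		 */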

		for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
		     dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
			WARN_ON_ONCE(dma_pfn >> 32);

			page_list[offset++] = (u32)dma_pfn;
			dma_pfn++;

			num_pages--;
			if (!num_pages)
				break;
		}

		if (!num_pages)
			break;
	}
	/* clang-format on */

	/* Make sure our free_list update is flushed. */
	wmb();

	pvr_gem_object_vunmap(free_list->obj);

	return 0;
}

static int
pvr_free_list_insert_node_locked(struct pvr_free_list_node *free_list_node)
{
	struct pvr_free_list *free_list = free_list_node->free_list;
	struct sg_table *sgt;
	u32 start_page;
	u32 offset;
	int err;

	lockdep_assert_held(&free_list->lock);

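	/*
	 * New nodes fill the freelist buffer from the end towards the base,
	 * immediately below the pages that are already present.
	 */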
	start_page = free_list->max_pages - free_list->current_pages -
		     free_list_node->num_pages;
	offset = (start_page * FREE_LIST_ENTRY_SIZE) &
		 ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);

	sgt = drm_gem_shmem_get_pages_sgt(&free_list_node->mem_obj->base);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	err = pvr_free_list_insert_pages_locked(free_list, sgt,
						offset, free_list_node->num_pages);
	if (!err)
		free_list->current_pages += free_list_node->num_pages;

	return err;
}

static int
pvr_free_list_grow(struct pvr_free_list *free_list, u32 num_pages)
{
	struct pvr_device *pvr_dev = free_list->pvr_dev;
	struct pvr_free_list_node *free_list_node;
	int err;

	mutex_lock(&free_list->lock);

	if (num_pages & FREE_LIST_ALIGNMENT) {
		err = -EINVAL;
		goto err_unlock;
	}

	free_list_node = kzalloc(sizeof(*free_list_node), GFP_KERNEL);
	if (!free_list_node) {
		err = -ENOMEM;
		goto err_unlock;
	}

	free_list_node->num_pages = num_pages;
	free_list_node->free_list = free_list;

	free_list_node->mem_obj = pvr_gem_object_create(pvr_dev,
							num_pages <<
							ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
							PVR_BO_FW_FLAGS_DEVICE_CACHED);
	if (IS_ERR(free_list_node->mem_obj)) {
		err = PTR_ERR(free_list_node->mem_obj);
		goto err_free;
	}

	err = pvr_free_list_insert_node_locked(free_list_node);
	if (err)
		goto err_destroy_gem_object;

	list_add_tail(&free_list_node->node, &free_list->mem_block_list);

	/*
	 * Reserve a number of ready pages to allow the FW to process OOM
	 * quickly and asynchronously request a grow.
	 */
	free_list->ready_pages =
		calculate_free_list_ready_pages_locked(free_list,
						       free_list->current_pages);
	free_list->current_pages -= free_list->ready_pages;

	mutex_unlock(&free_list->lock);

	return 0;

err_destroy_gem_object:
	pvr_gem_object_put(free_list_node->mem_obj);

err_free:
	kfree(free_list_node);

err_unlock:
	mutex_unlock(&free_list->lock);

	return err;
}

void pvr_free_list_process_grow_req(struct pvr_device *pvr_dev,
				    struct rogue_fwif_fwccb_cmd_freelist_gs_data *req)
{
	struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, req->freelist_id);
	struct rogue_fwif_kccb_cmd resp_cmd = {
		.cmd_type = ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE,
	};
	struct rogue_fwif_freelist_gs_data *resp = &resp_cmd.cmd_data.free_list_gs_data;
	u32 grow_pages = 0;

	/* If we don't have a freelist registered for this ID, we can't do much. */
	if (WARN_ON(!free_list))
		return;

	/* Since the FW made the request, it has already consumed the ready
	 * pages, so update the host struct.
	 */
	free_list->current_pages += free_list->ready_pages;
	free_list->ready_pages = 0;

	/* If the grow succeeds, update the grow_pages argument. */
	if (!pvr_free_list_grow(free_list, free_list->grow_pages))
		grow_pages = free_list->grow_pages;

	/* Now prepare the response and send it back to the FW. */
	pvr_fw_object_get_fw_addr(free_list->fw_obj, &resp->freelist_fw_addr);
	resp->delta_pages = grow_pages;
	resp->new_pages = free_list->current_pages + free_list->ready_pages;
	resp->ready_pages = free_list->ready_pages;
	pvr_free_list_put(free_list);

	WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL));
}

static void
pvr_free_list_free_node(struct pvr_free_list_node *free_list_node)
{
	pvr_gem_object_put(free_list_node->mem_obj);

	kfree(free_list_node);
}

/**
 * pvr_free_list_create() - Create a new free list and return an object pointer
 * @pvr_file: Pointer to pvr_file structure.
 * @args: Creation arguments from userspace.
 *
 * Return:
 *  * Pointer to new free_list, or
 *  * ERR_PTR(-%ENOMEM) on out of memory, or
 *  * ERR_PTR(-%EINVAL) on invalid arguments.
 */
struct pvr_free_list *
pvr_free_list_create(struct pvr_file *pvr_file,
		     struct drm_pvr_ioctl_create_free_list_args *args)
{
	struct pvr_free_list *free_list;
	int err;

	/* Create and fill out the kernel structure */
	free_list = kzalloc(sizeof(*free_list), GFP_KERNEL);

	if (!free_list)
		return ERR_PTR(-ENOMEM);

	kref_init(&free_list->ref_count);
	INIT_LIST_HEAD(&free_list->mem_block_list);
	INIT_LIST_HEAD(&free_list->hwrt_list);
	mutex_init(&free_list->lock);

	err = free_list_create_kernel_structure(pvr_file, args, free_list);
	if (err < 0)
		goto err_free;

	/* Allocate global object ID for firmware. */
	err = xa_alloc(&pvr_file->pvr_dev->free_list_ids,
		       &free_list->fw_id,
		       free_list,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_destroy_kernel_structure;

	err = free_list_create_fw_structure(pvr_file, args, free_list);
	if (err < 0)
		goto err_free_fw_id;

	err = pvr_free_list_grow(free_list, args->initial_num_pages);
	if (err < 0)
		goto err_fw_struct_cleanup;

	return free_list;

err_fw_struct_cleanup:
	WARN_ON(pvr_fw_structure_cleanup(free_list->pvr_dev,
					 ROGUE_FWIF_CLEANUP_FREELIST,
					 free_list->fw_obj, 0));

err_free_fw_id:
	xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id);

err_destroy_kernel_structure:
	free_list_destroy_kernel_structure(free_list);

err_free:
	mutex_destroy(&free_list->lock);
	kfree(free_list);

	return ERR_PTR(err);
}

static void
pvr_free_list_release(struct kref *ref_count)
{
	struct pvr_free_list *free_list =
		container_of(ref_count, struct pvr_free_list, ref_count);
	struct list_head *pos, *n;
	int err;

	xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id);

	err = pvr_fw_structure_cleanup(free_list->pvr_dev,
				       ROGUE_FWIF_CLEANUP_FREELIST,
				       free_list->fw_obj, 0);
	if (err == -EBUSY) {
		/* Flush the FWCCB to process any HWR or freelist reconstruction
		 * request that might keep the freelist busy, and try again.
		 */
		pvr_fwccb_process(free_list->pvr_dev);
		err = pvr_fw_structure_cleanup(free_list->pvr_dev,
					       ROGUE_FWIF_CLEANUP_FREELIST,
					       free_list->fw_obj, 0);
	}

	WARN_ON(err);

	/* clang-format off */
	list_for_each_safe(pos, n, &free_list->mem_block_list) {
		struct pvr_free_list_node *free_list_node =
			container_of(pos, struct pvr_free_list_node, node);

		list_del(pos);
		pvr_free_list_free_node(free_list_node);
	}
	/* clang-format on */

	free_list_destroy_kernel_structure(free_list);
	free_list_destroy_fw_structure(free_list);
	mutex_destroy(&free_list->lock);
	kfree(free_list);
}

/**
 * pvr_destroy_free_lists_for_file() - Destroy any free lists associated with
 * the given file.
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all free lists associated with @pvr_file from the device free_list
 * list and drops initial references. Free lists will then be destroyed once
 * all outstanding references are dropped.
 */
void pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file)
{
	struct pvr_free_list *free_list;
	unsigned long handle;

	xa_for_each(&pvr_file->free_list_handles, handle, free_list) {
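		/* The iteration cursor is unused; xa_erase() returns the
		 * entry to drop the reference on.
		 */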
		(void)free_list;
		pvr_free_list_put(xa_erase(&pvr_file->free_list_handles, handle));
	}
}

/**
 * pvr_free_list_put() - Release reference on free list
 * @free_list: Pointer to list to release reference on
 */
void
pvr_free_list_put(struct pvr_free_list *free_list)
{
	if (free_list)
		kref_put(&free_list->ref_count, pvr_free_list_release);
}

void pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
{
	mutex_lock(&free_list->lock);

	list_add_tail(&hwrt_data->freelist_node, &free_list->hwrt_list);

	mutex_unlock(&free_list->lock);
}

void pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
{
	mutex_lock(&free_list->lock);

	list_del(&hwrt_data->freelist_node);

	mutex_unlock(&free_list->lock);
}

static void
pvr_free_list_reconstruct(struct pvr_device *pvr_dev, u32 freelist_id)
{
	struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, freelist_id);
	struct pvr_free_list_node *free_list_node;
	struct rogue_fwif_freelist *fw_data;
	struct pvr_hwrt_data *hwrt_data;

	if (!free_list)
		return;

	mutex_lock(&free_list->lock);

	/* Rebuild the free list based on the memory block list. */
	free_list->current_pages = 0;

	list_for_each_entry(free_list_node, &free_list->mem_block_list, node)
		WARN_ON(pvr_free_list_insert_node_locked(free_list_node));

	/*
	 * Remove the ready pages, which are reserved to allow the FW to
	 * process OOM quickly and asynchronously request a grow.
	 */
	free_list->current_pages -= free_list->ready_pages;

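	/*
	 * Bring the firmware-visible state back in line with the rebuilt
	 * list: the stack top tracks the page count, and the allocation
	 * counters restart from zero.
	 */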
	fw_data = free_list->fw_data;
	fw_data->current_stack_top = fw_data->current_pages - 1;
	fw_data->allocated_page_count = 0;
	fw_data->allocated_mmu_page_count = 0;

	/* Reset the state of any associated HWRTs. */
	list_for_each_entry(hwrt_data, &free_list->hwrt_list, freelist_node) {
		struct rogue_fwif_hwrtdata *hwrt_fw_data = pvr_fw_object_vmap(hwrt_data->fw_obj);

		if (!WARN_ON(IS_ERR(hwrt_fw_data))) {
			hwrt_fw_data->state = ROGUE_FWIF_RTDATA_STATE_HWR;
			hwrt_fw_data->hwrt_data_flags &= ~HWRTDATA_HAS_LAST_GEOM;
		}

		pvr_fw_object_vunmap(hwrt_data->fw_obj);
	}

	mutex_unlock(&free_list->lock);

	pvr_free_list_put(free_list);
}

void
pvr_free_list_process_reconstruct_req(struct pvr_device *pvr_dev,
				      struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *req)
{
	struct rogue_fwif_kccb_cmd resp_cmd = {
		.cmd_type = ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE,
	};
	struct rogue_fwif_freelists_reconstruction_data *resp =
		&resp_cmd.cmd_data.free_lists_reconstruction_data;

	for (u32 i = 0; i < req->freelist_count; i++)
		pvr_free_list_reconstruct(pvr_dev, req->freelist_ids[i]);

	resp->freelist_count = req->freelist_count;
	memcpy(resp->freelist_ids, req->freelist_ids,
	       req->freelist_count * sizeof(resp->freelist_ids[0]));

	WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL));
}