// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation order
 * stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
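
/* Note on the encoding above: allocations from dma_alloc_attrs() are at
 * least PAGE_SIZE aligned, so the bits of @vaddr below PAGE_SHIFT are
 * always zero and can carry the allocation order instead. E.g. an order-3
 * mapping at 0xffff888000000000 is stored as 0xffff888000000003, and
 * ttm_pool_page_order() recovers the order with "dma->vaddr & ~PAGE_MASK".
 */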

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

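	/* The allocation behind "p" is physically contiguous (either a
	 * single high-order page or DMA_ATTR_FORCE_CONTIGUOUS), so the
	 * per-page DMA addresses are just consecutive PAGE_SIZE steps.
	 */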
	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

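	/* Clear the pages before they go back into the pool so that
	 * everything taken from the pool later is already zeroed.
	 */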
	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

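	/* System pages with modified caching attributes are only pooled on
	 * x86, the only architecture where ttm_pool_apply_caching() does
	 * anything. Returning NULL here means the pages bypass the pools
	 * and are allocated and freed directly.
	 */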
#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

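	/* Round-robin over the pool types: shrink the least recently
	 * shrunken pool by one allocation block and rotate it to the tail
	 * of the list.
	 */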
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;

	return 0;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

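	/* Fill the object with the largest chunks first: start at the
	 * highest order that still fits into the remaining num_pages and
	 * fall back to smaller orders when an allocation fails.
	 */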
	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages);
				if (r)
					goto error_free_page;

				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
			caching = pages;
		}

		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

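			/* Highmem pages have no kernel linear mapping and
			 * thus never need a caching transition; flush the
			 * pending caching batch before one and restart the
			 * batch after it.
			 */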
			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
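	/* num_pages still counts what is left to allocate, so the
	 * difference to tt->num_pages is what has been populated and must
	 * be given back, stepping block by block via the recorded order.
	 */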
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
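
/* Minimal usage sketch (not taken from in-tree code; assumes a pool set up
 * with ttm_pool_init() and a ttm_tt initialized with ttm_tt_init()):
 *
 *	struct ttm_operation_ctx ctx = { };
 *	int r;
 *
 *	r = ttm_pool_alloc(pool, tt, &ctx);
 *	if (r)
 *		return r;
 *	... use tt->pages / tt->dma_address ...
 *	ttm_pool_free(pool, tt);
 */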

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

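	/* Enforce the global limit: giving pages back to the pools may have
	 * pushed the total above page_pool_size, so shrink until we are
	 * back under it.
	 */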
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}