// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */

#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
#include <drm/drm_panic.h>

#include "intel_fb.h"
#include "intel_display_types.h"

#include "xe_bo.h"
#include "intel_bo.h"

bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
	/* legacy tiling is unused */
	return false;
}

bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
	/* xe does not have userptr bos */
	return false;
}

bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
	return false;
}

bool intel_bo_is_protected(struct drm_gem_object *obj)
{
	return xe_bo_is_protected(gem_to_xe_bo(obj));
}

void intel_bo_flush_if_display(struct drm_gem_object *obj)
{
}

int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	return drm_gem_prime_mmap(obj, vma);
}

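/* Copy 'size' bytes at 'offset' from the bo's backing store into 'dst'. */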
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	return xe_bo_read(bo, offset, dst, size);
}

struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
{
	return NULL;
}

struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
						   struct intel_frontbuffer *front)
{
	return front;
}

void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
	/* FIXME */
}

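/*
 * Per-framebuffer state for the drm_panic path: the bo page currently
 * mapped for pixel writes and its kernel virtual address.
 */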
struct xe_panic_data {
	struct page **pages;
	int page;
	void *vaddr;
};

struct xe_framebuffer {
	struct intel_framebuffer base;
	struct xe_panic_data panic;
};

static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
{
	return &container_of_const(fb, struct xe_framebuffer, base)->panic;
}

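/* Flush and unmap the page currently mapped for panic pixel writes, if any. */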
static void xe_panic_kunmap(struct xe_panic_data *panic)
{
	if (panic->vaddr) {
		drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
		kunmap_local(panic->vaddr);
		panic->vaddr = NULL;
	}
}

/*
 * The scanout buffer pages are not mapped, so for each pixel,
 * use ttm_bo_kmap_try_from_panic() to map the page, and write the pixel.
 * Try to keep the map from the previous pixel, to avoid too much map/unmap.
 */
static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
				    unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *panic = to_xe_panic_data(fb);
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
	unsigned int new_page;
	unsigned int offset;

	if (fb->panic_tiling)
		offset = fb->panic_tiling(sb->width, x, y);
	else
		offset = y * sb->pitch[0] + x * sb->format->cpp[0];

	new_page = offset >> PAGE_SHIFT;
	offset = offset % PAGE_SIZE;
	if (new_page != panic->page) {
		xe_panic_kunmap(panic);
		panic->page = new_page;
		panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
							  panic->page);
	}
	if (panic->vaddr) {
		u32 *pix = panic->vaddr + offset;

		*pix = color;
	}
}

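/*
 * Allocate the xe wrapper around intel_framebuffer so that the panic
 * state above lives alongside the framebuffer it belongs to.
 */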
struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
{
	struct xe_framebuffer *xe_fb;

	xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
	if (xe_fb)
		return &xe_fb->base;
	return NULL;
}

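/* Install the panic pixel writer; page == -1 means no page is mapped yet. */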
int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *panic = to_xe_panic_data(fb);

	panic->page = -1;
	sb->set_pixel = xe_panic_page_set_pixel;
	return 0;
}

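/* Drop any mapping left behind by the panic pixel writer. */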
void intel_bo_panic_finish(struct intel_framebuffer *fb)
{
	struct xe_panic_data *panic = to_xe_panic_data(fb);

	xe_panic_kunmap(panic);
	panic->page = -1;
}