// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/fb.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>

#include "gem.h"
#include "psb_drv.h"

/*
 * VM area struct
 */

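/*
 * The fbdev mmap path defers page-table setup to this fault handler: on
 * the first fault the whole VMA is populated with the framebuffer's
 * stolen-memory pages (uncached), so later accesses do not fault again.
 */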
static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct fb_info *info = vma->vm_private_data;
	unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
	unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
	vm_fault_t err = VM_FAULT_SIGBUS;
	unsigned long page_num = vma_pages(vma);
	unsigned long i;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; ++i) {
		err = vmf_insert_mixed(vma, address, pfn);
		if (unlikely(err & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		++pfn;
	}

	return err;
}

static const struct vm_operations_struct psb_fbdev_vm_ops = {
	.fault = psb_fbdev_vm_fault,
};

/*
 * struct fb_ops
 */

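/* Scale a 16-bit fbdev color component down to a _width-bit hardware value, with rounding */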
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static int psb_fbdev_fb_setcolreg(unsigned int regno,
				  unsigned int red, unsigned int green,
				  unsigned int blue, unsigned int transp,
				  struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
		case 24:
		case 32:
			((uint32_t *)info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

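/*
 * No pages are mapped here; the mapping is populated lazily by
 * psb_fbdev_vm_fault() when user space first touches the buffer.
 */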
static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psb_fbdev_vm_ops;
	vma->vm_private_data = info;
	vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);

	return 0;
}

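/*
 * Called when the last reference to the fbdev framebuffer is dropped;
 * unwinds everything set up in psb_fbdev_driver_fbdev_probe().
 */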
static void psb_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_object *obj = fb->obj[0];

	drm_fb_helper_fini(fb_helper);

	drm_framebuffer_unregister_private(fb);
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);

	drm_gem_object_put(obj);

	drm_client_release(&fb_helper->client);

	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static const struct fb_ops psb_fbdev_fb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_IOMEM_OPS_RDWR,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psb_fbdev_fb_setcolreg,
	__FB_DEFAULT_IOMEM_OPS_DRAW,
	.fb_mmap = psb_fbdev_fb_mmap,
	.fb_destroy = psb_fbdev_fb_destroy,
};

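/*
 * No helper callbacks are overridden: the fbdev framebuffer is scanned
 * out directly from stolen memory, so there is no shadow buffer to
 * flush via a fb_dirty callback.
 */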
static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
};

/*
 * struct drm_driver
 */

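/*
 * Allocate a framebuffer in stolen memory, wrap it in a DRM framebuffer
 * and register it with the fbdev emulation layer.
 */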
int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
				 struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = { };
	int size;
	int ret;
	struct psb_gem_object *backing;
	struct drm_gem_object *obj;
	u32 bpp, depth;

	/* No 24-bit packed mode */
	if (sizes->surface_bpp == 24) {
		sizes->surface_bpp = 32;
		sizes->surface_depth = 24;
	}
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/*
	 * If the mode does not fit into stolen memory at 32 bpp, switch to
	 * 16 bpp to get a console at full resolution. The X mode-setting
	 * server will allocate its own 32-bpp GEM framebuffer.
	 */
	size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) *
	       sizes->surface_height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	/* Allocate the framebuffer in the GTT with stolen page backing */
	backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
	if (IS_ERR(backing))
		return PTR_ERR(backing);
	obj = &backing->base;

	fb = psb_framebuffer_create(dev,
				    drm_get_format_info(dev, mode_cmd.pixel_format,
							mode_cmd.modifier[0]),
				    &mode_cmd, obj);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto err_drm_gem_object_put;
	}

	fb_helper->funcs = &psb_fbdev_fb_helper_funcs;
	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_framebuffer_unregister_private;
	}

	info->fbops = &psb_fbdev_fb_ops;

	/* Access the stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fix.smem_start = dev_priv->stolen_base + backing->offset;
	info->fix.smem_len = size;
	info->fix.ywrapstep = 0;
	info->fix.ypanstep = 0;
	info->fix.mmio_start = pci_resource_start(pdev, 0);
	info->fix.mmio_len = pci_resource_len(pdev, 0);

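	/* Clear the framebuffer to avoid displaying stale stolen-memory contents */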
	fb_memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);

	return 0;

err_drm_framebuffer_unregister_private:
	drm_framebuffer_unregister_private(fb);
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);
err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}