/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/io-mapping.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;

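	/* 0x4f525851 is "QXRO" in little-endian ASCII, the QXL ROM magic. */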
	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
	return true;
}

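/*
 * Program one memory slot into the device: write the slot's physical range
 * into the RAM header and issue the memslot-add I/O request.
 */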
static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
}

static void setup_slot(struct qxl_device *qdev,
		       struct qxl_memslot *slot,
		       unsigned int slot_index,
		       const char *slot_name,
		       unsigned long start_phys_addr,
		       unsigned long size)
{
	uint64_t high_bits;

	slot->index = slot_index;
	slot->name = slot_name;
	slot->start_phys_addr = start_phys_addr;
	slot->size = size;

	setup_hw_slot(qdev, slot);

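	/*
	 * Pack the slot id and generation into the top bits of the device
	 * address space; slot->high_bits is OR'd into every physical address
	 * the driver hands to the device for objects placed in this slot.
	 */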
	slot->generation = qdev->rom->slot_generation;
	high_bits = (qdev->rom->slots_start + slot->index)
		<< qdev->rom->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
	slot->high_bits = high_bits;

	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx\n",
		 slot->index, slot->name,
		 (unsigned long)slot->start_phys_addr,
		 (unsigned long)slot->size);
}

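/* Re-register both memory slots with the device, e.g. after a device reset on resume. */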
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, &qdev->main_slot);
	setup_hw_slot(qdev, &qdev->surfaces_slot);
}

static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);

	qxl_garbage_collect(qdev);
}

int qxl_device_init(struct qxl_device *qdev,
		    struct pci_dev *pdev)
{
	int r, sb;

	pci_set_drvdata(pdev, &qdev->ddev);

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	qxl_gem_init(qdev);

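	/*
	 * PCI BAR layout used below: BAR 0 = VRAM (surface 0 / draw area),
	 * BAR 1 = 32-bit surface RAM, BAR 2 = ROM, BAR 3 = I/O ports,
	 * BAR 4 = optional 64-bit surface RAM.
	 */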
	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	if (!qdev->vram_mapping) {
		pr_err("Unable to create vram_mapping");
		return -ENOMEM;
	}

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
		if (!qdev->surface_mapping) {
			pr_err("Unable to create surface_mapping");
			r = -ENOMEM;
			goto vram_mapping_free;
		}
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		 (unsigned long long)qdev->vram_base,
		 (unsigned long long)pci_resource_end(pdev, 0),
		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		 (int)pci_resource_len(pdev, 0) / 1024,
		 (unsigned long long)qdev->surfaceram_base,
		 (unsigned long long)pci_resource_end(pdev, sb),
		 (int)qdev->surfaceram_size / 1024 / 1024,
		 (int)qdev->surfaceram_size / 1024,
		 (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap_wc(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		r = -ENOMEM;
		goto surface_mapping_free;
	}

	if (!qxl_check_device(qdev)) {
		r = -ENODEV;
		goto rom_unmap;
	}

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		goto rom_unmap;
	}

	qdev->ram_header = ioremap_wc(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));
	if (!qdev->ram_header) {
		DRM_ERROR("Unable to ioremap RAM header\n");
		r = -ENOMEM;
		goto bo_fini;
	}

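	/* The command, cursor and release rings all live in the RAM header mapped above. */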
	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     &qdev->display_event);
	if (!qdev->command_ring) {
		DRM_ERROR("Unable to create command ring\n");
		r = -ENOMEM;
		goto ram_header_unmap;
	}

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CURSOR,
				&qdev->cursor_event);

	if (!qdev->cursor_ring) {
		DRM_ERROR("Unable to create cursor ring\n");
		r = -ENOMEM;
		goto command_ring_free;
	}

	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0,
				NULL);

	if (!qdev->release_ring) {
		DRM_ERROR("Unable to create release ring\n");
		r = -ENOMEM;
		goto cursor_ring_free;
	}

	idr_init_base(&qdev->release_idr, 1);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init_base(&qdev->surf_id_idr, 1);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r) {
		DRM_ERROR("Unable to init qxl irq\n");
		goto release_ring_free;
	}

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	setup_slot(qdev, &qdev->main_slot, 0, "main",
		   (unsigned long)qdev->vram_base,
		   (unsigned long)qdev->rom->ram_header_offset);
	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
		   (unsigned long)qdev->surfaceram_base,
		   (unsigned long)qdev->surfaceram_size);

	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	return 0;

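/* Error unwinding: tear down in reverse order of setup. */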
release_ring_free:
	qxl_ring_free(qdev->release_ring);
cursor_ring_free:
	qxl_ring_free(qdev->cursor_ring);
command_ring_free:
	qxl_ring_free(qdev->command_ring);
ram_header_unmap:
	iounmap(qdev->ram_header);
bo_fini:
	qxl_bo_fini(qdev);
rom_unmap:
	iounmap(qdev->rom);
surface_mapping_free:
	io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
	io_mapping_free(qdev->vram_mapping);
	return r;
}

void qxl_device_fini(struct qxl_device *qdev)
{
	int cur_idx;

	/* check if qxl_device_init() was successful (gc_work is initialized last) */
	if (!qdev->gc_work.func)
		return;

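	/* Unpin and drop any release BOs that are still being filled. */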
	for (cur_idx = 0; cur_idx < 3; cur_idx++) {
		if (!qdev->current_release_bo[cur_idx])
			continue;
		qxl_bo_unpin(qdev->current_release_bo[cur_idx]);
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}

	/*
	 * Ask the host to release resources (and fill the release ring),
	 * then wait for the releases to actually happen.
	 */
	qxl_io_notify_oom(qdev);
	wait_event_timeout(qdev->release_event,
			   atomic_read(&qdev->release_count) == 0,
			   HZ);
	flush_work(&qdev->gc_work);
	qxl_surf_evict(qdev);
	qxl_vram_evict(qdev);

	qxl_gem_fini(qdev);
	qxl_bo_fini(qdev);
	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
}