/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv31.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

#include <nvif/class.h>

/*******************************************************************************
 * MPEG object classes
 ******************************************************************************/

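/*
 * Bind the class into instance memory: allocate a 16-byte engine object
 * under the given parent and write the class identifier into its first
 * word, leaving the remaining words zeroed.
 */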
static int
nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		      int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv31_mpeg_object = {
	.bind = nv31_mpeg_object_bind,
};

/*******************************************************************************
 * PMPEG context
 ******************************************************************************/

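/*
 * Context destructor: under the engine lock, clear the active-channel
 * pointer if it still refers to this context before the object is freed.
 */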
static void *
nv31_mpeg_chan_dtor(struct nvkm_object *object)
{
	struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
	struct nv31_mpeg *mpeg = chan->mpeg;
	unsigned long flags;

	spin_lock_irqsave(&mpeg->engine.lock, flags);
	if (mpeg->chan == chan)
		mpeg->chan = NULL;
	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
	return chan;
}

static const struct nvkm_object_func
nv31_mpeg_chan = {
	.dtor = nv31_mpeg_chan_dtor,
};

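/*
 * Only one channel may use PMPEG at a time: the engine keeps a single
 * active-context pointer, so context creation fails with -EBUSY while
 * another channel holds it.
 */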
int
nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
		   const struct nvkm_oclass *oclass,
		   struct nvkm_object **pobject)
{
	struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
	struct nv31_mpeg_chan *chan;
	unsigned long flags;
	int ret = -EBUSY;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
	chan->mpeg = mpeg;
	chan->fifo = fifoch;
	*pobject = &chan->object;

	spin_lock_irqsave(&mpeg->engine.lock, flags);
	if (!mpeg->chan) {
		mpeg->chan = chan;
		ret = 0;
	}
	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
	return ret;
}

/*******************************************************************************
 * PMPEG engine/subdev functions
 ******************************************************************************/

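/*
 * Mirror the framebuffer tiling configuration for region 'i' into the
 * corresponding PMPEG tile registers.
 */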
void
nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
{
	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
	struct nvkm_device *device = mpeg->engine.subdev.device;

	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
}

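/*
 * Emulate the DMA_CMD (0x0190), DMA_DATA (0x01a0) and DMA_IMAGE (0x01b0)
 * methods: read the DMA object directly from instance memory, reject
 * non-linear objects, require VRAM for DMA_IMAGE, and program the
 * corresponding base/size registers.
 */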
static bool
nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
	struct nv31_mpeg *mpeg = nv31_mpeg(device->mpeg);
	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
	u32 inst = data << 4;
	u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
	u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
	u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
	u32 size = dma1 + 1;

	/* only allow linear DMA objects */
	if (!(dma0 & 0x00002000)) {
		nvkm_error(subdev, "inst %08x dma0 %08x dma1 %08x dma2 %08x\n",
			   inst, dma0, dma1, dma2);
		return false;
	}

	if (mthd == 0x0190) {
		/* DMA_CMD */
		nvkm_mask(device, 0x00b300, 0x00010000,
				  (dma0 & 0x00030000) ? 0x00010000 : 0);
		nvkm_wr32(device, 0x00b334, base);
		nvkm_wr32(device, 0x00b324, size);
	} else
	if (mthd == 0x01a0) {
		/* DMA_DATA */
		nvkm_mask(device, 0x00b300, 0x00020000,
				  (dma0 & 0x00030000) ? 0x00020000 : 0);
		nvkm_wr32(device, 0x00b360, base);
		nvkm_wr32(device, 0x00b364, size);
	} else {
		/* DMA_IMAGE, VRAM only */
		if (dma0 & 0x00030000)
			return false;

		nvkm_wr32(device, 0x00b370, base);
		nvkm_wr32(device, 0x00b374, size);
	}

	return true;
}

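/* Software method dispatch; only the DMA object methods are handled here. */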
static bool
nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
{
	struct nvkm_device *device = mpeg->engine.subdev.device;
	switch (mthd) {
	case 0x190:
	case 0x1a0:
	case 0x1b0:
		return mpeg->func->mthd_dma(device, mthd, data);
	default:
		break;
	}
	return false;
}

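/*
 * Interrupt handler: emulate trapped software methods where possible,
 * acknowledge the interrupt, and log anything left unhandled.
 */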
static void
nv31_mpeg_intr(struct nvkm_engine *engine)
{
	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00b100);
	u32 type = nvkm_rd32(device, 0x00b230);
	u32 mthd = nvkm_rd32(device, 0x00b234);
	u32 data = nvkm_rd32(device, 0x00b238);
	u32 show = stat;
	unsigned long flags;

	spin_lock_irqsave(&mpeg->engine.lock, flags);

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		if (type == 0x00000010) {
			if (nv31_mpeg_mthd(mpeg, mthd, data))
				show &= ~0x01000000;
		}
	}

	nvkm_wr32(device, 0x00b100, stat);
	nvkm_wr32(device, 0x00b230, 0x00000001);

	if (show) {
		nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
			   mpeg->chan ? mpeg->chan->fifo->id : -1,
			   mpeg->chan ? mpeg->chan->fifo->name :
			   "unknown", stat, type, mthd, data);
	}

	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
}

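/*
 * Engine init: program the VPE and PMPEG setup registers, clear and enable
 * interrupts, then wait for bit 0 of 0x00b200 to clear before reporting
 * success.
 */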
int
nv31_mpeg_init(struct nvkm_engine *mpeg)
{
	struct nvkm_subdev *subdev = &mpeg->subdev;
	struct nvkm_device *device = subdev->device;

	/* VPE init */
	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	/* PMPEG init */
	nvkm_wr32(device, 0x00b32c, 0x00000000);
	nvkm_wr32(device, 0x00b314, 0x00000100);
	nvkm_wr32(device, 0x00b220, 0x00000031);
	nvkm_wr32(device, 0x00b300, 0x02001ec1);
	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);

	nvkm_wr32(device, 0x00b100, 0xffffffff);
	nvkm_wr32(device, 0x00b140, 0xffffffff);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
			break;
	) < 0) {
		nvkm_error(subdev, "timeout %08x\n",
			   nvkm_rd32(device, 0x00b200));
		return -EBUSY;
	}

	return 0;
}

static void *
nv31_mpeg_dtor(struct nvkm_engine *engine)
{
	return nv31_mpeg(engine);
}

static const struct nvkm_engine_func
nv31_mpeg_ = {
	.dtor = nv31_mpeg_dtor,
	.init = nv31_mpeg_init,
	.intr = nv31_mpeg_intr,
	.tile = nv31_mpeg_tile,
	.fifo.cclass = nv31_mpeg_chan_new,
	.sclass = {
		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
		{}
	}
};

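/*
 * Common constructor, parameterised by a per-chipset function table so that
 * other PMPEG implementations can reuse the NV31 engine glue.
 */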
int
nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_engine **pmpeg)
{
	struct nv31_mpeg *mpeg;

	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
		return -ENOMEM;
	mpeg->func = func;
	*pmpeg = &mpeg->engine;

	return nvkm_engine_ctor(&nv31_mpeg_, device, type, inst, true, &mpeg->engine);
}

static const struct nv31_mpeg_func
nv31_mpeg = {
	.mthd_dma = nv31_mpeg_mthd_dma,
};

int
nv31_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_engine **pmpeg)
{
	return nv31_mpeg_new_(&nv31_mpeg, device, type, inst, pmpeg);
}