/* SPDX-License-Identifier: MIT
 *
 * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 */
#include "engine.h"
#include "gpu.h"

#include <core/object.h>
#include <engine/fifo/chan.h>

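/* An NVKM engine that is backed by GSP-RM rather than driver-managed hardware. */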
struct nvkm_rm_engine {
	struct nvkm_engine engine;

	struct nvkm_engine_func func;
};

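/* An engine object allocated through GSP-RM on behalf of a channel. */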
struct nvkm_rm_engine_obj {
	struct nvkm_object object;
	struct nvkm_gsp_object rm;
};

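/* Release the RM handle; the returned pointer is then kfree()d by the object core. */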
static void *
nvkm_rm_engine_obj_dtor(struct nvkm_object *object)
{
	struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object);

	nvkm_gsp_rm_free(&obj->rm);
	return obj;
}

static const struct nvkm_object_func
nvkm_rm_engine_obj = {
	.dtor = nvkm_rm_engine_obj_dtor,
};

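/* Allocate the RM object backing an engine object of the given class.
 * Allocation is dispatched through the per-engine-type hook of the RM API
 * in use; GR classes are allocated directly, with no argument structure.
 */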
int
nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass,
		       struct nvkm_object **pobject)
{
	struct nvkm_rm *rm = chan->client->gsp->rm;
	const int inst = oclass->engine->subdev.inst;
	const u32 class = oclass->base.oclass;
	const u32 handle = oclass->handle;
	struct nvkm_rm_engine_obj *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	switch (oclass->engine->subdev.type) {
	case NVKM_ENGINE_CE:
		ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm);
		break;
	case NVKM_ENGINE_GR:
		ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm);
		break;
	case NVKM_ENGINE_NVDEC:
		ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm);
		break;
	case NVKM_ENGINE_NVENC:
		ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm);
		break;
	case NVKM_ENGINE_NVJPG:
		ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm);
		break;
	case NVKM_ENGINE_OFA:
		ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm);
		break;
	default:
		ret = -EINVAL;
		WARN_ON(1);
		break;
	}

	if (ret) {
		kfree(obj);
		return ret;
	}

	nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object);
	*pobject = &obj->object;
	return 0;
}

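/* sclass constructor: resolve the channel from the parent object and
 * allocate the engine object on it.
 */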
static int
nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
			struct nvkm_object **pobject)
{
	struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);

	return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject);
}

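/* Free the function table built at ctor time; the core frees the returned engine. */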
static void *
nvkm_rm_engine_dtor(struct nvkm_engine *engine)
{
	kfree(engine->func);
	return engine;
}

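/* Build an engine function table at runtime from the list of RM classes
 * supported by this GPU, routing every class through the common RM engine
 * object constructor.  struct_size() reserves one extra zero-filled entry
 * so the sclass list stays terminated.
 */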
int
nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm,
		    enum nvkm_subdev_type type, int inst,
		    const u32 *class, int nclass, struct nvkm_engine *engine)
{
	struct nvkm_engine_func *func;

	func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL);
	if (!func)
		return -ENOMEM;

	func->dtor = dtor;

	for (int i = 0; i < nclass; i++) {
		func->sclass[i].oclass = class[i];
		func->sclass[i].minver = -1;
		func->sclass[i].maxver = 0;
		func->sclass[i].ctor = nvkm_rm_engine_obj_ctor;
	}

	nvkm_engine_ctor(func, rm->device, type, inst, true, engine);
	return 0;
}

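/* Create a generic RM-backed engine exposing a single object class. */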
static int
nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class,
		    struct nvkm_engine **pengine)
{
	struct nvkm_engine *engine;
	int ret;

	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine);
	if (ret) {
		kfree(engine);
		return ret;
	}

	*pengine = engine;
	return 0;
}

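/* Instantiate an RM-backed engine of the given type/instance.  CE, NVJPG
 * and OFA use the generic single-class path; GR, NVDEC and NVENC have
 * dedicated constructors.
 */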
int
nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst)
{
	const struct nvkm_rm_gpu *gpu = rm->gpu;
	struct nvkm_device *device = rm->device;

	switch (type) {
	case NVKM_ENGINE_CE:
		if (WARN_ON(inst >= ARRAY_SIZE(device->ce)))
			return -EINVAL;

		return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]);
	case NVKM_ENGINE_GR:
		if (inst != 0)
			return -ENODEV; /* MIG not supported, just ignore. */

		return nvkm_rm_gr_new(rm);
	case NVKM_ENGINE_NVDEC:
		if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec)))
			return -EINVAL;

		return nvkm_rm_nvdec_new(rm, inst);
	case NVKM_ENGINE_NVENC:
		if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc)))
			return -EINVAL;

		return nvkm_rm_nvenc_new(rm, inst);
	case NVKM_ENGINE_NVJPG:
		if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg)))
			return -EINVAL;

		return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]);
	case NVKM_ENGINE_OFA:
		if (WARN_ON(inst >= ARRAY_SIZE(device->ofa)))
			return -EINVAL;

		return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]);
	default:
		break;
	}

	return -ENODEV;
}