// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_ring.h"
#include "intel_gsc_fw.h"

#define GSC_FW_STATUS_REG			_MMIO(0x116C40)
#define GSC_FW_CURRENT_STATE			REG_GENMASK(3, 0)
#define   GSC_FW_CURRENT_STATE_RESET		0
#define GSC_FW_INIT_COMPLETE_BIT		REG_BIT(9)
16
gsc_is_in_reset(struct intel_uncore * uncore)17 static bool gsc_is_in_reset(struct intel_uncore *uncore)
18 {
19 u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
20
21 return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
22 GSC_FW_CURRENT_STATE_RESET;
23 }
24
intel_gsc_uc_fw_init_done(struct intel_gsc_uc * gsc)25 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
26 {
27 struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
28 u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
29
30 return fw_status & GSC_FW_INIT_COMPLETE_BIT;
31 }
32
emit_gsc_fw_load(struct i915_request * rq,struct intel_gsc_uc * gsc)33 static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
34 {
35 u32 offset = i915_ggtt_offset(gsc->local);
36 u32 *cs;
37
38 cs = intel_ring_begin(rq, 4);
39 if (IS_ERR(cs))
40 return PTR_ERR(cs);
41
42 *cs++ = GSC_FW_LOAD;
43 *cs++ = lower_32_bits(offset);
44 *cs++ = upper_32_bits(offset);
45 *cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;
46
47 intel_ring_advance(rq, cs);
48
49 return 0;
50 }
51
gsc_fw_load(struct intel_gsc_uc * gsc)52 static int gsc_fw_load(struct intel_gsc_uc *gsc)
53 {
54 struct intel_context *ce = gsc->ce;
55 struct i915_request *rq;
56 int err;
57
58 if (!ce)
59 return -ENODEV;
60
61 rq = i915_request_create(ce);
62 if (IS_ERR(rq))
63 return PTR_ERR(rq);
64
65 if (ce->engine->emit_init_breadcrumb) {
66 err = ce->engine->emit_init_breadcrumb(rq);
67 if (err)
68 goto out_rq;
69 }
70
71 err = emit_gsc_fw_load(rq, gsc);
72 if (err)
73 goto out_rq;
74
75 err = ce->engine->emit_flush(rq, 0);
76
77 out_rq:
78 i915_request_get(rq);
79
80 if (unlikely(err))
81 i915_request_set_error_once(rq, err);
82
83 i915_request_add(rq);
84
85 if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
86 err = -ETIME;
87
88 i915_request_put(rq);
89
90 if (err)
91 drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
92 "Request submission for GSC load failed (%d)\n",
93 err);
94
95 return err;
96 }
97
gsc_fw_load_prepare(struct intel_gsc_uc * gsc)98 static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
99 {
100 struct intel_gt *gt = gsc_uc_to_gt(gsc);
101 struct drm_i915_private *i915 = gt->i915;
102 struct drm_i915_gem_object *obj;
103 void *src, *dst;
104
105 if (!gsc->local)
106 return -ENODEV;
107
108 obj = gsc->local->obj;
109
110 if (obj->base.size < gsc->fw.size)
111 return -ENOSPC;
112
113 dst = i915_gem_object_pin_map_unlocked(obj,
114 i915_coherent_map_type(i915, obj, true));
115 if (IS_ERR(dst))
116 return PTR_ERR(dst);
117
118 src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
119 i915_coherent_map_type(i915, gsc->fw.obj, true));
120 if (IS_ERR(src)) {
121 i915_gem_object_unpin_map(obj);
122 return PTR_ERR(src);
123 }
124
125 memset(dst, 0, obj->base.size);
126 memcpy(dst, src, gsc->fw.size);
127
128 i915_gem_object_unpin_map(gsc->fw.obj);
129 i915_gem_object_unpin_map(obj);
130
131 return 0;
132 }
133
gsc_fw_wait(struct intel_gt * gt)134 static int gsc_fw_wait(struct intel_gt *gt)
135 {
136 return intel_wait_for_register(gt->uncore,
137 GSC_FW_STATUS_REG,
138 GSC_FW_INIT_COMPLETE_BIT,
139 GSC_FW_INIT_COMPLETE_BIT,
140 500);
141 }
142
intel_gsc_uc_fw_upload(struct intel_gsc_uc * gsc)143 int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
144 {
145 struct intel_gt *gt = gsc_uc_to_gt(gsc);
146 struct intel_uc_fw *gsc_fw = &gsc->fw;
147 int err;
148
149 /* check current fw status */
150 if (intel_gsc_uc_fw_init_done(gsc)) {
151 if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
152 intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
153 return -EEXIST;
154 }
155
156 if (!intel_uc_fw_is_loadable(gsc_fw))
157 return -ENOEXEC;
158
159 /* FW blob is ok, so clean the status */
160 intel_uc_fw_sanitize(&gsc->fw);
161
162 if (!gsc_is_in_reset(gt->uncore))
163 return -EIO;
164
165 err = gsc_fw_load_prepare(gsc);
166 if (err)
167 goto fail;
168
169 /*
170 * GSC is only killed by an FLR, so we need to trigger one on unload to
171 * make sure we stop it. This is because we assign a chunk of memory to
172 * the GSC as part of the FW load , so we need to make sure it stops
173 * using it when we release it to the system on driver unload. Note that
174 * this is not a problem of the unload per-se, because the GSC will not
175 * touch that memory unless there are requests for it coming from the
176 * driver; therefore, no accesses will happen while i915 is not loaded,
177 * but if we re-load the driver then the GSC might wake up and try to
178 * access that old memory location again.
179 * Given that an FLR is a very disruptive action (see the FLR function
180 * for details), we want to do it as the last action before releasing
181 * the access to the MMIO bar, which means we need to do it as part of
182 * the primary uncore cleanup.
183 * An alternative approach to the FLR would be to use a memory location
184 * that survives driver unload, like e.g. stolen memory, and keep the
185 * GSC loaded across reloads. However, this requires us to make sure we
186 * preserve that memory location on unload and then determine and
187 * reserve its offset on each subsequent load, which is not trivial, so
188 * it is easier to just kill everything and start fresh.
189 */
190 intel_uncore_set_flr_on_fini(>->i915->uncore);
191
192 err = gsc_fw_load(gsc);
193 if (err)
194 goto fail;
195
196 err = gsc_fw_wait(gt);
197 if (err)
198 goto fail;
199
200 /* FW is not fully operational until we enable SW proxy */
201 intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
202
203 drm_info(>->i915->drm, "Loaded GSC firmware %s\n",
204 gsc_fw->file_selected.path);
205
206 return 0;
207
208 fail:
209 return intel_uc_fw_mark_load_failed(gsc_fw, err);
210 }
211