// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "s905d2-gpu.h"
#include "s912-gpu.h"
#include "t931-gpu.h"
#include <ddk/binding.h>
#include <ddk/debug.h>
#include <ddk/device.h>
#include <ddk/driver.h>
#include <ddk/platform-defs.h>
#include <ddk/protocol/iommu.h>
#include <ddk/protocol/platform-device-lib.h>
#include <ddk/protocol/platform/bus.h>
#include <ddk/protocol/platform/device.h>
#include <fuchsia/hardware/gpu/clock/c/fidl.h>
#include <hw/reg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

zx_status_t aml_gp0_init(aml_gpu_t* gpu);
void aml_gp0_release(aml_gpu_t* gpu);

// Index (into gpu_block->gpu_clk_freq) of the currently selected clock source.
static int32_t current_clk_source;

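// Switch the GPU clock to gpu_block->gpu_clk_freq[clk_source]. The HHI clock
// control register holds two mux/divider banks selected by a final mux bit:
// the currently unused bank is programmed with the new source, divisor and
// enable, and then the final mux is flipped over to it, so the bank feeding
// the GPU is never reprogrammed while it is in use.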
static void aml_gpu_set_clk_freq_source(aml_gpu_t* gpu, int32_t clk_source) {
    if (current_clk_source == clk_source) {
        return;
    }

    aml_gpu_block_t* gpu_block = gpu->gpu_block;
    GPU_INFO("Setting clock source to %d: %d\n", clk_source, gpu_block->gpu_clk_freq[clk_source]);
    uint32_t current_clk_cntl = READ32_HIU_REG(gpu_block->hhi_clock_cntl_offset);
    uint32_t enabled_mux = current_clk_cntl & (1 << FINAL_MUX_BIT_SHIFT);
    uint32_t new_mux = enabled_mux == 0;
    uint32_t mux_shift = new_mux ? 16 : 0;

    // Clear the existing values for the unused mux.
    current_clk_cntl &= ~(CLOCK_MUX_MASK << mux_shift);
    // Set the divisor, enable and source for the unused mux.
    current_clk_cntl |= CALCULATE_CLOCK_MUX(true, gpu_block->gpu_clk_freq[clk_source], 1)
                        << mux_shift;

    // Write the new values to the unused mux.
    WRITE32_HIU_REG(gpu_block->hhi_clock_cntl_offset, current_clk_cntl);
    zx_nanosleep(zx_deadline_after(ZX_USEC(10)));

    // Toggle the final mux selection.
    current_clk_cntl ^= (1 << FINAL_MUX_BIT_SHIFT);

    // Select the newly-programmed input mux.
    WRITE32_HIU_REG(gpu_block->hhi_clock_cntl_offset, current_clk_cntl);

    current_clk_source = clk_source;
}

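// Initialization-time variant of aml_gpu_set_clk_freq_source: if the bank
// currently selected by the final mux bit is disabled, the new source is
// programmed into that same bank and enabled in place, because flipping the
// final mux from a disabled source to an enabled one does not work (see the
// comment in the else branch).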
static void aml_gpu_set_initial_clk_freq_source(aml_gpu_t* gpu, int32_t clk_source) {
    aml_gpu_block_t* gpu_block = gpu->gpu_block;
    uint32_t current_clk_cntl = READ32_HIU_REG(gpu_block->hhi_clock_cntl_offset);
    uint32_t enabled_mux = (current_clk_cntl & (1 << FINAL_MUX_BIT_SHIFT)) != 0;
    uint32_t mux_shift = enabled_mux ? 16 : 0;

    if (current_clk_cntl & (1 << (mux_shift + CLK_ENABLED_BIT_SHIFT))) {
        aml_gpu_set_clk_freq_source(gpu, clk_source);
    } else {
        GPU_INFO("Setting initial clock source to %d: %d\n", clk_source,
                 gpu_block->gpu_clk_freq[clk_source]);
        // Switching the final dynamic mux from a disabled source to an enabled
        // source doesn't work. If the current clock source is disabled, enable
        // it in place instead of switching muxes.
        current_clk_cntl &= ~(CLOCK_MUX_MASK << mux_shift);
        current_clk_cntl |= CALCULATE_CLOCK_MUX(true, gpu_block->gpu_clk_freq[clk_source], 1)
                            << mux_shift;

        // Write the new values to the currently-selected mux.
        WRITE32_HIU_REG(gpu_block->hhi_clock_cntl_offset, current_clk_cntl);
        zx_nanosleep(zx_deadline_after(ZX_USEC(10)));
        current_clk_source = clk_source;
    }
}

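// One-time hardware initialization: the GPU bits in the reset mask/level
// registers are cleared (which presumably holds the GPU in reset), the default
// clock source is selected, the level bits are set again to take the GPU out
// of reset, and finally the GPU power registers are programmed. The register
// offsets come from the per-SoC gpu_block table; the bit positions (20 in
// RESET0, 14 in RESET2) are hard-coded here.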
static void aml_gpu_init(aml_gpu_t* gpu) {
    uint32_t temp;
    aml_gpu_block_t* gpu_block = gpu->gpu_block;

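    // Clear the GPU bits in the reset mask and level registers; the level bits
    // are set again below once the clock source has been programmed.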
    temp = READ32_PRESET_REG(gpu_block->reset0_mask_offset);
    temp &= ~(1 << 20);
    WRITE32_PRESET_REG(gpu_block->reset0_mask_offset, temp);

    temp = READ32_PRESET_REG(gpu_block->reset0_level_offset);
    temp &= ~(1 << 20);
    WRITE32_PRESET_REG(gpu_block->reset0_level_offset, temp);

    temp = READ32_PRESET_REG(gpu_block->reset2_mask_offset);
    temp &= ~(1 << 14);
    WRITE32_PRESET_REG(gpu_block->reset2_mask_offset, temp);

    temp = READ32_PRESET_REG(gpu_block->reset2_level_offset);
    temp &= ~(1 << 14);
    WRITE32_PRESET_REG(gpu_block->reset2_level_offset, temp);

    // Index 2 in gpu_clk_freq corresponds to the default GPU clock frequency
    // of 500 MHz. A different frequency can later be requested through the
    // fuchsia.hardware.gpu.clock FIDL protocol (see aml_gpu_SetFrequencySource).
    aml_gpu_set_initial_clk_freq_source(gpu, 2);

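    // Set the reset level bits back now that the clock source is configured,
    // which presumably takes the GPU out of reset.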
    temp = READ32_PRESET_REG(gpu_block->reset0_level_offset);
    temp |= 1 << 20;
    WRITE32_PRESET_REG(gpu_block->reset0_level_offset, temp);

    temp = READ32_PRESET_REG(gpu_block->reset2_level_offset);
    temp |= 1 << 14;
    WRITE32_PRESET_REG(gpu_block->reset2_level_offset, temp);

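    // Program the GPU power registers. These values are presumably taken from
    // the vendor's Mali power-up sequence; their individual bit meanings are
    // not documented here.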
    WRITE32_GPU_REG(PWR_KEY, 0x2968A819);
    WRITE32_GPU_REG(PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}

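// Release hook: tears down the GP0 PLL state, unmaps the MMIO regions, closes
// the BTI handle and frees the context. This is also used on the bind failure
// path, where the context may be only partially initialized.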
static void aml_gpu_release(void* ctx) {
    aml_gpu_t* gpu = ctx;
    aml_gp0_release(gpu);
    mmio_buffer_release(&gpu->hiu_buffer);
    mmio_buffer_release(&gpu->preset_buffer);
    mmio_buffer_release(&gpu->gpu_buffer);
    zx_handle_close(gpu->bti);
    free(gpu);
}

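// get_protocol hook: forwards the parent's platform-device protocol ops so a
// driver bound on top of this device (presumably the Mali GPU driver, given
// the PDEV_DID_ARM_MALI property below) can reach the platform bus resources.
// Note that proto_id is not checked here.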
static zx_status_t aml_gpu_get_protocol(void* ctx, uint32_t proto_id, void* out_proto) {
    aml_gpu_t* gpu = ctx;
    pdev_protocol_t* gpu_proto = out_proto;

    // Forward the underlying ops.
    gpu_proto->ops = gpu->pdev.ops;
    gpu_proto->ctx = gpu->pdev.ctx;
    return ZX_OK;
}

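// FIDL handler for fuchsia.hardware.gpu.clock Clock.SetFrequencySource:
// validates the requested clock source index and switches the GPU clock.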
static zx_status_t aml_gpu_SetFrequencySource(void* ctx, uint32_t clk_source, fidl_txn_t* txn) {
    aml_gpu_t* gpu = ctx;
    if (clk_source >= MAX_GPU_CLK_FREQ) {
        GPU_ERROR("Invalid clock freq source index\n");
        return fuchsia_hardware_gpu_clock_ClockSetFrequencySource_reply(txn, ZX_ERR_NOT_SUPPORTED);
    }
    aml_gpu_set_clk_freq_source(gpu, clk_source);
    return fuchsia_hardware_gpu_clock_ClockSetFrequencySource_reply(txn, ZX_OK);
}

static fuchsia_hardware_gpu_clock_Clock_ops_t fidl_ops = {
    .SetFrequencySource = aml_gpu_SetFrequencySource,
};

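// Message hook: dispatches incoming fuchsia.hardware.gpu.clock FIDL messages
// to the handlers in fidl_ops.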
static zx_status_t aml_gpu_message(void* ctx, fidl_msg_t* msg, fidl_txn_t* txn) {
    return fuchsia_hardware_gpu_clock_Clock_dispatch(ctx, txn, msg, &fidl_ops);
}

static zx_protocol_device_t aml_gpu_protocol = {
    .version = DEVICE_OPS_VERSION,
    .release = aml_gpu_release,
    .get_protocol = aml_gpu_get_protocol,
    .message = aml_gpu_message,
};

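// Bind hook: allocates the driver context, maps the GPU, HIU and preset MMIO
// regions, picks the per-SoC register block based on the platform device PID,
// initializes the hardware, and publishes the "aml-gpu" device.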
static zx_status_t aml_gpu_bind(void* ctx, zx_device_t* parent) {
    zx_status_t status;
    aml_gpu_t* gpu = calloc(1, sizeof(aml_gpu_t));
    if (!gpu) {
        return ZX_ERR_NO_MEMORY;
    }

    if ((status = device_get_protocol(parent, ZX_PROTOCOL_PDEV, &gpu->pdev)) != ZX_OK) {
        GPU_ERROR("ZX_PROTOCOL_PDEV not available\n");
        goto fail;
    }

    status = pdev_get_bti(&gpu->pdev, 0, &gpu->bti);
    if (status != ZX_OK) {
        GPU_ERROR("could not get BTI handle: %d\n", status);
        // Go through the common failure path so the context is freed.
        goto fail;
    }

    status = pdev_map_mmio_buffer2(&gpu->pdev, MMIO_GPU, ZX_CACHE_POLICY_UNCACHED_DEVICE,
                                   &gpu->gpu_buffer);
    if (status != ZX_OK) {
        GPU_ERROR("pdev_map_mmio_buffer2 failed for MMIO_GPU: %d\n", status);
        goto fail;
    }

    status = pdev_map_mmio_buffer2(&gpu->pdev, MMIO_HIU, ZX_CACHE_POLICY_UNCACHED_DEVICE,
                                   &gpu->hiu_buffer);
    if (status != ZX_OK) {
        GPU_ERROR("pdev_map_mmio_buffer2 failed for MMIO_HIU: %d\n", status);
        goto fail;
    }

    status = pdev_map_mmio_buffer2(&gpu->pdev, MMIO_PRESET, ZX_CACHE_POLICY_UNCACHED_DEVICE,
                                   &gpu->preset_buffer);
    if (status != ZX_OK) {
        GPU_ERROR("pdev_map_mmio_buffer2 failed for MMIO_PRESET: %d\n", status);
        goto fail;
    }

    pdev_device_info_t info;
    status = pdev_get_device_info(&gpu->pdev, &info);
    if (status != ZX_OK) {
        GPU_ERROR("pdev_get_device_info failed\n");
        goto fail;
    }

    switch (info.pid) {
    case PDEV_PID_AMLOGIC_S912:
        gpu->gpu_block = &s912_gpu_blocks;
        break;
    case PDEV_PID_AMLOGIC_S905D2:
        gpu->gpu_block = &s905d2_gpu_blocks;
        break;
    case PDEV_PID_AMLOGIC_T931:
        gpu->gpu_block = &t931_gpu_blocks;
        break;
    default:
        GPU_ERROR("unsupported SoC PID %u\n", info.pid);
        // Make sure the failure path returns an error rather than ZX_OK.
        status = ZX_ERR_INVALID_ARGS;
        goto fail;
    }

    if (info.pid == PDEV_PID_AMLOGIC_S905D2) {
        status = aml_gp0_init(gpu);
        if (status != ZX_OK) {
            GPU_ERROR("aml_gp0_init failed: %d\n", status);
            goto fail;
        }
    }

    aml_gpu_init(gpu);

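    // Publish generic VID/PID/DID properties so a GPU core driver (presumably
    // the Mali driver, given PDEV_DID_ARM_MALI) can bind to this device.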
    zx_device_prop_t props[] = {
        {BIND_PROTOCOL, 0, ZX_PROTOCOL_PDEV},
        {BIND_PLATFORM_DEV_VID, 0, PDEV_VID_GENERIC},
        {BIND_PLATFORM_DEV_PID, 0, PDEV_PID_GENERIC},
        {BIND_PLATFORM_DEV_DID, 0, PDEV_DID_ARM_MALI},
    };

    device_add_args_t args = {
        .version = DEVICE_ADD_ARGS_VERSION,
        .name = "aml-gpu",
        .ctx = gpu,
        .ops = &aml_gpu_protocol,
        .props = props,
        .prop_count = countof(props),
        .proto_id = ZX_PROTOCOL_GPU_THERMAL,
    };

    status = device_add(parent, &args, &gpu->zxdev);
    if (status != ZX_OK) {
        goto fail;
    }

    return ZX_OK;

fail:
    aml_gpu_release(gpu);
    return status;
}

static zx_driver_ops_t aml_gpu_driver_ops = {
    .version = DRIVER_OPS_VERSION,
    .bind = aml_gpu_bind,
};

ZIRCON_DRIVER_BEGIN(aml_gpu, aml_gpu_driver_ops, "zircon", "0.1", 6)
    BI_ABORT_IF(NE, BIND_PROTOCOL, ZX_PROTOCOL_PDEV),
    BI_ABORT_IF(NE, BIND_PLATFORM_DEV_VID, PDEV_VID_AMLOGIC),
    BI_ABORT_IF(NE, BIND_PLATFORM_DEV_DID, PDEV_DID_ARM_MALI_INIT),
    // We support multiple SoC variants.
    BI_MATCH_IF(EQ, BIND_PLATFORM_DEV_PID, PDEV_PID_AMLOGIC_S912),
    BI_MATCH_IF(EQ, BIND_PLATFORM_DEV_PID, PDEV_PID_AMLOGIC_S905D2),
    BI_MATCH_IF(EQ, BIND_PLATFORM_DEV_PID, PDEV_PID_AMLOGIC_T931),
ZIRCON_DRIVER_END(aml_gpu)