// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <ddk/protocol/pci.h>
#include <ddk/protocol/pci-lib.h>

#include <climits>
#include <fbl/algorithm.h>
#include <limits>
#include <stdlib.h>
#include <utility>

#include "intel-i915.h"
#include "gtt.h"
#include "macros.h"
#include "tiling.h"
#include "registers.h"

#define PAGE_PRESENT (1 << 0)

namespace {

constexpr size_t kEntriesPerPinTxn = PAGE_SIZE / sizeof(zx_paddr_t);

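// Encodes |bus_addr| as a global GTT page table entry with the present bit
// (bit 0) set.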
inline uint64_t gen_pte_encode(uint64_t bus_addr)
{
    // Make every page present so we don't have to deal with padding for framebuffers
    return bus_addr | PAGE_PRESENT;
}

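// Converts a GTT page table entry index into its offset within the register
// MMIO space; the global GTT entries live at offset 0x800000 of the register
// BAR, one 8-byte entry per page of graphics address space.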
inline uint32_t get_pte_offset(uint32_t idx) {
    constexpr uint32_t GTT_BASE_OFFSET = 0x800000;
    return static_cast<uint32_t>(GTT_BASE_OFFSET + (idx * sizeof(uint64_t)));
}

} // namespace

namespace i915 {

Gtt::Gtt() :
    region_allocator_(RegionAllocator::RegionPool::Create(std::numeric_limits<size_t>::max())) {}

Gtt::~Gtt() {
    if (scratch_buffer_paddr_) {
        scratch_buffer_pmt_.unpin();
    }
}

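// Initializes the GTT: acquires the device's BTI, sizes the global GTT from
// the GMCH graphics control register, and points every entry at a pinned
// scratch page before handing the usable graphics address space to
// |region_allocator_|.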
zx_status_t Gtt::Init(Controller* controller) {
    controller_ = controller;

    zx_status_t status = pci_get_bti(controller->pci(), 0, bti_.reset_and_get_address());
    if (status != ZX_OK) {
        LOG_ERROR("Failed to get bti (%d)\n", status);
        return status;
    }

    zx_info_bti_t info;
    status = bti_.get_info(ZX_INFO_BTI, &info, sizeof(zx_info_bti_t), nullptr, nullptr);
    if (status != ZX_OK) {
        LOG_ERROR("Failed to fetch bti info (%d)\n", status);
        return status;
    }
    min_contiguity_ = info.minimum_contiguity;

    // Calculate the size of the gtt.
    auto gmch_gfx_ctrl = registers::GmchGfxControl::Get().FromValue(0);
    status = pci_config_read16(controller_->pci(), gmch_gfx_ctrl.kAddr,
                               gmch_gfx_ctrl.reg_value_ptr());
    if (status != ZX_OK) {
        LOG_ERROR("Failed to read GfxControl\n");
        return status;
    }
    uint32_t gtt_size = gmch_gfx_ctrl.gtt_mappable_mem_size();
    LOG_TRACE("Gtt::Init gtt_size (for page tables) 0x%x\n", gtt_size);

    status = zx::vmo::create(PAGE_SIZE, 0, &scratch_buffer_);
    if (status != ZX_OK) {
        LOG_ERROR("Failed to alloc scratch buffer (%d)\n", status);
        return status;
    }

    status = bti_.pin(ZX_BTI_PERM_READ, scratch_buffer_, 0, PAGE_SIZE, &scratch_buffer_paddr_, 1,
                      &scratch_buffer_pmt_);
    if (status != ZX_OK) {
        LOG_ERROR("Failed to look up scratch buffer (%d)\n", status);
        return status;
    }

    scratch_buffer_.op_range(ZX_VMO_OP_CACHE_CLEAN, 0, PAGE_SIZE, nullptr, 0);

    // Populate the gtt with the scratch buffer.
    uint64_t pte = gen_pte_encode(scratch_buffer_paddr_);
    unsigned i;
    for (i = 0; i < gtt_size / sizeof(uint64_t); i++) {
        controller_->mmio_space()->Write<uint64_t>(pte, get_pte_offset(i));
    }
    controller_->mmio_space()->Read<uint32_t>(get_pte_offset(i - 1)); // Posting read

    gfx_mem_size_ = gtt_size / sizeof(uint64_t) * PAGE_SIZE;
    return region_allocator_.AddRegion({ .base = 0, .size = gfx_mem_size_ });
}

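// Allocates a page-aligned run of graphics address space at least |length|
// bytes long with the requested alignment. The returned region is not backed
// by memory until GttRegion::PopulateRegion() is called.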
zx_status_t Gtt::AllocRegion(uint32_t length, uint32_t align_pow2,
                             fbl::unique_ptr<GttRegion>* region_out) {
    uint32_t region_length = ROUNDUP(length, PAGE_SIZE);
    fbl::AllocChecker ac;
    auto r = fbl::make_unique_checked<GttRegion>(&ac, this);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }
    if (region_allocator_.GetRegion(region_length, align_pow2, r->region_) != ZX_OK) {
        return ZX_ERR_NO_RESOURCES;
    }
    *region_out = std::move(r);
    return ZX_OK;
}

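// Maps |length| bytes of the stolen framebuffer at |stolen_fb| into the start
// of the GTT ahead of an mexec; as the comment below notes, this clobbers any
// existing mappings so that the bootloader framebuffer keeps working.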
void Gtt::SetupForMexec(uintptr_t stolen_fb, uint32_t length) {
    // Just clobber everything to get the bootloader framebuffer to work.
    unsigned pte_idx = 0;
    for (unsigned i = 0; i < ROUNDUP(length, PAGE_SIZE) / PAGE_SIZE; i++, stolen_fb += PAGE_SIZE) {
        uint64_t pte = gen_pte_encode(stolen_fb);
        controller_->mmio_space()->Write<uint64_t>(pte, get_pte_offset(pte_idx++));
    }
    controller_->mmio_space()->Read<uint32_t>(get_pte_offset(pte_idx - 1)); // Posting read
}

GttRegion::GttRegion(Gtt* gtt) : gtt_(gtt), is_rotated_(false) {}

GttRegion::~GttRegion() {
    ClearRegion(false);
}

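// Backs this region with pages from |vmo| starting |page_offset| pages into
// the VMO: the pages are pinned through the BTI and a GTT entry is written
// for each one. Fails with ZX_ERR_ALREADY_BOUND if the region is already
// populated.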
zx_status_t GttRegion::PopulateRegion(zx_handle_t vmo, uint64_t page_offset,
                                      uint64_t length, bool writable) {
    if (length > region_->size) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (mapped_end_ != 0) {
        return ZX_ERR_ALREADY_BOUND;
    }
    vmo_ = vmo;

    zx_paddr_t paddrs[kEntriesPerPinTxn];
    zx_status_t status;
    uint32_t num_pages = static_cast<uint32_t>(ROUNDUP(length, PAGE_SIZE) / PAGE_SIZE);
    uint64_t vmo_offset = page_offset * PAGE_SIZE;
    uint32_t pte_idx = static_cast<uint32_t>(region_->base / PAGE_SIZE);
    uint32_t pte_idx_end = pte_idx + num_pages;

    size_t num_pins = ROUNDUP(length, gtt_->min_contiguity_) / gtt_->min_contiguity_;
    fbl::AllocChecker ac;
    pmts_.reserve(num_pins, &ac);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

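    // Pin the VMO in chunks of at most kEntriesPerPinTxn runs of
    // |min_contiguity_| bytes each, writing one GTT entry per page of every
    // pinned run.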
    uint32_t flags = ZX_BTI_COMPRESS | ZX_BTI_PERM_READ | (writable ? ZX_BTI_PERM_WRITE : 0);
    while (pte_idx < pte_idx_end) {
        uint64_t cur_len = (pte_idx_end - pte_idx) * PAGE_SIZE;
        if (cur_len > kEntriesPerPinTxn * gtt_->min_contiguity_) {
            cur_len = kEntriesPerPinTxn * gtt_->min_contiguity_;
        }

        uint64_t actual_entries = ROUNDUP(cur_len, gtt_->min_contiguity_) / gtt_->min_contiguity_;
        zx::pmt pmt;
        status = gtt_->bti_.pin(flags, *zx::unowned_vmo(vmo_),
                                vmo_offset, cur_len, paddrs, actual_entries, &pmt);
        if (status != ZX_OK) {
            LOG_ERROR("Failed to get paddrs (%d)\n", status);
            return status;
        }
        vmo_offset += cur_len;
        mapped_end_ = static_cast<uint32_t>(vmo_offset);
        pmts_.push_back(std::move(pmt), &ac);
        ZX_DEBUG_ASSERT(ac.check()); // Shouldn't fail because of the reserve above.

        for (unsigned i = 0; i < actual_entries; i++) {
            for (unsigned j = 0;
                 j < gtt_->min_contiguity_ / PAGE_SIZE && pte_idx < pte_idx_end; j++) {
                uint64_t pte = gen_pte_encode(paddrs[i] + j * PAGE_SIZE);
                gtt_->controller_->mmio_space()->Write<uint64_t>(pte, get_pte_offset(pte_idx++));
            }
        }
    }

    gtt_->controller_->mmio_space()->Read<uint32_t>(get_pte_offset(pte_idx - 1)); // Posting read
    return ZX_OK;
}

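// Points every populated entry of this region back at the scratch page, unpins
// the backing memory, and optionally closes the VMO handle.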
void GttRegion::ClearRegion(bool close_vmo) {
    if (!region_) {
        return;
    }

    uint32_t pte_idx = static_cast<uint32_t>(region_->base / PAGE_SIZE);
    uint64_t pte = gen_pte_encode(gtt_->scratch_buffer_paddr_);
    auto mmio_space = gtt_->controller_->mmio_space();

    for (unsigned i = 0; i < mapped_end_ / PAGE_SIZE; i++) {
        uint32_t pte_offset = get_pte_offset(pte_idx++);
        mmio_space->Write<uint64_t>(pte, pte_offset);
    }

    mmio_space->Read<uint32_t>(get_pte_offset(pte_idx - 1)); // Posting read

    for (zx::pmt& pmt : pmts_) {
        if (pmt.unpin() != ZX_OK) {
            LOG_INFO("Error unpinning gtt region\n");
        }
    }
    pmts_.reset();
    mapped_end_ = 0;

    if (close_vmo && vmo_ != ZX_HANDLE_INVALID) {
        zx_handle_close(vmo_);
    }
    vmo_ = ZX_HANDLE_INVALID;
}

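// Rewrites this region's GTT mapping in place so that |image| scans out with
// the requested rotation; the cycle-walk described below permutes the entries
// without needing auxiliary storage.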
void GttRegion::SetRotation(uint32_t rotation, const image_t& image) {
    bool rotated = (rotation == FRAME_TRANSFORM_ROT_90 || rotation == FRAME_TRANSFORM_ROT_270);
    if (rotated == is_rotated_) {
        return;
    }
    is_rotated_ = rotated;
    // Displaying an image with 90/270 degree rotation requires rearranging the image's
    // GTT mapping. Since permutations are composed of disjoint cycles and because we can
    // calculate each page's location in the new mapping, we can remap the image by shifting
    // the GTT entries around each cycle. We use one of the ignored bits in the global GTT
    // PTEs to keep track of whether or not entries have been rotated.
    constexpr uint32_t kRotatedFlag = (1 << 1);

    uint64_t mask = is_rotated_ ? kRotatedFlag : 0;
    uint32_t width = width_in_tiles(image.type, image.width, image.pixel_format);
    uint32_t height = height_in_tiles(image.type, image.height, image.pixel_format);

    auto mmio_space = gtt_->controller_->mmio_space();
    uint32_t pte_offset = static_cast<uint32_t>(base() / PAGE_SIZE);
    for (uint32_t i = 0; i < size() / PAGE_SIZE; i++) {
        uint64_t entry = mmio_space->Read<uint64_t>(get_pte_offset(i + pte_offset));
        uint32_t position = i;
        // If the entry has already been cycled into the correct place, the
        // loop check will immediately fail.
        while ((entry & kRotatedFlag) != mask) {
            if (mask) {
                uint32_t x = position % width;
                uint32_t y = position / width;
                position = ((x + 1) * height) - y - 1;
            } else {
                uint32_t x = position % height;
                uint32_t y = position / height;
                position = ((height - x - 1) * width) + y;
            }
            uint32_t dest_offset = get_pte_offset(position + pte_offset);

            uint64_t next_entry = mmio_space->Read<uint64_t>(dest_offset);
            mmio_space->Write<uint64_t>(entry ^ kRotatedFlag, dest_offset);
            entry = next_entry;
        }
    }
}

} // namespace i915