// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "console.h"

#include <ddk/debug.h>
#include <fbl/algorithm.h>
#include <fbl/auto_lock.h>
#include <string.h>
#include <virtio/virtio.h>
#include <lib/zx/vmar.h>

#include <utility>

#define LOCAL_TRACE 0

namespace virtio {

namespace {

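// Queues a single-descriptor transfer of [phys, phys + len) on |ring|. When |write| is true the
// driver is sending data and the device only reads the buffer; otherwise the descriptor is marked
// device-writable so the device can fill it with received data.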
zx_status_t QueueTransfer(Ring* ring, uintptr_t phys, uint32_t len, bool write) {
    uint16_t index;
    vring_desc* desc = ring->AllocDescChain(1, &index);
    if (!desc) {
        // This should not happen
        zxlogf(ERROR, "Failed to find free descriptor for the virtio ring\n");
        return ZX_ERR_NO_MEMORY;
    }

    desc->addr = phys;
    desc->len = len;
    // A buffer writable by the driver is read-only for the device, and vice versa.
    desc->flags = write ? 0 : VRING_DESC_F_WRITE;
    ring->SubmitChain(index);

    return ZX_OK;
}

} // namespace

TransferBuffer::TransferBuffer() {
    memset(&buf_, 0, sizeof(buf_));
}

TransferBuffer::~TransferBuffer() {
    io_buffer_release(&buf_);
}

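// Allocates a single physically contiguous io_buffer of |count| * |chunk_size| bytes and carves it
// into |count| fixed-size TransferDescriptors, one per chunk.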
zx_status_t TransferBuffer::Init(const zx::bti& bti, size_t count, uint32_t chunk_size) {
    if (!count)
        return ZX_OK;

    count_ = count;
    chunk_size_ = chunk_size;
    size_ = count * chunk_size;

    TransferDescriptor* descriptor = new TransferDescriptor[count_];
    if (!descriptor) {
        zxlogf(ERROR, "Failed to allocate transfer descriptors (%d)\n", ZX_ERR_NO_MEMORY);
        return ZX_ERR_NO_MEMORY;
    }

    descriptor_.reset(descriptor, count_);

    zx_status_t status = io_buffer_init(&buf_, bti.get(), size_, IO_BUFFER_RW | IO_BUFFER_CONTIG);
    if (status != ZX_OK) {
        zxlogf(ERROR, "Failed to allocate transfer buffers (%d)\n", status);
        return status;
    }

    void* virt = io_buffer_virt(&buf_);
    zx_paddr_t phys = io_buffer_phys(&buf_);
    for (size_t i = 0; i < count_; ++i) {
        TransferDescriptor& desc = descriptor_[i];

        desc.virt = reinterpret_cast<uint8_t*>(virt) + i * chunk_size;
        desc.phys = phys + i * chunk_size;
        desc.total_len = chunk_size;
        desc.used_len = 0;
        desc.processed_len = 0;
    }

    return ZX_OK;
}

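// Returns the |index|-th descriptor, or nullptr if |index| is out of range.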
TransferDescriptor* TransferBuffer::GetDescriptor(size_t index) {
    if (index >= count_)
        return nullptr;
    return &descriptor_[index];
}

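// Maps a physical address reported by the device in a used-ring element back to the
// TransferDescriptor that covers it, or returns nullptr if |phys| lies outside the buffer.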
TransferDescriptor* TransferBuffer::PhysicalToDescriptor(uintptr_t phys) {
    zx_paddr_t base = io_buffer_phys(&buf_);
    if (phys < base || phys >= base + size_)
        return nullptr;
    return &descriptor_[(phys - base) / chunk_size_];
}

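// TransferQueue is a simple FIFO: descriptors are pushed at the front and consumed from the back,
// so Peek() and Dequeue() always return the oldest queued descriptor.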
void TransferQueue::Add(TransferDescriptor* desc) {
    queue_.push_front(desc);
}

TransferDescriptor* TransferQueue::Peek() {
    if (queue_.is_empty())
        return nullptr;
    return &queue_.back();
}

TransferDescriptor* TransferQueue::Dequeue() {
    if (queue_.is_empty())
        return nullptr;
    return queue_.pop_back();
}

bool TransferQueue::IsEmpty() const {
    return queue_.is_empty();
}

ConsoleDevice::ConsoleDevice(zx_device_t* bus_device, zx::bti bti, fbl::unique_ptr<Backend> backend)
    : Device(bus_device, std::move(bti), std::move(backend)) {}

ConsoleDevice::~ConsoleDevice() {}

// We don't need to hold request_lock_ during initialization
zx_status_t ConsoleDevice::Init() TA_NO_THREAD_SAFETY_ANALYSIS {
    LTRACE_ENTRY;
    // Common to all virtio devices: reset the device, acknowledge the driver,
    // and negotiate the supported features.
    DeviceReset();
    DriverStatusAck();
    if (!DeviceFeatureSupported(VIRTIO_F_VERSION_1)) {
        zxlogf(ERROR, "%s: Legacy virtio interface is not supported by this driver\n", tag());
        return ZX_ERR_NOT_SUPPORTED;
    }
    DriverFeatureAck(VIRTIO_F_VERSION_1);

    zx_status_t status = DeviceStatusFeaturesOk();
    if (status) {
        zxlogf(ERROR, "%s: Feature negotiation failed (%d)\n", tag(), status);
        return status;
    }

    status = port0_receive_queue_.Init(0, kDescriptors);
    if (status) {
        zxlogf(ERROR, "%s: Failed to initialize receive queue (%d)\n", tag(), status);
        return status;
    }

    status = port0_receive_buffer_.Init(bti_, kDescriptors, kChunkSize);
    if (status) {
        zxlogf(ERROR, "%s: Failed to allocate buffers for receive queue (%d)\n", tag(), status);
        return status;
    }

    // Initially the whole receive buffer is available for the device to write, so
    // put all the descriptors on the virtio ring's available list.
    for (size_t i = 0; i < kDescriptors; ++i) {
        TransferDescriptor* desc = port0_receive_buffer_.GetDescriptor(i);
        QueueTransfer(&port0_receive_queue_, desc->phys, desc->total_len, /*write=*/false);
    }
    // Notify the device
    port0_receive_queue_.Kick();

    status = port0_transmit_queue_.Init(1, kDescriptors);
    if (status) {
        zxlogf(ERROR, "%s: Failed to initialize transmit queue (%d)\n", tag(), status);
        return status;
    }

    status = port0_transmit_buffer_.Init(bti_, kDescriptors, kChunkSize);
    if (status) {
        zxlogf(ERROR, "%s: Failed to allocate buffers for transmit queue (%d)\n", tag(), status);
        return status;
    }

    // Initially the whole transmit buffer is available for writing, so put all the
    // descriptors in the free-descriptor queue.
    for (size_t i = 0; i < kDescriptors; ++i) {
        TransferDescriptor* desc = port0_transmit_buffer_.GetDescriptor(i);
        port0_transmit_descriptors_.Add(desc);
    }

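    // Register with the DDK; reads and writes are dispatched through the
    // device_ops_ hooks set up below.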
    device_ops_.read = virtio_console_read;
    device_ops_.write = virtio_console_write;

    device_add_args_t args = {};
    args.version = DEVICE_ADD_ARGS_VERSION;
    args.name = "virtio-console";
    args.ctx = this;
    args.ops = &device_ops_;

    // We probably want to have an alias for console devices
    args.proto_id = ZX_PROTOCOL_CONSOLE;

    status = device_add(bus_device_, &args, &device_);
    if (status) {
        zxlogf(ERROR, "%s: Failed to register device (%d)\n", tag(), status);
        device_ = nullptr;
        return status;
    }

    StartIrqThread();
    DriverStatusOk();

    LTRACE_EXIT;
    return ZX_OK;
}

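// Interrupt handler: reclaims descriptors the device has finished with on both the receive and
// transmit rings, then marks the device readable/writable as appropriate.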
void ConsoleDevice::IrqRingUpdate() {
    LTRACE_ENTRY;

    fbl::AutoLock a(&request_lock_);

    // These callbacks run synchronously with request_lock_ already held, so the lambdas
    // don't re-acquire it (hence TA_NO_THREAD_SAFETY_ANALYSIS).
    port0_receive_queue_.IrqRingUpdate([this](vring_used_elem* elem) TA_NO_THREAD_SAFETY_ANALYSIS {
        uint16_t index = static_cast<uint16_t>(elem->id);
        vring_desc* desc = port0_receive_queue_.DescFromIndex(index);
        uint32_t remain = elem->len;

        for (;;) {
            bool has_next = desc->flags & VRING_DESC_F_NEXT;
            uint16_t next = desc->next;

            TransferDescriptor* trans = port0_receive_buffer_.PhysicalToDescriptor(desc->addr);

            trans->processed_len = 0;
            trans->used_len = fbl::min(trans->total_len, remain);
            remain -= trans->used_len;
            port0_receive_descriptors_.Add(trans);

            port0_receive_queue_.FreeDesc(index);
            if (!has_next)
                break;

            index = next;
            desc = port0_receive_queue_.DescFromIndex(index);
        }
        device_state_set(device_, DEV_STATE_READABLE);
    });

    port0_transmit_queue_.IrqRingUpdate([this](vring_used_elem* elem) TA_NO_THREAD_SAFETY_ANALYSIS {
        uint16_t index = static_cast<uint16_t>(elem->id);
        vring_desc* desc = port0_transmit_queue_.DescFromIndex(index);

        for (;;) {
            bool has_next = desc->flags & VRING_DESC_F_NEXT;
            uint16_t next = desc->next;

            TransferDescriptor* trans = port0_transmit_buffer_.PhysicalToDescriptor(desc->addr);

            port0_transmit_descriptors_.Add(trans);

            port0_transmit_queue_.FreeDesc(index);
            if (!has_next)
                break;

            index = next;
            desc = port0_transmit_queue_.DescFromIndex(index);
        }
        device_state_set(device_, DEV_STATE_WRITABLE);
    });
    LTRACE_EXIT;
}

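// ddk read() hook: forwards to the owning ConsoleDevice instance.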
zx_status_t ConsoleDevice::virtio_console_read(void* ctx, void* buf, size_t count, zx_off_t off,
                                               size_t* actual) {
    ConsoleDevice* console = reinterpret_cast<ConsoleDevice*>(ctx);

    return console->Read(buf, count, off, actual);
}

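// Copies already-received data from the oldest receive descriptor into |buf|. Once a descriptor
// has been fully consumed it is queued back on the receive ring for the device to refill.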
zx_status_t ConsoleDevice::Read(void* buf, size_t count, zx_off_t off, size_t* actual) {
    LTRACE_ENTRY;
    *actual = 0;

    if (count > UINT32_MAX)
        count = UINT32_MAX;

    fbl::AutoLock a(&request_lock_);

    TransferDescriptor* desc = port0_receive_descriptors_.Peek();
    if (!desc) {
        device_state_clr(device_, DEV_STATE_READABLE);
        return ZX_ERR_SHOULD_WAIT;
    }

    uint32_t len = fbl::min(static_cast<uint32_t>(count), desc->used_len - desc->processed_len);
    memcpy(buf, desc->virt + desc->processed_len, len);
    desc->processed_len += len;
    *actual += len;

    // Did we read the whole buffer? If so, return it to the device.
    if (desc->processed_len == desc->used_len) {
        port0_receive_descriptors_.Dequeue();
        QueueTransfer(&port0_receive_queue_, desc->phys, desc->total_len, /*write=*/false);
        port0_receive_queue_.Kick();
    }

    LTRACE_EXIT;
    return ZX_OK;
}

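// ddk write() hook: forwards to the owning ConsoleDevice instance.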
zx_status_t ConsoleDevice::virtio_console_write(void* ctx, const void* buf, size_t count,
                                                zx_off_t off, size_t* actual) {
    ConsoleDevice* console = reinterpret_cast<ConsoleDevice*>(ctx);

    return console->Write(buf, count, off, actual);
}

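// Copies data from |buf| into a free transmit descriptor and queues it on the transmit ring.
// Returns ZX_ERR_SHOULD_WAIT when all transmit descriptors are in flight.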
zx_status_t ConsoleDevice::Write(const void* buf, size_t count, zx_off_t off, size_t* actual) {
    LTRACE_ENTRY;
    *actual = 0;

    if (count > UINT32_MAX)
        count = UINT32_MAX;

    fbl::AutoLock a(&request_lock_);

    TransferDescriptor* desc = port0_transmit_descriptors_.Dequeue();
    if (!desc) {
        device_state_clr(device_, DEV_STATE_WRITABLE);
        return ZX_ERR_SHOULD_WAIT;
    }

    uint32_t len = fbl::min(static_cast<uint32_t>(count), desc->total_len);
    memcpy(desc->virt, buf, len);
    desc->used_len = len;
    *actual += len;

    QueueTransfer(&port0_transmit_queue_, desc->phys, desc->used_len, /*write=*/true);
    port0_transmit_queue_.Kick();

    LTRACE_EXIT;
    return ZX_OK;
}

} // namespace virtio