/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-11-11     GuEe-GUI     the first version
 * 2023-10-12     fangjianzhou support SDL2
 */

#include <rtthread.h>
#include <cpuport.h>

#include <virtio.h>

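/* Sanity check shared by every helper below: the caller must pass a probed
 * device with a valid MMIO configuration mapping. */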
rt_inline void _virtio_dev_check(struct virtio_device *dev)
{
    RT_ASSERT(dev != RT_NULL);
    RT_ASSERT(dev->mmio_config != RT_NULL);
}

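/* Writing zero to the status register resets the device. */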
void virtio_reset_device(struct virtio_device *dev)
{
    _virtio_dev_check(dev);

    dev->mmio_config->status = 0;
}

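/* Set the ACKNOWLEDGE and DRIVER status bits: the OS has noticed the
 * device and knows how to drive it. */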
void virtio_status_acknowledge_driver(struct virtio_device *dev)
{
    _virtio_dev_check(dev);

    dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
}

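/* Finish initialization: mark feature negotiation complete (FEATURES_OK)
 * and the driver ready to operate the device (DRIVER_OK) in one write. */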
void virtio_status_driver_ok(struct virtio_device *dev)
{
    _virtio_dev_check(dev);

    dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK;
}

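/* Acknowledge a pending interrupt by writing the interrupt-status bits we
 * just read back into the interrupt-ack register. */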
void virtio_interrupt_ack(struct virtio_device *dev)
{
    rt_uint32_t status;

    _virtio_dev_check(dev);

    status = dev->mmio_config->interrupt_status;

    if (status != 0)
    {
        dev->mmio_config->interrupt_ack = status;
    }
}

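/* Test one bit of the 32-bit device-features word currently selected by
 * the device; feature bits above 31 would need the upper word selected
 * first (the DeviceFeaturesSel register). */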
rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit)
{
    _virtio_dev_check(dev);

    return !!(dev->mmio_config->device_features & (1UL << feature_bit));
}

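/* Allocate the per-queue control structures; the rings themselves are set
 * up later, one queue at a time, by virtio_queue_init(). */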
rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num)
{
    _virtio_dev_check(dev);

    dev->queues = rt_malloc(sizeof(struct virtq) * queues_num);

    if (dev->queues != RT_NULL)
    {
        dev->queues_num = queues_num;

        return RT_EOK;
    }

    return -RT_ENOMEM;
}

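/* Release the queue array allocated by virtio_queues_alloc(). */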
void virtio_queues_free(struct virtio_device *dev)
{
    if (dev->queues != RT_NULL)
    {
        dev->queues_num = 0;
        rt_free(dev->queues);
        /* Clear the dangling pointer so a double free is harmless */
        dev->queues = RT_NULL;
    }
}

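/*
 * Initialize one virtqueue using the legacy MMIO layout: the descriptor
 * table and the available ring share the first page-aligned region, the
 * used ring starts on the next page boundary, and the device learns the
 * ring's location from its physical page frame number (queue_pfn).
 */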
rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size)
{
    int i;
    void *pages;
    rt_size_t pages_total_size;
    struct virtq *queue;

    _virtio_dev_check(dev);

    RT_ASSERT(dev->mmio_config->queue_num_max > 0);
    RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);
    /* ring_size must be a power of 2 */
    RT_ASSERT(ring_size > 0);
    RT_ASSERT(((ring_size - 1) & ring_size) == 0);

    queue = &dev->queues[queue_index];
    pages_total_size = VIRTIO_PAGE_ALIGN(
            VIRTQ_DESC_TOTAL_SIZE(ring_size) + VIRTQ_AVAIL_TOTAL_SIZE(ring_size)) + VIRTQ_USED_TOTAL_SIZE(ring_size);

    pages = rt_malloc_align(pages_total_size, VIRTIO_PAGE_SIZE);

    if (pages == RT_NULL)
    {
        return -RT_ENOMEM;
    }

    queue->free = rt_malloc(sizeof(rt_bool_t) * ring_size);

    if (queue->free == RT_NULL)
    {
        rt_free_align(pages);
        return -RT_ENOMEM;
    }

    rt_memset(pages, 0, pages_total_size);

    dev->mmio_config->guest_page_size = VIRTIO_PAGE_SIZE;
    dev->mmio_config->queue_sel = queue_index;
    dev->mmio_config->queue_num = ring_size;
    dev->mmio_config->queue_align = VIRTIO_PAGE_SIZE;
    dev->mmio_config->queue_pfn = VIRTIO_VA2PA(pages) >> VIRTIO_PAGE_SHIFT;

    queue->num = ring_size;
    queue->desc = (struct virtq_desc *)((rt_ubase_t)pages);
    queue->avail = (struct virtq_avail *)(((rt_ubase_t)pages) + VIRTQ_DESC_TOTAL_SIZE(ring_size));
    queue->used = (struct virtq_used *)VIRTIO_PAGE_ALIGN(
            (rt_ubase_t)&queue->avail->ring[ring_size] + VIRTQ_AVAIL_RES_SIZE);

    queue->used_idx = 0;

    /* All descriptors start out unused */
    for (i = 0; i < ring_size; ++i)
    {
        queue->free[i] = RT_TRUE;
    }

    queue->free_count = ring_size;

    return RT_EOK;
}

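/* Detach the ring from the device (clear queue_pfn) and free its memory. */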
void virtio_queue_destroy(struct virtio_device *dev, rt_uint32_t queue_index)
{
    struct virtq *queue;

    _virtio_dev_check(dev);

    RT_ASSERT(dev->mmio_config->queue_num_max > 0);
    RT_ASSERT(dev->mmio_config->queue_num_max > queue_index);

    queue = &dev->queues[queue_index];

    RT_ASSERT(queue->num > 0);

    rt_free(queue->free);
    rt_free_align((void *)queue->desc);

    dev->mmio_config->queue_sel = queue_index;
    dev->mmio_config->queue_pfn = 0;

    queue->num = 0;
    queue->desc = RT_NULL;
    queue->avail = RT_NULL;
    queue->used = RT_NULL;
}

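/* Kick the device: tell it the selected queue has new available buffers. */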
void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index)
{
    _virtio_dev_check(dev);

    dev->mmio_config->queue_notify = queue_index;
}

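/* Publish a descriptor chain on the available ring. The barriers make the
 * ring-entry write visible before the index update, and the index update
 * visible before any subsequent notify, as seen by the device. */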
void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
    rt_size_t ring_size;
    struct virtq *queue;

    _virtio_dev_check(dev);

    queue = &dev->queues[queue_index];
    ring_size = queue->num;

    /* Tell the device the first index in our chain of descriptors */
    queue->avail->ring[queue->avail->idx % ring_size] = desc_index;
    rt_hw_dsb();

    /* Tell the device another avail ring entry is available */
    queue->avail->idx++;
    rt_hw_dsb();
}

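/* Grab a free descriptor with a linear scan of the free map; returns
 * VIRTQ_INVALID_DESC_ID when the ring is exhausted. */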
rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index)
{
    int i;
    struct virtq *queue;

    _virtio_dev_check(dev);

    RT_ASSERT(queue_index < dev->queues_num);

    queue = &dev->queues[queue_index];

    if (queue->free_count > 0)
    {
        rt_size_t ring_size = queue->num;

        for (i = 0; i < ring_size; ++i)
        {
            if (queue->free[i])
            {
                queue->free[i] = RT_FALSE;
                queue->free_count--;

                return (rt_uint16_t)i;
            }
        }
    }

    return VIRTQ_INVALID_DESC_ID;
}

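/* Return a single descriptor to the free pool, clearing its fields so a
 * stale address or flag can never be replayed to the device. */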
void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
    struct virtq *queue;

    _virtio_dev_check(dev);

    RT_ASSERT(queue_index < dev->queues_num);

    queue = &dev->queues[queue_index];

    RT_ASSERT(!queue->free[desc_index]);

    queue->desc[desc_index].addr = 0;
    queue->desc[desc_index].len = 0;
    queue->desc[desc_index].flags = 0;
    queue->desc[desc_index].next = 0;

    queue->free[desc_index] = RT_TRUE;

    queue->free_count++;
}

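/* Allocate 'count' descriptors at once; on failure every descriptor
 * already taken is rolled back, so the call is all-or-nothing. */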
rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count,
        rt_uint16_t *indexs)
{
    int i, j;

    _virtio_dev_check(dev);

    RT_ASSERT(indexs != RT_NULL);

    if (dev->queues[queue_index].free_count < count)
    {
        return -RT_ERROR;
    }

    for (i = 0; i < count; ++i)
    {
        indexs[i] = virtio_alloc_desc(dev, queue_index);

        if (indexs[i] == VIRTQ_INVALID_DESC_ID)
        {
            for (j = 0; j < i; ++j)
            {
                virtio_free_desc(dev, queue_index, indexs[j]);
            }

            return -RT_ERROR;
        }
    }

    return RT_EOK;
}

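/* Free a whole chain, following the VIRTQ_DESC_F_NEXT links starting at
 * desc_index until a descriptor without the NEXT flag is reached. */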
void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index)
{
    rt_uint16_t flags, next;
    struct virtq_desc *desc;

    _virtio_dev_check(dev);

    desc = &dev->queues[queue_index].desc[0];

    for (;;)
    {
        flags = desc[desc_index].flags;
        next = desc[desc_index].next;

        virtio_free_desc(dev, queue_index, desc_index);

        if (flags & VIRTQ_DESC_F_NEXT)
        {
            desc_index = next;
        }
        else
        {
            break;
        }
    }
}

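/* Fill one descriptor: 'addr' must be a physical address the device can
 * DMA to, and 'next' is only honored when 'flags' contains
 * VIRTQ_DESC_F_NEXT. */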
void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index,
        rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next)
{
    struct virtq_desc *desc;

    _virtio_dev_check(dev);

    desc = &dev->queues[queue_index].desc[desc_index];

    desc->addr = addr;
    desc->len = len;
    desc->flags = flags;
    desc->next = next;
}

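/*
 * A minimal bring-up sketch for a driver built on the helpers above
 * (illustrative only: "dev" stands for a device that a hypothetical probe
 * step has already filled in, and SOME_FEATURE_BIT, req and resp are
 * placeholder names, not part of this API):
 *
 *     virtio_reset_device(dev);
 *     virtio_status_acknowledge_driver(dev);
 *
 *     if (virtio_has_feature(dev, SOME_FEATURE_BIT))
 *     {
 *         // driver-specific feature setup
 *     }
 *
 *     virtio_queues_alloc(dev, 1);
 *     virtio_queue_init(dev, 0, 8);
 *     virtio_status_driver_ok(dev);
 *
 * A request is then sent by allocating a chain, filling its descriptors,
 * publishing the chain and kicking the device:
 *
 *     rt_uint16_t idx[2];
 *
 *     virtio_alloc_desc_chain(dev, 0, 2, idx);
 *     virtio_fill_desc(dev, 0, idx[0], VIRTIO_VA2PA(req), sizeof(*req),
 *             VIRTQ_DESC_F_NEXT, idx[1]);
 *     virtio_fill_desc(dev, 0, idx[1], VIRTIO_VA2PA(resp), sizeof(*resp),
 *             VIRTQ_DESC_F_WRITE, 0);
 *     virtio_submit_chain(dev, 0, idx[0]);
 *     virtio_queue_notify(dev, 0);
 */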
#ifdef RT_USING_SMART
#ifdef RT_USING_VIRTIO_GPU

#include <virtio_gpu.h>
#include "drivers/lcd.h"
#include <dfs_file.h>
#include <lwp_user_mm.h>

static struct rt_device_graphic_info _graphic_info;
static struct rt_device_rect_info    _rect_info;
static struct rt_device              _fb        = {0};
static rt_device_t                   _gpu_dev   = RT_NULL;

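/* The framebuffer keeps no per-open state, so open/close are no-ops. */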
static rt_err_t fb_open(rt_device_t dev, rt_uint16_t oflag)
{
    return RT_EOK;
}

static rt_err_t fb_close(rt_device_t dev)
{
    return RT_EOK;
}

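/* Implement the Linux-style framebuffer ioctls on top of the virtio-gpu
 * device: panning flushes the dirty rectangle to the GPU, the screeninfo
 * calls report the current mode, and RT_FIOMMAP2 maps the framebuffer's
 * physical pages into the calling user process. */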
static rt_err_t fb_control(rt_device_t dev, int cmd, void *args)
{
    switch (cmd)
    {
        case FBIOPAN_DISPLAY:
        {
            rt_hw_cpu_dcache_clean(_graphic_info.framebuffer, _graphic_info.smem_len);
            rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info);
            break;
        }
        case FBIOGET_FSCREENINFO:
        {
            struct fb_fix_screeninfo *info = (struct fb_fix_screeninfo *)args;
            strncpy(info->id, "lcd", sizeof(info->id));
            info->smem_len = _graphic_info.smem_len;
            break;
        }
        case FBIOGET_VSCREENINFO:
        {
            struct fb_var_screeninfo *info = (struct fb_var_screeninfo *)args;
            info->bits_per_pixel           = _graphic_info.bits_per_pixel;
            info->xres                     = _graphic_info.width;
            info->yres                     = _graphic_info.height;
            info->yres_virtual             = _graphic_info.height;
            info->xres_virtual             = _graphic_info.width;
            info->transp.offset            = 24;
            info->transp.length            = 8;
            info->red.offset               = 0;
            info->red.length               = 8;
            info->green.offset             = 8;
            info->green.length             = 8;
            info->blue.offset              = 16;
            info->blue.length              = 8;
            break;
        }
        case RT_FIOMMAP2:
        {
            struct dfs_mmap2_args *mmap2 = (struct dfs_mmap2_args *)args;

            if (mmap2)
            {
                mmap2->ret = lwp_map_user_phy(lwp_self(), RT_NULL, rt_kmem_v2p(_graphic_info.framebuffer), mmap2->length, 1);
            }
            else
            {
                return -EIO;
            }

            break;
        }
        default:
            break;
    }

    return RT_EOK;
}

#ifdef RT_USING_DEVICE_OPS
static const struct rt_device_ops fb_ops =
{
    RT_NULL,
    fb_open,
    fb_close,
    RT_NULL,
    RT_NULL,
    fb_control
};
#endif

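/* Probe "virtio-gpu0", switch it to 2D scanout mode, and expose its
 * framebuffer as the "fb0" device that user space expects. */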
static int fb_init(void)
{
    _gpu_dev = rt_device_find("virtio-gpu0");

    if (_gpu_dev == RT_NULL)
    {
        return -RT_ERROR;
    }

    /* Without an opened GPU there is no framebuffer to export */
    if (rt_device_open(_gpu_dev, 0) != RT_EOK)
    {
        return -RT_ERROR;
    }

    rt_memset(&_graphic_info, 0, sizeof(_graphic_info));
    rt_memset(&_rect_info, 0, sizeof(_rect_info));
    rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY, RT_NULL);
    rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_CREATE_2D, (void *)RTGRAPHIC_PIXEL_FORMAT_RGB888);
    rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_GET_INFO, &_graphic_info);

    _rect_info.x      = 0;
    _rect_info.y      = 0;
    _rect_info.width  = _graphic_info.width;
    _rect_info.height = _graphic_info.height;

    /* Clear the screen to white before the first flush */
    rt_memset(_graphic_info.framebuffer, 0xff, _graphic_info.smem_len);
    rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info);

    if (rt_device_find("fb0") != RT_NULL)
    {
        rt_kprintf("a device named fb0 already exists\n");
        return -RT_ERROR;
    }

    _fb.type = RT_Device_Class_Miscellaneous;

#ifdef RT_USING_DEVICE_OPS
    _fb.ops        = &fb_ops;
#else
    _fb.init       = RT_NULL;
    _fb.open       = fb_open;
    _fb.close      = fb_close;
    _fb.read       = RT_NULL;
    _fb.write      = RT_NULL;
    _fb.control    = fb_control;
    _fb.user_data  = RT_NULL;
#endif

    return rt_device_register(&_fb, "fb0", RT_DEVICE_FLAG_RDWR);
}
INIT_COMPONENT_EXPORT(fb_init);
#endif
#endif