1 // Copyright 2016 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #pragma once
6 
7 #include <zircon/assert.h>
8 #include <zircon/compiler.h>
9 #include <zircon/syscalls.h>
10 #include <zircon/types.h>
11 
12 #include <limits.h>
13 #include <stdbool.h>
14 #include <stddef.h>
15 #include <stdint.h>
16 
17 __BEGIN_CDECLS;
18 
19 // Sentinel value for io_buffer_t's |phys| field for when it is not valid.
20 #define IO_BUFFER_INVALID_PHYS 0
21 
typedef struct {
    zx_handle_t bti_handle; // borrowed by library
    zx_handle_t vmo_handle; // owned by library
    zx_handle_t pmt_handle; // owned by library
    size_t size;            // size in bytes; spans from the start of the VMO mapping
                            // (presumably includes |offset| — see io_buffer_size())
    zx_off_t offset;        // offset of the buffer start within the mapping; added to
                            // |virt| / |phys| by the accessors below
    void* virt;             // base virtual address of the mapping, NOT adjusted by
                            // |offset| (io_buffer_virt() applies the adjustment)
    // Points to the physical page backing the start of the VMO, if this
    // io buffer was created with the IO_BUFFER_CONTIG flag.
    zx_paddr_t phys;        // IO_BUFFER_INVALID_PHYS when not valid

    // This is used for storing the addresses of the physical pages backing non
    // contiguous buffers and is set by io_buffer_physmap().
    // Each entry in the list represents a whole page and the first entry
    // points to the page containing 'offset'.
    zx_paddr_t* phys_list;
    uint64_t phys_count;    // number of entries in |phys_list|
} io_buffer_t;
40 
// Flag bits accepted by the io_buffer_init*() family of functions.
enum {
    IO_BUFFER_RO         = (0 << 0),    // map buffer read-only
    IO_BUFFER_RW         = (1 << 0),    // map buffer read/write
    IO_BUFFER_CONTIG     = (1 << 1),    // allocate physically contiguous buffer
    IO_BUFFER_UNCACHED   = (1 << 2),    // map buffer with ZX_CACHE_POLICY_UNCACHED
    // Union of every flag bit defined above.
    IO_BUFFER_FLAGS_MASK = IO_BUFFER_RW | IO_BUFFER_CONTIG | IO_BUFFER_UNCACHED,
};
48 
49 // Initializes a new io_buffer.  If this call fails, it is still safe to call
50 // io_buffer_release on |buffer|.  |bti| is borrowed by the io_buffer and may be
51 // used throughout the lifetime of the io_buffer.
52 zx_status_t io_buffer_init(io_buffer_t* buffer, zx_handle_t bti, size_t size, uint32_t flags);
53 // An alignment of zero is interpreted as requesting page alignment.
54 // Requesting a specific alignment is not supported for non-contiguous buffers,
55 // pass zero for |alignment_log2| if not passing IO_BUFFER_CONTIG.  |bti| is borrowed
56 // by the io_buffer and may be used throughout the lifetime of the io_buffer.
57 zx_status_t io_buffer_init_aligned(io_buffer_t* buffer, zx_handle_t bti, size_t size,
58                                    uint32_t alignment_log2, uint32_t flags);
59 
// Initializes an io_buffer based on an existing VMO.
// Duplicates the provided vmo_handle - does not take ownership.
// |bti| is borrowed by the io_buffer and may be used throughout the lifetime of the io_buffer.
63 zx_status_t io_buffer_init_vmo(io_buffer_t* buffer, zx_handle_t bti, zx_handle_t vmo_handle,
64                                zx_off_t offset, uint32_t flags);
65 
// Initializes an io_buffer based on an existing VMO that has already been mapped.
// Duplicates the provided vmo_handle - does not take ownership.
// vmo_handle must be a VMO that was created via zx_vmo_create_physical().
// The virtual address provided in vaddr will be unmapped by io_buffer_release().
// Note: unlike the other initializers, this function takes no |bti| handle.
71 zx_status_t io_buffer_init_mmio(io_buffer_t* buffer, zx_handle_t vmo_handle, void* vaddr,
72                                 zx_off_t offset, size_t size);
73 
74 // Initializes an io_buffer that maps a given physical address
75 // |bti| is borrowed by the io_buffer and may be used throughout the lifetime of the io_buffer.
76 zx_status_t io_buffer_init_physical(io_buffer_t* buffer, zx_handle_t bti, zx_paddr_t addr,
77                                     size_t size, zx_handle_t resource, uint32_t cache_policy);
78 
79 zx_status_t io_buffer_cache_op(io_buffer_t* buffer, const uint32_t op,
80                                const zx_off_t offset, const size_t size);
81 
82 // io_buffer_cache_flush() performs a cache flush on a range of memory in the buffer
83 zx_status_t io_buffer_cache_flush(io_buffer_t* buffer, zx_off_t offset, size_t length);
84 
85 // io_buffer_cache_flush_invalidate() performs a cache flush and invalidate on a range of memory
86 // in the buffer
87 zx_status_t io_buffer_cache_flush_invalidate(io_buffer_t* buffer, zx_off_t offset, size_t length);
88 
89 // Looks up the physical pages backing this buffer's vm object.
90 // This is used for non contiguous buffers.
91 // The 'phys_list' and 'phys_count' fields are set if this function succeeds.
92 zx_status_t io_buffer_physmap(io_buffer_t* buffer);
93 
94 // Pins and returns the physical addresses corresponding to the requested subrange
95 // of the buffer.  Invoking zx_pmt_unpin() on pmt releases the pin and makes the
96 // addresses invalid to use.
97 zx_status_t io_buffer_physmap_range(io_buffer_t* buffer, zx_off_t offset,
98                                     size_t length, size_t phys_count,
99                                     zx_paddr_t* physmap, zx_handle_t* pmt);
100 
101 // Releases an io_buffer
102 void io_buffer_release(io_buffer_t* buffer);
103 
io_buffer_is_valid(const io_buffer_t * buffer)104 static inline bool io_buffer_is_valid(const io_buffer_t* buffer) {
105     return (buffer->vmo_handle != ZX_HANDLE_INVALID);
106 }
107 
io_buffer_virt(const io_buffer_t * buffer)108 static inline void* io_buffer_virt(const io_buffer_t* buffer) {
109     return (void*)(((uintptr_t)buffer->virt) + buffer->offset);
110 }
111 
io_buffer_phys(const io_buffer_t * buffer)112 static inline zx_paddr_t io_buffer_phys(const io_buffer_t* buffer) {
113     ZX_DEBUG_ASSERT(buffer->phys != IO_BUFFER_INVALID_PHYS);
114     return buffer->phys + buffer->offset;
115 }
116 
117 // Returns the buffer size available after the given offset, relative to the
118 // io_buffer vmo offset.
io_buffer_size(const io_buffer_t * buffer,size_t offset)119 static inline size_t io_buffer_size(const io_buffer_t* buffer, size_t offset) {
120     size_t remaining = buffer->size - buffer->offset - offset;
121     // May overflow.
122     if (remaining > buffer->size) {
123         remaining = 0;
124     }
125     return remaining;
126 }
127 
128 __END_CDECLS;
129