/*
 * Copyright (c) 2019-2025 Allwinner Technology Co., Ltd. ALL rights reserved.
 *
 * Allwinner is a trademark of Allwinner Technology Co.,Ltd., registered in
 * the People's Republic of China and other countries.
 * All Allwinner Technology Co.,Ltd. trademarks are used with permission.
 *
 * DISCLAIMER
 * THIRD PARTY LICENCES MAY BE REQUIRED TO IMPLEMENT THE SOLUTION/PRODUCT.
 * IF YOU NEED TO INTEGRATE THIRD PARTY'S TECHNOLOGY (SONY, DTS, DOLBY, AVS OR MPEGLA, ETC.)
 * IN ALLWINNER'S SDK OR PRODUCTS, YOU SHALL BE SOLELY RESPONSIBLE TO OBTAIN
 * ALL APPROPRIATELY REQUIRED THIRD PARTY LICENCES.
 * ALLWINNER SHALL HAVE NO WARRANTY, INDEMNITY OR OTHER OBLIGATIONS WITH RESPECT TO MATTERS
 * COVERED UNDER ANY REQUIRED THIRD PARTY LICENSE.
 * YOU ARE SOLELY RESPONSIBLE FOR YOUR USAGE OF THIRD PARTY'S TECHNOLOGY.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY ALLWINNER "AS IS" AND TO THE MAXIMUM EXTENT
 * PERMITTED BY LAW, ALLWINNER EXPRESSLY DISCLAIMS ALL WARRANTIES OF ANY KIND,
 * WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING WITHOUT LIMITATION REGARDING
 * THE TITLE, NON-INFRINGEMENT, ACCURACY, CONDITION, COMPLETENESS, PERFORMANCE
 * OR MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL ALLWINNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS, OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __DMA_WRAP_H_
#define __DMA_WRAP_H_

#include <stdint.h>    /* uintptr_t */
#include <stdlib.h>    /* calloc(), free() */
#include <stdio.h>     /* printf() */
#include <hal_dma.h>
#include <hal_cache.h>
#include <hal_mem.h>

/* Thin handle wrapping the Allwinner hal_dma channel object. */
struct dma_chan {
    struct sunxi_dma_chan *dma_handle;
};
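/*
 * The helpers below are thin wrappers that map Linux dmaengine-style calls
 * (dma_request_channel(), dmaengine_prep_dma_cyclic(), dmaengine_submit(),
 * dma_async_issue_pending(), ...) onto the Allwinner hal_dma API, so calling
 * code can keep the familiar dmaengine naming.
 */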
#if 0
/*
 * Legacy helpers, currently compiled out: allocate a 64-byte aligned buffer
 * and stash the original hal_malloc() pointer one word below the aligned
 * address so dma_free_coherent() can recover it.
 */
static inline void dma_free_coherent(void *addr)
{
    void *malloc_ptr = NULL;

    if (!addr)
        return;
    /* recover the original allocation pointer stored below the aligned address */
    malloc_ptr = (void *)*((uintptr_t *)addr - 1);
    hal_free(malloc_ptr);
}

static inline void *dma_alloc_coherent(size_t size)
{
    void *fake_ptr = NULL;
    void *malloc_ptr = NULL;

    malloc_ptr = hal_malloc(size + 64);
    if ((uintptr_t)malloc_ptr & 0x3)
        snd_err("error: hal_malloc not aligned to 4 bytes\r\n");
    /* round up to the next 64-byte boundary and remember the real pointer */
    fake_ptr = (void *)(((uintptr_t)malloc_ptr + 64) & ~(uintptr_t)63);
    *((uintptr_t *)fake_ptr - 1) = (uintptr_t)malloc_ptr;

    return fake_ptr;
}
#endif

/* Allocate a wrapper object and request a free hal_dma channel for it. */
static inline struct dma_chan *dma_request_channel(void)
{
    struct dma_chan *chan = NULL;
    hal_dma_chan_status_t status = 0;

    chan = calloc(1, sizeof(struct dma_chan));
    if (!chan)
        return NULL;
    status = hal_dma_chan_request(&chan->dma_handle);
    if (status != HAL_DMA_CHAN_STATUS_FREE) {
        snd_err("request dma chan failed\n");
        free(chan);
        return NULL;
    }
    return chan;
}

/* Return the hal_dma channel to the pool and free the wrapper. */
static inline void dma_release_channel(struct dma_chan *chan)
{
    hal_dma_status_t status = 0;

    if (!chan)
        return;
    status = hal_dma_chan_free(chan->dma_handle);
    if (status != HAL_DMA_STATUS_OK)
        snd_err("free dma chan failed\n");
    free(chan);
}

/* Query the transfer status; the remaining byte count is stored in *residue. */
static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
            uint32_t *residue)
{
    return hal_dma_tx_status(chan->dma_handle, residue);
}

/* Prepare a cyclic (ring-buffer) transfer of buf_len split into period_len chunks. */
static inline int dmaengine_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir)
{
    hal_dma_status_t status = 0;

    snd_print("[%s] line:%d buf_addr:0x%x, buf_len:0x%x, period_len:0x%x\n",
        __func__, __LINE__, buf_addr, buf_len, period_len);

    status = hal_dma_prep_cyclic(chan->dma_handle,
            (unsigned long)buf_addr, (unsigned long)buf_len,
            (unsigned long)period_len, dir);

    if (status != HAL_DMA_STATUS_OK) {
        snd_err("hal_dma_prep_cyclic failed\n");
        return -1;
    }
    return 0;
}

/* Install the completion callback for the prepared transfer. */
static inline int dmaengine_submit(struct dma_chan *chan,
            dma_callback callback, void *callback_param)
{
    hal_dma_status_t status = 0;

    snd_print("\n");
    status = hal_dma_callback_install(chan->dma_handle,
                callback, callback_param);
    if (status != HAL_DMA_STATUS_OK) {
        snd_err("hal_dma_callback_install failed\n");
        return -1;
    }

    return 0;
}

/* Apply the slave (device-side) configuration to the channel. */
static inline int dmaengine_slave_config(struct dma_chan *chan,
                                          struct dma_slave_config *config)
{
    hal_dma_status_t status = 0;

    snd_print("\n");
    status = hal_dma_slave_config(chan->dma_handle, config);
    if (status != HAL_DMA_STATUS_OK) {
        snd_err("hal_dma_slave_config failed\n");
        return -1;
    }

    return 0;
}

/* Start the transfer that was prepared and submitted on this channel. */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
    hal_dma_status_t status = 0;

    snd_print("\n");
    status = hal_dma_start(chan->dma_handle);
    if (status != HAL_DMA_STATUS_OK)
        snd_err("hal_dma_start failed\n");
}

/* Stop the channel and release its descriptor memory. */
static inline int dmaengine_terminate_async(struct dma_chan *chan)
{
    hal_dma_status_t status = 0;

    status = hal_dma_stop(chan->dma_handle);
    if (status != HAL_DMA_STATUS_OK) {
        snd_err("hal_dma_stop failed\n");
        return -1;
    }

    status = hal_dma_chan_desc_free(chan->dma_handle);
    if (status != HAL_DMA_STATUS_OK) {
        snd_err("hal_dma_chan_desc_free failed, return:%d\n", status);
        return -1;
    }

    return 0;
}

/* Pause/resume are not implemented by this wrapper layer. */
static inline int dmaengine_pause(struct dma_chan *chan)
{
    printf("dma pause not supported.\n");
    return -1;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
    printf("dma resume not supported.\n");
    return -1;
}
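/*
 * Illustrative only: a minimal sketch of how the wrappers above are typically
 * chained for a cyclic (ring-buffer) transfer, e.g. an audio stream. The
 * dma_slave_config contents, the DMA_MEM_TO_DEV direction value and the
 * callback signature are assumptions based on the Linux dmaengine-style API
 * mirrored by hal_dma.h; check the actual HAL definitions before enabling it.
 * Teardown is the reverse: dmaengine_terminate_async() followed by
 * dma_release_channel().
 */
#if 0
static void example_period_done(void *param)
{
    /* invoked by the DMA HAL when a period of the cyclic buffer completes */
}

static int example_start_cyclic(struct dma_chan **out_chan, dma_addr_t buf,
                                size_t buf_len, size_t period_len)
{
    struct dma_chan *chan;
    struct dma_slave_config cfg = {0};

    chan = dma_request_channel();
    if (!chan)
        return -1;

    cfg.direction = DMA_MEM_TO_DEV;    /* assumed direction constant */
    /* remaining cfg fields (addresses, widths, bursts) depend on the device */

    if (dmaengine_slave_config(chan, &cfg) ||
        dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                  DMA_MEM_TO_DEV) ||
        dmaengine_submit(chan, example_period_done, NULL)) {
        dma_release_channel(chan);
        return -1;
    }

    dma_async_issue_pending(chan);
    *out_chan = chan;
    return 0;
}
#endif /* usage sketch */
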
#endif /* __DMA_WRAP_H_ */