/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-08-08     GuEe-GUI     first version
 */

#include "blk_dfs.h"

#include <dfs_file.h>
#include <drivers/classes/block.h>

#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_DFS_V2)
struct blk_fops_data
{
    struct rt_device_blk_geometry geometry;
};

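/*
 * DFS open hook: allocate a geometry cache for the block device, keep it in
 * dev->user_data, and set the vnode size to the device's total byte size.
 */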
static int blk_fops_open(struct dfs_file *file)
{
    struct rt_device *dev = file->vnode->data;
    struct blk_fops_data *data = rt_malloc(sizeof(*data));

    if (!data)
    {
        return (int)-RT_ENOMEM;
    }

    dev->user_data = data;
    rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &data->geometry);
    rt_device_control(dev, RT_DEVICE_CTRL_ALL_BLK_SSIZEGET, &file->vnode->size);

    return 0;
}

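/*
 * DFS close hook: release the geometry cache allocated in blk_fops_open().
 */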
static int blk_fops_close(struct dfs_file *file)
{
    struct rt_device *dev = file->vnode->data;

    rt_free(dev->user_data);
    dev->user_data = RT_NULL;

    return 0;
}

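/* DFS ioctl hook: forward the request directly to the block device driver. */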
static int blk_fops_ioctl(struct dfs_file *file, int cmd, void *arg)
{
    struct rt_device *dev = file->vnode->data;

    return (int)rt_device_control(dev, cmd, arg);
}

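/*
 * DFS read hook: block devices transfer whole sectors, so byte-oriented reads
 * go through a one-sector bounce buffer. The head of the request may start in
 * the middle of a sector; the rest is copied out sector by sector until
 * `count` bytes have been gathered or the device reports an error.
 */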
static ssize_t blk_fops_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
    void *rbuf;
    rt_ssize_t res = 0;
    int bytes_per_sector, blk_pos, first_offs, rsize = 0;
    struct rt_device *dev = file->vnode->data;
    struct blk_fops_data *data = dev->user_data;

    bytes_per_sector = data->geometry.bytes_per_sector;
    blk_pos = *pos / bytes_per_sector;
    first_offs = *pos % bytes_per_sector;

    if ((rbuf = rt_malloc(bytes_per_sector)))
    {
        /*
        ** #1: read the first, possibly unaligned, sector.
        */
        res = rt_device_read(dev, blk_pos, rbuf, 1);

        if (res == 1)
        {
            if (count > bytes_per_sector - first_offs)
            {
                rsize = bytes_per_sector - first_offs;
            }
            else
            {
                rsize = count;
            }
            rt_memcpy(buf, rbuf + first_offs, rsize);
            ++blk_pos;

            /*
            ** #2: read the remaining sectors.
            */
            while (rsize < count)
            {
                res = rt_device_read(dev, blk_pos++, rbuf, 1);

                if (res != 1)
                {
                    break;
                }

                if (count - rsize >= bytes_per_sector)
                {
                    rt_memcpy(buf + rsize, rbuf, bytes_per_sector);
                    rsize += bytes_per_sector;
                }
                else
                {
                    rt_memcpy(buf + rsize, rbuf, count - rsize);
                    rsize = count;
                }
            }

            *pos += rsize;
        }

        rt_free(rbuf);
    }

    return rsize;
}

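/*
 * DFS write hook: split the byte-oriented request into three parts. An
 * unaligned head and an unaligned tail are handled with a read-modify-write
 * of a single sector through a bounce buffer; the aligned middle part is
 * written to the device in one call.
 */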
static ssize_t blk_fops_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
    void *rbuf;
    rt_ssize_t res = 0;
    int bytes_per_sector, blk_pos, blk_count, first_offs, wsize = 0;
    struct rt_device *dev = file->vnode->data;
    struct blk_fops_data *data = dev->user_data;

    bytes_per_sector = data->geometry.bytes_per_sector;
    blk_pos = *pos / bytes_per_sector;
    first_offs = *pos % bytes_per_sector;

    /*
    ** #1: write the first, possibly unaligned, sector (read-modify-write).
    */
    if (first_offs != 0)
    {
        if (count > bytes_per_sector - first_offs)
        {
            wsize = bytes_per_sector - first_offs;
        }
        else
        {
            wsize = count;
        }

        if ((rbuf = rt_malloc(bytes_per_sector)))
        {
            res = rt_device_read(dev, blk_pos, rbuf, 1);

            if (res == 1)
            {
                rt_memcpy(rbuf + first_offs, buf, wsize);
                res = rt_device_write(dev, blk_pos, (const void *)rbuf, 1);

                if (res == 1)
                {
                    blk_pos += 1;
                    rt_free(rbuf);

                    goto _goon;
                }
            }

            rt_free(rbuf);
        }

        return 0;
    }

_goon:
    /*
    ** #2: write the aligned middle sectors in one call.
    */
    blk_count = (count - wsize) / bytes_per_sector;

    if (blk_count != 0)
    {
        res = rt_device_write(dev, blk_pos, buf + wsize, blk_count);
        wsize += res * bytes_per_sector;
        blk_pos += res;

        if (res != blk_count)
        {
            *pos += wsize;
            return wsize;
        }
    }

    /*
    ** #3: write the last, possibly partial, sector (read-modify-write).
    */
    if ((count - wsize) != 0)
    {
        if ((rbuf = rt_malloc(bytes_per_sector)))
        {
            res = rt_device_read(dev, blk_pos, rbuf, 1);

            if (res == 1)
            {
                rt_memcpy(rbuf, buf + wsize, count - wsize);
                res = rt_device_write(dev, blk_pos, (const void *)rbuf, 1);

                if (res == 1)
                {
                    wsize += count - wsize;
                }
            }

            rt_free(rbuf);
        }
    }

    *pos += wsize;
    return wsize;
}

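/* DFS flush hook: ask the driver to sync its write cache to the medium. */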
static int blk_fops_flush(struct dfs_file *file)
{
    struct rt_device *dev = file->vnode->data;

    return (int)rt_device_control(dev, RT_DEVICE_CTRL_BLK_SYNC, RT_NULL);
}

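/* DFS poll hook: no poll events are reported for block devices. */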
static int blk_fops_poll(struct dfs_file *file, struct rt_pollreq *req)
{
    int mask = 0;

    return mask;
}

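/* File operations published to DFS for block devices (POSIX device I/O). */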
static const struct dfs_file_ops blk_fops =
{
    .open = blk_fops_open,
    .close = blk_fops_close,
    .ioctl = blk_fops_ioctl,
    .read = blk_fops_read,
    .write = blk_fops_write,
    .flush = blk_fops_flush,
    .lseek = generic_dfs_lseek,
    .poll = blk_fops_poll
};

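/*
 * Attach the DFS v2 block file operations to a block device. When
 * RT_USING_POSIX_DEVIO or RT_USING_DFS_V2 is disabled this becomes a no-op
 * (see the stub below). A minimal usage sketch, assuming a driver registers
 * its own rt_device (names here are illustrative only):
 *
 *     rt_device_register(&blk_dev, "vda", RT_DEVICE_FLAG_RDWR);
 *     device_set_blk_fops(&blk_dev);
 */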
void device_set_blk_fops(struct rt_device *dev)
{
    dev->fops = &blk_fops;
}
#else
void device_set_blk_fops(struct rt_device *dev)
{
}
#endif /* RT_USING_POSIX_DEVIO && RT_USING_DFS_V2 */

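/*
 * Copy the sector size (bytes per sector) of a block device into the
 * caller-provided buffer; `args` is expected to point to an rt_uint32_t.
 */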
void device_get_blk_ssize(struct rt_device *dev, void *args)
{
    rt_uint32_t bytes_per_sector;
    struct rt_device_blk_geometry geometry;

    rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geometry);
    bytes_per_sector = geometry.bytes_per_sector;

    RT_ASSERT(sizeof(bytes_per_sector) == sizeof(geometry.bytes_per_sector));

    rt_memcpy(args, &bytes_per_sector, sizeof(bytes_per_sector));
}

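/*
 * Copy the total capacity of a block device in bytes (sector count multiplied
 * by sector size) into the caller-provided buffer; `args` is expected to
 * point to an rt_uint64_t.
 */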
void device_get_all_blk_ssize(struct rt_device *dev, void *args)
{
    rt_uint64_t count_mul_per;
    struct rt_device_blk_geometry geometry;

    rt_device_control(dev, RT_DEVICE_CTRL_BLK_GETGEOME, &geometry);
    /* Promote to 64-bit before multiplying to avoid overflow on large devices */
    count_mul_per = (rt_uint64_t)geometry.bytes_per_sector * geometry.sector_count;

    rt_memcpy(args, &count_mul_per, sizeof(count_mul_per));
}