// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * block.c
 */

/*
 * This file implements the low-level routines to read and decompress
 * datablocks and metadata blocks.
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/bio.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"

/*
 * Returns the number of bytes copied to the page actor.
 */
static int copy_bio_to_actor(struct bio *bio,
			     struct squashfs_page_actor *actor,
			     int offset, int req_length)
{
	void *actor_addr;
	struct bvec_iter_all iter_all = {};
	struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
	int copied_bytes = 0;
	int actor_offset = 0;

	squashfs_actor_nobuff(actor);
	actor_addr = squashfs_first_page(actor);

	if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
		return 0;

	while (copied_bytes < req_length) {
		int bytes_to_copy = min_t(int, bvec->bv_len - offset,
					  PAGE_SIZE - actor_offset);

		bytes_to_copy = min_t(int, bytes_to_copy,
				      req_length - copied_bytes);
		if (!IS_ERR(actor_addr))
			memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
					offset, bytes_to_copy);

		actor_offset += bytes_to_copy;
		copied_bytes += bytes_to_copy;
		offset += bytes_to_copy;

		if (actor_offset >= PAGE_SIZE) {
			actor_addr = squashfs_next_page(actor);
			if (!actor_addr)
				break;
			actor_offset = 0;
		}
		if (offset >= bvec->bv_len) {
			if (!bio_next_segment(bio, &iter_all))
				break;
			offset = 0;
		}
	}
	squashfs_finish_page(actor);
	return copied_bytes;
}

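/*
 * Read the folios of @fullbio that are not already held in the device's
 * cache mapping, as one or more cloned, trimmed and chained bios, and wait
 * for them to complete.  On success, folios which were only partially
 * requested (the head and tail of the read, or every folio read when
 * CONFIG_SQUASHFS_COMP_CACHE_FULL is set) are added to @cache_mapping so
 * that reads of adjacent blocks can reuse them.
 */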
static int squashfs_bio_read_cached(struct bio *fullbio,
		struct address_space *cache_mapping, u64 index, int length,
		u64 read_start, u64 read_end, int page_count)
{
	struct folio *head_to_cache = NULL, *tail_to_cache = NULL;
	struct block_device *bdev = fullbio->bi_bdev;
	int start_idx = 0, end_idx = 0;
	struct folio_iter fi;
	struct bio *bio = NULL;
	int idx = 0;
	int err = 0;
#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
	struct folio **cache_folios = kmalloc_array(page_count,
			sizeof(*cache_folios), GFP_KERNEL | __GFP_ZERO);
#endif

	bio_for_each_folio_all(fi, fullbio) {
		struct folio *folio = fi.folio;

		if (folio->mapping == cache_mapping) {
			idx++;
			continue;
		}

		/*
		 * We only use this when the device block size is the same as
		 * the page size, so read_start and read_end cover full pages.
		 *
		 * Compare these to the original required index and length to
		 * only cache pages which were requested partially, since these
		 * are the ones which are likely to be needed when reading
		 * adjacent blocks.
		 */
		if (idx == 0 && index != read_start)
			head_to_cache = folio;
		else if (idx == page_count - 1 && index + length != read_end)
			tail_to_cache = folio;
#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
		/* Cache all pages in the BIO for repeated reads */
		else if (cache_folios)
			cache_folios[idx] = folio;
#endif

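		/*
		 * A new run of uncached folios starts here (either no bio has
		 * been allocated yet, or cached folios were skipped since the
		 * last one): allocate a fresh clone of the full bio.  The
		 * previous clone, if any, is trimmed to its run of folios,
		 * chained to the new one and submitted; the final
		 * submit_bio_wait() below then waits for the whole chain.
		 */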
		if (!bio || idx != end_idx) {
			struct bio *new = bio_alloc_clone(bdev, fullbio,
							  GFP_NOIO, &fs_bio_set);

			if (bio) {
				bio_trim(bio, start_idx * PAGE_SECTORS,
					 (end_idx - start_idx) * PAGE_SECTORS);
				bio_chain(bio, new);
				submit_bio(bio);
			}

			bio = new;
			start_idx = idx;
		}

		idx++;
		end_idx = idx;
	}

	if (bio) {
		bio_trim(bio, start_idx * PAGE_SECTORS,
			 (end_idx - start_idx) * PAGE_SECTORS);
		err = submit_bio_wait(bio);
		bio_put(bio);
	}

	if (err)
		return err;

	if (head_to_cache) {
		int ret = filemap_add_folio(cache_mapping, head_to_cache,
					    read_start >> PAGE_SHIFT,
					    GFP_NOIO);

		if (!ret) {
			folio_mark_uptodate(head_to_cache);
			folio_unlock(head_to_cache);
		}
	}

	if (tail_to_cache) {
		int ret = filemap_add_folio(cache_mapping, tail_to_cache,
					    (read_end >> PAGE_SHIFT) - 1,
					    GFP_NOIO);

		if (!ret) {
			folio_mark_uptodate(tail_to_cache);
			folio_unlock(tail_to_cache);
		}
	}

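	/*
	 * With CONFIG_SQUASHFS_COMP_CACHE_FULL, also add the remaining folios
	 * that were read from the device to the cache mapping, so repeated
	 * reads of the same compressed block can be satisfied from the cache.
	 */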
#ifdef CONFIG_SQUASHFS_COMP_CACHE_FULL
	if (!cache_folios)
		goto out;

	for (idx = 0; idx < page_count; idx++) {
		if (!cache_folios[idx])
			continue;
		int ret = filemap_add_folio(cache_mapping, cache_folios[idx],
					    (read_start >> PAGE_SHIFT) + idx,
					    GFP_NOIO);

		if (!ret) {
			folio_mark_uptodate(cache_folios[idx]);
			folio_unlock(cache_folios[idx]);
		}
	}
	kfree(cache_folios);
out:
#endif
	return 0;
}

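/*
 * Return the page at @index in @mapping, with a reference held, if it is
 * present and up to date; otherwise return NULL.
 */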
static struct page *squashfs_get_cache_page(struct address_space *mapping,
					    pgoff_t index)
{
	struct page *page;

	if (!mapping)
		return NULL;

	page = find_get_page(mapping, index);
	if (!page)
		return NULL;

	if (!PageUptodate(page)) {
		put_page(page);
		return NULL;
	}

	return page;
}

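/*
 * Read @length bytes at byte offset @index from the filesystem, rounding the
 * range out to device block boundaries.  Pages already up to date in the
 * cache mapping (if one is in use) are reused, the rest are allocated and
 * read from the device.  On success *biop returns the bio holding the data
 * and *block_offset the offset of @index within the first device block.
 */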
static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
			     struct bio **biop, int *block_offset)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct address_space *cache_mapping = msblk->cache_mapping;
	const u64 read_start = round_down(index, msblk->devblksize);
	const sector_t block = read_start >> msblk->devblksize_log2;
	const u64 read_end = round_up(index + length, msblk->devblksize);
	const sector_t block_end = read_end >> msblk->devblksize_log2;
	int offset = read_start - round_down(index, PAGE_SIZE);
	int total_len = (block_end - block) << msblk->devblksize_log2;
	const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);
	int error, i;
	struct bio *bio;

	bio = bio_kmalloc(page_count, GFP_NOIO);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
	bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);

	for (i = 0; i < page_count; ++i) {
		unsigned int len =
			min_t(unsigned int, PAGE_SIZE - offset, total_len);
		pgoff_t index = (read_start >> PAGE_SHIFT) + i;
		struct page *page;

		page = squashfs_get_cache_page(cache_mapping, index);
		if (!page)
			page = alloc_page(GFP_NOIO);

		if (!page) {
			error = -ENOMEM;
			goto out_free_bio;
		}

		/*
		 * Use the __ version to avoid merging since we need each page
		 * to be separate when we check for and avoid cached pages.
		 */
		__bio_add_page(bio, page, len, offset);
		offset = 0;
		total_len -= len;
	}

	if (cache_mapping)
		error = squashfs_bio_read_cached(bio, cache_mapping, index,
						 length, read_start, read_end,
						 page_count);
	else
		error = submit_bio_wait(bio);
	if (error)
		goto out_free_bio;

	*biop = bio;
	*block_offset = index & ((1 << msblk->devblksize_log2) - 1);
	return 0;

out_free_bio:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return error;
}

/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
 */
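/*
 * The compressed/uncompressed flag and the size are unpacked with the
 * SQUASHFS_COMPRESSED*() helpers from squashfs_fs.h: the _BLOCK variants
 * for datablock lengths, the plain variants for the two-byte metadata
 * length field.
 */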
int squashfs_read_data(struct super_block *sb, u64 index, int length,
		       u64 *next_index, struct squashfs_page_actor *output)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct bio *bio = NULL;
	int compressed;
	int res;
	int offset;

	if (length) {
		/*
		 * Datablock.
		 */
		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
			index, compressed ? "" : "un", length, output->length);
	} else {
		/*
		 * Metadata block.
		 */
		const u8 *data;
		struct bvec_iter_all iter_all = {};
		struct bio_vec *bvec = bvec_init_iter_all(&iter_all);

		if (index + 2 > msblk->bytes_used) {
			res = -EIO;
			goto out;
		}
		res = squashfs_bio_read(sb, index, 2, &bio, &offset);
		if (res)
			goto out;

		if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
			res = -EIO;
			goto out_free_bio;
		}
		/* Extract the length of the metadata block */
		data = bvec_virt(bvec);
		length = data[offset];
		if (offset < bvec->bv_len - 1) {
			length |= data[offset + 1] << 8;
		} else {
			if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
				res = -EIO;
				goto out_free_bio;
			}
			data = bvec_virt(bvec);
			length |= data[0] << 8;
		}
		bio_free_pages(bio);
		bio_uninit(bio);
		kfree(bio);

		compressed = SQUASHFS_COMPRESSED(length);
		length = SQUASHFS_COMPRESSED_SIZE(length);
		index += 2;

		TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
		      compressed ? "" : "un", length);
	}
	if (length <= 0 || length > output->length ||
			(index + length) > msblk->bytes_used) {
		res = -EIO;
		goto out;
	}

	if (next_index)
		*next_index = index + length;

	res = squashfs_bio_read(sb, index, length, &bio, &offset);
	if (res)
		goto out;

	if (compressed) {
		if (!msblk->stream) {
			res = -EIO;
			goto out_free_bio;
		}
		res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
	} else {
		res = copy_bio_to_actor(bio, output, offset, length);
	}

out_free_bio:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out:
	if (res < 0) {
		ERROR("Failed to read block 0x%llx: %d\n", index, res);
		if (msblk->panic_on_errors)
			panic("squashfs read failed");
	}

	return res;
}