/system/dev/block/ftl/test/
oob_doubler_test.cpp
     99  op.rw.length = 5;                      in QueueDisabledTest()
    100  op.rw.offset_nand = 6;                 in QueueDisabledTest()
    101  op.rw.offset_data_vmo = 7;             in QueueDisabledTest()
    102  op.rw.offset_oob_vmo = 8;              in QueueDisabledTest()
    107  EXPECT_EQ(5, result->rw.length);       in QueueDisabledTest()
    108  EXPECT_EQ(6, result->rw.offset_nand);  in QueueDisabledTest()
    121  op.rw.length = 5;                      in QueueEnabledTest()
    122  op.rw.offset_nand = 6;                 in QueueEnabledTest()
    123  op.rw.offset_data_vmo = 7;             in QueueEnabledTest()
    124  op.rw.offset_oob_vmo = 8;              in QueueEnabledTest()
    [all …]
|
nand_driver_test.cpp
     48  if (operation->rw.command == NAND_OP_READ) {         in NandQueue()
     56  operation->rw.corrected_bit_flips = ecc_bits_;       in NandQueue()
     57  } else if (operation->rw.command == NAND_OP_WRITE) { in NandQueue()
    171  EXPECT_EQ(2 * 2, operation->rw.length);              in ReadTest()
    172  EXPECT_EQ(5 * 2, operation->rw.offset_nand);         in ReadTest()
    173  EXPECT_EQ(0, operation->rw.offset_data_vmo);         in ReadTest()
    174  EXPECT_EQ(2 * 2, operation->rw.offset_oob_vmo);      in ReadTest()
    237  EXPECT_EQ(2 * 2, operation->rw.length);              in WriteTest()
    238  EXPECT_EQ(5 * 2, operation->rw.offset_nand);         in WriteTest()
    239  EXPECT_EQ(0, operation->rw.offset_data_vmo);         in WriteTest()
    [all …]
|
nand_operation_test.cpp
     23  EXPECT_EQ(0, op->rw.data_vmo);                  in SetDataVmoTest()
     27  EXPECT_NE(0, op->rw.data_vmo);                  in SetDataVmoTest()
     28  EXPECT_NE(ZX_HANDLE_INVALID, op->rw.data_vmo);  in SetDataVmoTest()
     39  EXPECT_EQ(0, op->rw.oob_vmo);                   in SetOobVmoTest()
     43  EXPECT_NE(0, op->rw.oob_vmo);                   in SetOobVmoTest()
     44  EXPECT_NE(ZX_HANDLE_INVALID, op->rw.oob_vmo);   in SetOobVmoTest()
|
/system/dev/block/zxcrypt/
extra.cpp
     33  if (add_overflow(block->rw.offset_dev, reserved_blocks, &block->rw.offset_dev)) {  in Init()
     35  block->rw.offset_dev);              in Init()
     38  vmo = block->rw.vmo;                in Init()
     39  length = block->rw.length;          in Init()
     40  offset_dev = block->rw.offset_dev;  in Init()
     41  offset_vmo = block->rw.offset_vmo;  in Init()
|
worker.cpp
    126  if (mul_overflow(block->rw.length, device_->block_size(), &length) ||      in EncryptWrite()
    127  mul_overflow(block->rw.offset_dev, device_->block_size(), &offset_dev) ||  in EncryptWrite()
    131  block->rw.length, block->rw.offset_dev, extra->offset_vmo);                in EncryptWrite()
    155  if (mul_overflow(block->rw.length, device_->block_size(), &length) ||      in DecryptRead()
    156  mul_overflow(block->rw.offset_dev, device_->block_size(), &offset_dev) ||  in DecryptRead()
    157  mul_overflow(block->rw.offset_vmo, device_->block_size(), &offset_vmo)) {  in DecryptRead()
    160  block->rw.length, block->rw.offset_dev, block->rw.offset_vmo);             in DecryptRead()
    168  if ((rc = zx_vmar_map(root, flags, 0, block->rw.vmo, offset_vmo, length, &address)) != ZX_OK) {  in DecryptRead()
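
The zxcrypt worker converts the block-denominated rw fields to byte quantities before mapping the VMO, refusing the I/O when any multiplication would wrap. A minimal sketch of that pattern; Zircon's mul_overflow wraps the same checked-arithmetic compiler builtins used here, and BlockRwOp is a simplified stand-in for the real op's rw section:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for the rw section of the block op seen above.
    struct BlockRwOp {
        uint32_t length;      // transfer length, in blocks
        uint64_t offset_dev;  // device offset, in blocks
        uint64_t offset_vmo;  // VMO offset, in blocks
    };

    // Convert block units to byte units, failing instead of wrapping.
    // __builtin_mul_overflow returns true when the product does not fit.
    bool BlocksToBytes(const BlockRwOp& op, uint64_t block_size,
                       uint64_t* length, uint64_t* offset_dev, uint64_t* offset_vmo) {
        if (__builtin_mul_overflow(static_cast<uint64_t>(op.length), block_size, length) ||
            __builtin_mul_overflow(op.offset_dev, block_size, offset_dev) ||
            __builtin_mul_overflow(op.offset_vmo, block_size, offset_vmo)) {
            return false;  // reject the request rather than map a bogus range
        }
        return true;
    }

    int main() {
        BlockRwOp op{16, 1024, 0};
        uint64_t len, dev, vmo;
        if (BlocksToBytes(op, 512, &len, &dev, &vmo)) {
            printf("bytes: length=%" PRIu64 " offset_dev=%" PRIu64 " offset_vmo=%" PRIu64 "\n",
                   len, dev, vmo);
        }
        return 0;
    }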
|
/system/dev/nand/nand/
nand.c
    103  if (nand_op->rw.data_vmo != ZX_HANDLE_INVALID) {     in nand_read_op()
    119  if (nand_op->rw.oob_vmo != ZX_HANDLE_INVALID) {      in nand_read_op()
    140  for (uint32_t i = 0; i < nand_op->rw.length; i++) {  in nand_read_op()
    146  status, nand_op->rw.offset_nand);                    in nand_read_op()
    191  if (nand_op->rw.data_vmo != ZX_HANDLE_INVALID) {     in nand_write_op()
    207  if (nand_op->rw.oob_vmo != ZX_HANDLE_INVALID) {      in nand_write_op()
    231  nand_op->rw.offset_nand);                            in nand_write_op()
    345  if (op->rw.offset_nand >= dev->num_nand_pages || !op->rw.length ||  in _nand_queue()
    346  (dev->num_nand_pages - op->rw.offset_nand) < op->rw.length) {       in _nand_queue()
    350  if (op->rw.data_vmo == ZX_HANDLE_INVALID &&          in _nand_queue()
    [all …]
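
The _nand_queue() entries above show the validation performed before an op touches hardware: the page range must be non-empty and fit on the device, and at least one buffer must be attached. A sketch of the same logic, with a simplified NandRwOp stand-in (the handle type and invalid value are placeholders for zx_handle_t / ZX_HANDLE_INVALID):

    #include <cstdint>

    using handle_t = uint32_t;              // stand-in for zx_handle_t
    constexpr handle_t kInvalidHandle = 0;  // stand-in for ZX_HANDLE_INVALID

    // Simplified stand-in for the rw section of the nand op seen above.
    struct NandRwOp {
        uint32_t length;       // pages to transfer
        uint32_t offset_nand;  // first page on the device
        handle_t data_vmo;     // data buffer, or kInvalidHandle
        handle_t oob_vmo;      // OOB buffer, or kInvalidHandle
    };

    // Mirrors the checks in _nand_queue(): the page range must be non-empty
    // and lie entirely on the device (phrased so the subtraction cannot
    // underflow), and at least one destination buffer must be supplied.
    bool ValidateRwOp(const NandRwOp& op, uint32_t num_nand_pages) {
        if (op.offset_nand >= num_nand_pages || !op.length ||
            (num_nand_pages - op.offset_nand) < op.length) {
            return false;  // out of range or zero-length
        }
        if (op.data_vmo == kInvalidHandle && op.oob_vmo == kInvalidHandle) {
            return false;  // nowhere to put (or take) the data
        }
        return true;
    }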
|
/system/dev/block/ftl/
oob_doubler.cpp
     26  operation->rw.length *= 2;           in Queue()
     27  operation->rw.offset_nand *= 2;      in Queue()
     28  operation->rw.offset_data_vmo *= 2;  in Queue()
     29  operation->rw.offset_oob_vmo *= 2;   in Queue()
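
OobDoubler::Queue() scales every page-denominated field by two before forwarding: the layer above sees pages with doubled OOB, while the parent device works in pages half that size. A sketch of the transform (NandRwOp is a simplified stand-in for the real op's rw section):

    #include <cstdint>

    // Page-denominated fields of the op, as touched by Queue() above.
    struct NandRwOp {
        uint32_t length;
        uint32_t offset_nand;
        uint64_t offset_data_vmo;
        uint64_t offset_oob_vmo;
    };

    // One "big" page upstairs corresponds to two physical pages downstairs,
    // so every page count and page offset doubles on the way down.
    void ScaleToParentPages(NandRwOp* op) {
        op->length *= 2;
        op->offset_nand *= 2;
        op->offset_data_vmo *= 2;
        op->offset_oob_vmo *= 2;
    }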
|
nand_driver.cpp
     91  op->rw.command = NAND_OP_READ;       in NandRead()
     92  op->rw.offset_nand = start_page;     in NandRead()
     93  op->rw.length = page_count;          in NandRead()
    106  op->rw.offset_oob_vmo = data_pages;  in NandRead()
    128  if (op->rw.corrected_bit_flips > info_.ecc_bits) {      in NandRead()
    137  if (op->rw.corrected_bit_flips > info_.ecc_bits / 2) {  in NandRead()
    154  op->rw.command = NAND_OP_WRITE;      in NandWrite()
    155  op->rw.offset_nand = start_page;     in NandWrite()
    156  op->rw.length = page_count;          in NandWrite()
    170  op->rw.offset_oob_vmo = data_pages;  in NandWrite()
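
NandRead() applies two thresholds to corrected_bit_flips: above info_.ecc_bits the data cannot be trusted, and above half that the read still succeeds but the block should be rewritten before it degrades further. A sketch of the comparison structure only; the verdict names are mine, and the real code returns FTL-specific status codes:

    #include <cstdint>

    enum class ReadVerdict {
        kOk,       // clean enough
        kRewrite,  // data good, but the block is wearing: relocate soon
        kUnsafe,   // too many corrected bits to trust the data
    };

    // Two-level ECC policy as implied by NandRead(): fail above ecc_bits,
    // warn above ecc_bits / 2.
    ReadVerdict ClassifyRead(uint32_t corrected_bit_flips, uint32_t ecc_bits) {
        if (corrected_bit_flips > ecc_bits) {
            return ReadVerdict::kUnsafe;
        }
        if (corrected_bit_flips > ecc_bits / 2) {
            return ReadVerdict::kRewrite;
        }
        return ReadVerdict::kOk;
    }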
|
nand_operation.cpp
     23  operation->rw.data_vmo = mapper_.vmo().get();  in SetDataVmo()
     37  operation->rw.oob_vmo = mapper_.vmo().get();   in SetOobVmo()
|
/system/dev/nand/ram-nand/
ram-nand.cpp
    246  if (operation->rw.offset_nand >= max_pages || !operation->rw.length ||  in NandQueue()
    247  (max_pages - operation->rw.offset_nand) < operation->rw.length) {       in NandQueue()
    251  if (operation->rw.data_vmo == ZX_HANDLE_INVALID &&            in NandQueue()
    252  operation->rw.oob_vmo == ZX_HANDLE_INVALID) {                 in NandQueue()
    358  if (operation->rw.data_vmo == ZX_HANDLE_INVALID) {            in ReadWriteData()
    364  uint32_t length = operation->rw.length * params_.page_size;   in ReadWriteData()
    368  operation->rw.corrected_bit_flips = 0;                        in ReadWriteData()
    378  (operation->rw.offset_nand + operation->rw.length - 1)        in ReadWriteData()
    386  if (operation->rw.oob_vmo == ZX_HANDLE_INVALID) {             in ReadWriteOob()
    392  uint32_t length = operation->rw.length * params_.oob_size;    in ReadWriteOob()
    [all …]
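
ReadWriteData() and ReadWriteOob() turn the same page range into byte spans with different per-page units (page_size for data, oob_size for OOB). A sketch of that offset math, ignoring where each region starts inside the backing store:

    #include <cstddef>
    #include <cstdint>

    // A byte range derived from a page-denominated operation.
    struct ByteSpan {
        size_t offset;
        size_t length;
    };

    // Same page range, different unit: pass page_size for the data copy
    // and oob_size for the OOB copy, as the two functions above do.
    ByteSpan SpanFor(uint32_t offset_nand, uint32_t length, uint32_t unit_size) {
        return {static_cast<size_t>(offset_nand) * unit_size,
                static_cast<size_t>(length) * unit_size};
    }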
|
/system/dev/block/sdmmc/
sdmmc.c
     58  "extra", TA_INT32(bop->rw.extra),      in block_complete()
     59  "length", TA_INT32(bop->rw.length),    in block_complete()
    195  if ((btxn->rw.offset_dev >= max) || ((max - btxn->rw.offset_dev) < btxn->rw.length)) {  in sdmmc_queue()
    199  if (btxn->rw.length == 0) {            in sdmmc_queue()
    293  if (txn->bop.rw.length > 1) {          in sdmmc_do_txn()
    302  if (txn->bop.rw.length > 1) {          in sdmmc_do_txn()
    323  txn->bop.command, txn->bop.rw.offset_vmo, txn->bop.rw.length,  in sdmmc_do_txn()
    330  req->arg = txn->bop.rw.offset_dev;     in sdmmc_do_txn()
    331  req->blockcount = txn->bop.rw.length;  in sdmmc_do_txn()
    343  req->dma_vmo = txn->bop.rw.vmo;        in sdmmc_do_txn()
    [all …]
|
/system/dev/nand/ram-nand/test/
ram-nand.cpp
    421  op->rw.length = 1;              in QueueOneTest()
    457  op->rw.length = num_pages;      in SetForWrite()
    458  op->rw.offset_nand = offset;    in SetForWrite()
    464  op->rw.command = NAND_OP_READ;  in SetForRead()
    465  op->rw.length = num_pages;      in SetForRead()
    466  op->rw.offset_nand = offset;    in SetForRead()
    491  op->rw.command = NAND_OP_READ;  in ReadWriteTest()
    616  op->rw.length = 1;              in OobLimitsTest()
    633  op->rw.length = 5;              in OobLimitsTest()
    666  op->rw.length = 2;              in ReadWriteOobTest()
    [all …]
|
/system/dev/block/fvm/
vpartition.cpp
    344  if (txn->rw.length == 0) {                                    in BlockImplQueue()
    347  } else if ((txn->rw.offset_dev >= device_capacity) ||         in BlockImplQueue()
    348  (device_capacity - txn->rw.offset_dev < txn->rw.length)) {    in BlockImplQueue()
    358  size_t vslice_end = (txn->rw.offset_dev + txn->rw.length - 1) / blocks_per_slice;  in BlockImplQueue()
    411  uint32_t length_remaining = txn->rw.length;                   in BlockImplQueue()
    416  uint64_t offset_vmo = txn->rw.offset_vmo;                     in BlockImplQueue()
    419  length = fbl::round_up(txn->rw.offset_dev + 1, blocks_per_slice) - txn->rw.offset_dev;  in BlockImplQueue()
    439  txns[i]->rw.offset_vmo = offset_vmo;                          in BlockImplQueue()
    440  txns[i]->rw.length = static_cast<uint32_t>(length);           in BlockImplQueue()
    443  txns[i]->rw.offset_dev += (txn->rw.offset_dev % blocks_per_slice);  in BlockImplQueue()
    [all …]
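
BlockImplQueue() splits a request that crosses virtual-slice boundaries into per-slice sub-transactions; the first piece runs from offset_dev up to the next slice boundary, which is what the fbl::round_up expression above computes. A sketch of that first-chunk calculation, assuming blocks_per_slice and the remaining length are known:

    #include <algorithm>
    #include <cstdint>

    // Round v up to the next multiple of m (what fbl::round_up does here).
    constexpr uint64_t RoundUp(uint64_t v, uint64_t m) {
        return ((v + m - 1) / m) * m;
    }

    // Blocks covered by the first sub-transaction: from offset_dev up to
    // the next slice boundary (hence offset_dev + 1 rounded up), capped by
    // what the request still needs.
    uint64_t FirstChunkBlocks(uint64_t offset_dev, uint64_t length_remaining,
                              uint64_t blocks_per_slice) {
        uint64_t to_boundary = RoundUp(offset_dev + 1, blocks_per_slice) - offset_dev;
        return std::min(to_boundary, length_remaining);
    }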
|
/system/dev/nand/broker/
broker.cpp
    161  op->rw.command = command;                          in Queue()
    166  op->rw.length = request.length;                    in Queue()
    167  op->rw.offset_nand = request.offset_nand;          in Queue()
    168  op->rw.offset_data_vmo = request.offset_data_vmo;  in Queue()
    169  op->rw.offset_oob_vmo = request.offset_oob_vmo;    in Queue()
    170  op->rw.data_vmo = request.data_vmo ? request.vmo : ZX_HANDLE_INVALID;  in Queue()
    171  op->rw.oob_vmo = request.oob_vmo ? request.vmo : ZX_HANDLE_INVALID;    in Queue()
    184  *corrected_bits = op->rw.corrected_bit_flips;      in Queue()
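
Queue() copies the broker request into the nand op field by field, with one twist: the request carries a single VMO plus two flags saying whether to attach it as the data buffer, the OOB buffer, or both. A sketch of that fan-out, with simplified stand-in types:

    #include <cstdint>

    using handle_t = uint32_t;              // stand-in for zx_handle_t
    constexpr handle_t kInvalidHandle = 0;  // stand-in for ZX_HANDLE_INVALID

    // Simplified stand-ins for the broker request and the op's rw section.
    struct BrokerRequest {
        handle_t vmo;  // one VMO shared by the data and OOB attachments
        uint32_t length;
        uint32_t offset_nand;
        uint64_t offset_data_vmo;
        uint64_t offset_oob_vmo;
        bool data_vmo;  // attach the VMO as the data buffer?
        bool oob_vmo;   // attach the VMO as the OOB buffer?
    };

    struct NandRwOp {
        uint32_t command;
        uint32_t length;
        uint32_t offset_nand;
        uint64_t offset_data_vmo;
        uint64_t offset_oob_vmo;
        handle_t data_vmo;
        handle_t oob_vmo;
    };

    // The request's single VMO fans out to either, both, or neither slot.
    void FillOp(const BrokerRequest& request, uint32_t command, NandRwOp* op) {
        op->command = command;
        op->length = request.length;
        op->offset_nand = request.offset_nand;
        op->offset_data_vmo = request.offset_data_vmo;
        op->offset_oob_vmo = request.offset_oob_vmo;
        op->data_vmo = request.data_vmo ? request.vmo : kInvalidHandle;
        op->oob_vmo = request.oob_vmo ? request.vmo : kInvalidHandle;
    }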
|
/system/dev/nand/nandpart/
aml-bad-block.cpp
    170  nand_op->rw.command = NAND_OP_WRITE;   in WritePages()
    172  nand_op->rw.oob_vmo = oob_vmo_.get();  in WritePages()
    173  nand_op->rw.length = num_pages;        in WritePages()
    174  nand_op->rw.offset_nand = nand_page;   in WritePages()
    175  nand_op->rw.offset_data_vmo = 0;       in WritePages()
    176  nand_op->rw.offset_oob_vmo = 0;        in WritePages()
    232  nand_op->rw.command = NAND_OP_READ;    in ReadPages()
    235  nand_op->rw.length = num_pages;        in ReadPages()
    236  nand_op->rw.offset_nand = nand_page;   in ReadPages()
    237  nand_op->rw.offset_data_vmo = 0;       in ReadPages()
    [all …]
|
/system/dev/nand/skip-block/
skip-block.cpp
     62  op->rw.offset_data_vmo += ctx->nand_info->pages_per_block;  in ReadCompletionCallback()
    108  op->rw.command = NAND_OP_WRITE;                   in EraseCompletionCallback()
    109  op->rw.data_vmo = ctx->op.vmo;                    in EraseCompletionCallback()
    110  op->rw.oob_vmo = ZX_HANDLE_INVALID;               in EraseCompletionCallback()
    111  op->rw.length = ctx->nand_info->pages_per_block;  in EraseCompletionCallback()
    113  op->rw.offset_data_vmo = ctx->op.vmo_offset;      in EraseCompletionCallback()
    311  nand_op->rw.command = NAND_OP_READ;               in Read()
    312  nand_op->rw.data_vmo = op.vmo;                    in Read()
    313  nand_op->rw.oob_vmo = ZX_HANDLE_INVALID;          in Read()
    314  nand_op->rw.length = nand_info_.pages_per_block;  in Read()
    [all …]
|
/system/dev/block/ahci/
sata.c
     75  .rw.vmo = vmo,              in sata_device_identify()
     76  .rw.length = 1,             in sata_device_identify()
     77  .rw.offset_dev = 0,         in sata_device_identify()
     78  .rw.offset_vmo = 0,         in sata_device_identify()
    241  if (bop->rw.length == 0) {  in sata_queue()
    246  if ((bop->rw.offset_dev >= dev->info.block_count) ||             in sata_queue()
    247  ((dev->info.block_count - bop->rw.offset_dev) < bop->rw.length)) {  in sata_queue()
|
/system/dev/block/ramdisk/
ramdisk.c
    107  size_t txn_blocks = txn->op.rw.length;  in worker_thread()
    116  size_t dev_offset = txn->op.rw.offset_dev * dev->blk_size;       in worker_thread()
    117  size_t vmo_offset = txn->op.rw.offset_vmo * dev->blk_size;       in worker_thread()
    135  status = zx_vmo_read(txn->op.rw.vmo, addr, vmo_offset, length);  in worker_thread()
    137  if (status == ZX_OK && blocks < txn->op.rw.length && defer) {    in worker_thread()
    144  txn->op.rw.length -= blocks;      in worker_thread()
    145  txn->op.rw.offset_vmo += blocks;  in worker_thread()
    146  txn->op.rw.offset_dev += blocks;  in worker_thread()
    324  if ((txn->op.rw.offset_dev >= ramdev->blk_count) ||                   in ramdisk_queue()
    325  ((ramdev->blk_count - txn->op.rw.offset_dev) < txn->op.rw.length)) {  in ramdisk_queue()
    [all …]
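
When the worker completes only part of a transaction, it shrinks length and advances both offsets by the same block count, so the op then describes exactly the untransferred tail and can be re-queued as-is. A sketch of that bookkeeping (BlockRwOp is a simplified stand-in):

    #include <cstdint>

    // Block-denominated rw fields, as in the ramdisk's txn->op.rw.
    struct BlockRwOp {
        uint32_t length;      // blocks remaining
        uint64_t offset_dev;  // device offset, in blocks
        uint64_t offset_vmo;  // VMO offset, in blocks
    };

    // After transferring `blocks` blocks, slide the op forward in lockstep:
    // the same structure now covers only what is left to do.
    void AdvanceOp(BlockRwOp* op, uint32_t blocks) {
        op->length -= blocks;
        op->offset_vmo += blocks;
        op->offset_dev += blocks;
    }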
|
/system/dev/block/gpt/
gpt.cpp
    168  size_t blocks = bop->rw.length;              in gpt_queue()
    172  if ((bop->rw.offset_dev >= max) ||           in gpt_queue()
    173  ((max - bop->rw.offset_dev) < blocks)) {     in gpt_queue()
    179  bop->rw.offset_dev += gpt->gpt_entry.first;  in gpt_queue()
    275  bop->rw.vmo = vmo;                           in gpt_bind_thread()
    276  bop->rw.length = 1;                          in gpt_bind_thread()
    277  bop->rw.offset_dev = 1;                      in gpt_bind_thread()
    278  bop->rw.offset_vmo = 0;                      in gpt_bind_thread()
    309  bop->rw.vmo = vmo;                           in gpt_bind_thread()
    313  bop->rw.offset_dev = header.entries;         in gpt_bind_thread()
    [all …]
|
/system/dev/block/mbr/
mbr.c
    134  size_t blocks = bop->rw.length;           in mbr_queue()
    138  if ((bop->rw.offset_dev >= max) ||        in mbr_queue()
    139  ((max - bop->rw.offset_dev) < blocks)) {  in mbr_queue()
    145  bop->rw.offset_dev += mbr->partition.start_sector_lba;  in mbr_queue()
    236  bop->rw.vmo = vmo;                        in mbr_bind_thread()
    237  bop->rw.length = iosize / block_info.block_size;  in mbr_bind_thread()
    238  bop->rw.offset_dev = 0;                   in mbr_bind_thread()
    239  bop->rw.offset_vmo = 0;                   in mbr_bind_thread()
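
gpt_queue() and mbr_queue() share the same pattern: validate the request against the partition's extent, then rebase offset_dev by the partition's first LBA before forwarding the op to the parent device. A sketch of that translation:

    #include <cstdint>

    // Block-denominated rw fields of the op being forwarded.
    struct BlockRwOp {
        uint32_t length;      // blocks
        uint64_t offset_dev;  // partition-relative on entry
    };

    // Validate against the partition size, then rebase to absolute LBAs.
    // Returns false for out-of-range requests (the drivers complete those
    // with an error instead of forwarding them).
    bool TranslateToParent(BlockRwOp* op, uint64_t first_lba,
                           uint64_t partition_blocks) {
        if (op->offset_dev >= partition_blocks ||
            (partition_blocks - op->offset_dev) < op->length) {
            return false;
        }
        op->offset_dev += first_lba;  // now relative to the whole device
        return true;
    }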
|
/system/dev/bus/virtio/
block.cpp
    306  req->sector = txn->op.rw.offset_dev;                       in QueueTxn()
    346  size_t page0_offset = txn->op.rw.offset_vmo & PAGE_MASK;   in QueueTxn()
    383  uint64_t suboffset = txn->op.rw.offset_vmo & PAGE_MASK;    in pin_pages()
    384  uint64_t aligned_offset = txn->op.rw.offset_vmo & ~PAGE_MASK;  in pin_pages()
    392  zx_handle_t vmo = txn->op.rw.vmo;                          in pin_pages()
    409  if ((txn->op.rw.offset_dev >= config_.capacity) ||         in SignalWorker()
    410  (config_.capacity - txn->op.rw.offset_dev < txn->op.rw.length)) {  in SignalWorker()
    416  if (txn->op.rw.length == 0) {                              in SignalWorker()
    478  txn->op.rw.offset_vmo *= config_.blk_size;                 in WorkerThread()
    479  bytes = txn->op.rw.length * config_.blk_size;              in WorkerThread()
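
pin_pages() splits the byte offset into a page-aligned part for the pin call and a sub-page remainder for the first descriptor; offset_vmo is already in bytes here because WorkerThread scales it by the block size first. A sketch of the split, assuming a 4 KiB page:

    #include <cstdint>

    constexpr uint64_t kPageSize = 4096;           // assumed page size
    constexpr uint64_t kPageMask = kPageSize - 1;  // analogous to PAGE_MASK

    struct PinSplit {
        uint64_t aligned_offset;  // page-aligned start handed to the pin call
        uint64_t suboffset;       // remainder folded into the first descriptor
    };

    // Pinning must start on a page boundary, so split the byte offset into
    // an aligned part and the sub-page remainder, exactly as pin_pages()
    // does with PAGE_MASK above.
    PinSplit SplitForPinning(uint64_t offset_vmo_bytes) {
        return {offset_vmo_bytes & ~kPageMask, offset_vmo_bytes & kPageMask};
    }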
|
/system/dev/nand/nandpart/test/
aml-bad-block-test.cpp
    116  zx::vmo data_vmo(op->rw.data_vmo);    in MockQueue()
    119  op->rw.offset_data_vmo,               in MockQueue()
    120  op->rw.length * kPageSize,            in MockQueue()
    129  zx::vmar::root_self()->unmap(data_buf, op->rw.length * kPageSize);  in MockQueue()
    132  zx::vmo oob_vmo(op->rw.oob_vmo);      in MockQueue()
    133  status = zx::vmar::root_self()->map(0, oob_vmo, op->rw.offset_oob_vmo,  in MockQueue()
    134  op->rw.length * kOobSize,             in MockQueue()
    142  zx::vmar::root_self()->unmap(oob_buf, op->rw.length * kOobSize);  in MockQueue()
    149  for (uint16_t i = 0; i < op->rw.length; i++) {  in MockQueue()
    150  auto it = context->table_entries.find(op->rw.offset_nand + i);  in MockQueue()
    [all …]
|
/system/dev/block/block/
block.cpp
    219  op->rw.length = static_cast<uint32_t>(sub_txn_length / block_size);  in DoIo()
    220  op->rw.vmo = io_vmo_.get();                                in DoIo()
    221  op->rw.offset_dev = (off + sub_txn_offset) / block_size;   in DoIo()
    222  op->rw.offset_vmo = 0;                                     in DoIo()
    286  stats_.total_blocks_read += op->rw.length;                 in BlockQueue()
    287  stats_.total_blocks += op->rw.length;                      in BlockQueue()
    290  stats_.total_blocks_written += op->rw.length;              in BlockQueue()
    291  stats_.total_blocks += op->rw.length;                      in BlockQueue()
|
/system/dev/block/nvme/
nvme.c
    309  zx_handle_t vmo = txn->op.rw.vmo;            in io_process_txn()
    354  cmd.u.rw.start_lba = txn->op.rw.offset_dev;  in io_process_txn()
    355  cmd.u.rw.block_count = blocks - 1;           in io_process_txn()
    381  txn->op.rw.offset_dev += blocks;             in io_process_txn()
    382  txn->op.rw.offset_vmo += bytes;              in io_process_txn()
    383  txn->op.rw.length -= blocks;                 in io_process_txn()
    389  if (txn->op.rw.length == 0) {                in io_process_txn()
    469  txn->op.rw.length = 0;                       in io_process_cpls()
    546  if (txn->op.rw.length == 0) {                in nvme_queue()
    552  (nvme->info.block_count - txn->op.rw.offset_dev < txn->op.rw.length)) {  in nvme_queue()
    [all …]
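
io_process_txn() submits the transfer in chunks: start_lba takes offset_dev directly, but block_count is zero-based (hence blocks - 1), per NVMe convention, and the op's fields advance after each chunk until length reaches zero. The byte-count bump to offset_vmo suggests it has been scaled to bytes by this point; the sketch assumes so:

    #include <cstdint>

    struct BlockRwOp {
        uint32_t length;      // blocks still to transfer
        uint64_t offset_dev;  // next LBA
        uint64_t offset_vmo;  // next VMO offset, assumed in bytes here
    };

    // NVMe encodes transfer sizes zero-based: a field value of 0 means one
    // block. Callers guarantee blocks >= 1 (nvme_queue rejects zero-length).
    uint16_t NvmeBlockCount(uint32_t blocks) {
        return static_cast<uint16_t>(blocks - 1);
    }

    // Advance past one submitted chunk; the txn is done at length == 0.
    bool AdvanceChunk(BlockRwOp* op, uint32_t blocks, uint64_t block_size) {
        op->offset_dev += blocks;
        op->offset_vmo += blocks * block_size;
        op->length -= blocks;
        return op->length == 0;
    }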
|
/system/dev/block/usb-mass-storage/
usb-mass-storage.c
    277  zx_status_t status = usb_request_init(req, txn->op.rw.vmo, offset, length, ep_address);  in ums_data_transfer()
    302  zx_off_t block_offset = txn->op.rw.offset_dev;             in ums_read()
    303  uint32_t num_blocks = txn->op.rw.length;                   in ums_read()
    309  zx_off_t vmo_offset = txn->op.rw.offset_vmo * block_size;  in ums_read()
    367  zx_off_t block_offset = txn->op.rw.offset_dev;             in ums_write()
    368  uint32_t num_blocks = txn->op.rw.length;                   in ums_write()
    374  zx_off_t vmo_offset = txn->op.rw.offset_vmo * block_size;  in ums_write()
    630  txn->op.rw.length, txn->op.rw.offset_dev, status);         in ums_worker_thread()
    636  txn->op.rw.length, txn->op.rw.offset_dev, status);         in ums_worker_thread()
    662  txn->op.rw.length, txn->op.rw.offset_dev);                 in ums_worker_thread()
    [all …]
|