| /drivers/iommu/iommufd/ |
| A D | iova_bitmap.c |
|   168 struct iova_bitmap_map *mapped = &bitmap->mapped; in iova_bitmap_get() local
|   222 struct iova_bitmap_map *mapped = &bitmap->mapped; in iova_bitmap_put()
|   225 unpin_user_pages(mapped->pages, mapped->npages); in iova_bitmap_put()
|   226 mapped->npages = 0; in iova_bitmap_put()
|   255 mapped = &bitmap->mapped; in iova_bitmap_alloc()
|   286 struct iova_bitmap_map *mapped = &bitmap->mapped; in iova_bitmap_free() local
|   307 bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff; in iova_bitmap_mapped_remaining()
|   348 (iova + length - 1) <= (mapped->iova + mapped->length - 1)); in iova_bitmap_mapped_range()
|   405 struct iova_bitmap_map *mapped = &bitmap->mapped; in iova_bitmap_set() local
|   421 mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; in iova_bitmap_set()
|   [all …]
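
The iova_bitmap hits show the lifecycle of a pinned user-page window: put unpins the pages and zeroes the count, and the remaining span is derived from npages and the page offset. A minimal sketch of that pattern; struct bitmap_window and both helpers are hypothetical names, while unpin_user_pages() and PAGE_SHIFT are real kernel symbols.

    #include <linux/mm.h>	/* unpin_user_pages(), PAGE_SHIFT */

    /* Hypothetical mirror of the window state used by the hits above. */
    struct bitmap_window {
    	struct page **pages;	/* pinned user pages backing the bitmap */
    	unsigned long npages;	/* pages currently pinned */
    	unsigned long pgoff;	/* byte offset into the first page */
    };

    /* Drop the current window, as iova_bitmap_put() does at lines 225-226. */
    static void window_put(struct bitmap_window *w)
    {
    	unpin_user_pages(w->pages, w->npages);
    	w->npages = 0;
    }

    /* Bytes still addressable through the window, cf. line 307. */
    static unsigned long window_remaining(const struct bitmap_window *w)
    {
    	return (w->npages << PAGE_SHIFT) - w->pgoff;
    }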
|
| /drivers/gpu/host1x/ |
| A D | cdma.c |
|   53 if (!pb->mapped) in host1x_pushbuffer_destroy()
|   63 pb->mapped = NULL; in host1x_pushbuffer_destroy()
|   78 pb->mapped = NULL; in host1x_pushbuffer_init()
|   95 if (!pb->mapped) in host1x_pushbuffer_init()
|   114 if (!pb->mapped) in host1x_pushbuffer_init()
|   129 dma_free_wc(host1x->dev, size, pb->mapped, pb->phys); in host1x_pushbuffer_init()
|   140 u32 *p = (u32 *)((void *)pb->mapped + pb->pos); in host1x_pushbuffer_push()
|   451 u32 *mapped = cdma->push_buffer.mapped; in host1x_cdma_update_sync_queue() local
|   471 mapped[2*slot+1] = 0x0; in host1x_cdma_update_sync_queue()
|   473 mapped[2*slot+0] = 0x1bad0000; in host1x_cdma_update_sync_queue()
|   [all …]
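
The cdma.c hits show two write paths into the CPU mapping of the push buffer: pushes go through a byte position (line 140), and stale sync-queue entries are patched in place as two-word pairs (lines 471/473). A condensed sketch; pushbuffer_write() is a hypothetical name.

    #include <linux/types.h>

    /* Write one two-word entry at byte offset 'pos' into the mapping. */
    static void pushbuffer_write(void *mapped, u32 pos, u32 op1, u32 op2)
    {
    	u32 *p = (u32 *)((u8 *)mapped + pos);

    	p[0] = op1;	/* e.g. 0x1bad0000, the marker written at line 473 */
    	p[1] = op2;
    }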
|
| /drivers/platform/chrome/ |
| A D | cros_ec_proto.c |
|   259 int ret, mapped; in cros_ec_get_host_event_wake_mask() local
|   273 if (mapped) { in cros_ec_get_host_event_wake_mask()
|   274 ret = mapped; in cros_ec_get_host_event_wake_mask()
|   376 if (mapped) { in cros_ec_get_proto_info()
|   377 ret = mapped; in cros_ec_get_proto_info()
|   445 if (mapped) { in cros_ec_get_proto_info_legacy()
|   446 ret = mapped; in cros_ec_get_proto_info_legacy()
|   519 if (mapped) { in cros_ec_get_host_command_version_mask()
|   520 ret = mapped; in cros_ec_get_host_command_version_mask()
|   725 if (mapped) { in cros_ec_cmd_xfer_status()
|   [all …]
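
The repeated "if (mapped) ret = mapped;" shape above suggests a helper that translates the EC's result code into a negative errno, which overrides the transfer length in ret only when non-zero. A hypothetical, heavily abbreviated version; the real table in cros_ec_proto.c is larger, and the specific case values here are assumptions.

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Map an EC result code to 0 or a negative errno. */
    static int map_ec_error(u32 result)
    {
    	switch (result) {
    	case 0:		/* EC_RES_SUCCESS */
    		return 0;
    	case 1:		/* EC_RES_INVALID_COMMAND (assumed value) */
    		return -EOPNOTSUPP;
    	default:
    		return -EPROTO;
    	}
    }

    /* Usage, mirroring lines 273-274:
     *	mapped = map_ec_error(msg->result);
     *	if (mapped)
     *		ret = mapped;
     */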
|
| /drivers/gpu/host1x/hw/ |
| A D | debug_hw.c |
|   215 pb->dma, pb->mapped); in show_channel_gathers()
|   219 u32 *mapped; in show_channel_gathers() local
|   227 mapped = (u32 *)job->gather_copy_mapped; in show_channel_gathers()
|   229 mapped = host1x_bo_mmap(g->bo); in show_channel_gathers()
|   231 if (!mapped) { in show_channel_gathers()
|   240 g->base, mapped); in show_channel_gathers()
|   243 host1x_bo_munmap(g->bo, mapped); in show_channel_gathers()
|
| /drivers/gpu/drm/sti/ |
| A D | NOTES |
|   39 These IP are mapped to the DRM objects as following:
|   40 - The CRTCs are mapped to the Compositor Main and Aux Mixers
|   41 - The Framebuffers and planes are mapped to the Compositor GDP (non video
|   43 - The Cursor is mapped to the Compositor Cursor
|   44 - The Encoders are mapped to the TVOut
|   45 - The Bridges/Connectors are mapped to the HDMI / DVO / HD Analog / SD analog
|
| /drivers/gpu/drm/msm/ |
| A D | msm_gem_vma.c |
|   274 if (!msm_vma->mapped) in msm_gem_vma_unmap()
|   294 msm_vma->mapped = false; in msm_gem_vma_unmap()
|   308 if (msm_vma->mapped) in msm_gem_vma_map()
|   311 msm_vma->mapped = true; in msm_gem_vma_map()
|   403 vma->mapped = false; in msm_gem_vma_new()
|   529 bool mapped = to_msm_vma(orig_vma)->mapped; in msm_gem_vm_sm_step_remap() local
|   535 if (mapped) { in msm_gem_vm_sm_step_remap()
|   581 to_msm_vma(prev_vma)->mapped = mapped; in msm_gem_vm_sm_step_remap()
|   591 to_msm_vma(next_vma)->mapped = mapped; in msm_gem_vm_sm_step_remap()
|   595 if (!mapped) in msm_gem_vm_sm_step_remap()
|   [all …]
|
| A D | msm_iommu.c |
|   141 size_t mapped = 0; in msm_iommu_pagetable_map_prr() local
|   145 ret = ops->map_pages(ops, addr, phys, size, 1, prot, GFP_KERNEL, &mapped); in msm_iommu_pagetable_map_prr()
|   150 addr += mapped; in msm_iommu_pagetable_map_prr()
|   151 len -= mapped; in msm_iommu_pagetable_map_prr()
|   193 size_t pgsize, count, mapped = 0; in msm_iommu_pagetable_map() local
|   199 prot, GFP_KERNEL, &mapped); in msm_iommu_pagetable_map()
|   204 phys += mapped; in msm_iommu_pagetable_map()
|   205 addr += mapped; in msm_iommu_pagetable_map()
|   206 size -= mapped; in msm_iommu_pagetable_map()
|   207 len -= mapped; in msm_iommu_pagetable_map()
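
The msm_iommu.c hits show the consumer side of the io-pgtable contract: ->map_pages() reports progress through its final argument, which may be less than requested, so the caller advances addr/phys and shrinks the remainder by 'mapped'. A sketch of the loop at lines 193-207, assuming SZ_4K is a supported page size and len is a multiple of it; page-size selection and error unwinding are elided.

    #include <linux/gfp.h>
    #include <linux/io-pgtable.h>
    #include <linux/sizes.h>

    static int map_range(struct io_pgtable_ops *ops, unsigned long addr,
    		     phys_addr_t phys, size_t len, int prot)
    {
    	while (len) {
    		size_t mapped = 0;
    		int ret;

    		ret = ops->map_pages(ops, addr, phys, SZ_4K, len / SZ_4K,
    				     prot, GFP_KERNEL, &mapped);
    		if (ret)
    			return ret;	/* 'mapped' still says how far we got */

    		phys += mapped;
    		addr += mapped;
    		len -= mapped;
    	}

    	return 0;
    }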
|
| /drivers/hid/ |
| A D | hid-plantronics.c |
|   73 goto mapped; in plantronics_input_mapping()
|   97 goto mapped; in plantronics_input_mapping()
|   101 goto mapped; in plantronics_input_mapping()
|   105 goto mapped; in plantronics_input_mapping()
|   124 mapped: in plantronics_input_mapping()
|
| /drivers/mtd/maps/ |
| A D | Kconfig |
|   17 ROM driver code to communicate with chips which are mapped
|   43 are mapped on your particular target board. Refer to the
|   74 and RAM driver code to communicate with chips which are mapped
|   140 tristate "CFI Flash device mapped on AMD SC520 CDP"
|   148 tristate "CFI Flash device mapped on AMD NetSc520"
|   174 tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
|   191 tristate "Flash device mapped with DOCCS on NatSemi SCx200"
|   281 tristate "CFI Flash device mapped on Hitachi SolutionEngine"
|   288 tristate "CFI Flash device mapped on StrongARM SA11x0"
|   296 tristate "CFI Flash device mapped on DC21285 Footbridge"
|   [all …]
|
| /drivers/infiniband/hw/hfi1/ |
| A D | user_exp_rcv.c |
|   137 bool mapped) in unpin_rcv_pages() argument
|   143 if (mapped) { in unpin_rcv_pages()
|   152 hfi1_release_user_pages(mm, pages, npages, mapped); in unpin_rcv_pages()
|   253 tididx = 0, mapped, mapped_pages = 0; in hfi1_user_exp_rcv_setup() local
|   335 tidlist, &tididx, &mapped); in hfi1_user_exp_rcv_setup()
|   350 mapped_pages += mapped; in hfi1_user_exp_rcv_setup()
|   380 &tididx, &mapped); in hfi1_user_exp_rcv_setup()
|   391 mapped_pages += mapped; in hfi1_user_exp_rcv_setup()
|   666 int mapped = 0; in program_rcvarray() local
|   707 mapped += npages; in program_rcvarray()
|   [all …]
|
| /drivers/dma-buf/heaps/ |
| A D | cma_heap.c |
|   51 bool mapped; member
|   76 a->mapped = false; in cma_heap_attach()
|   111 a->mapped = true; in cma_heap_map_dma_buf()
|   121 a->mapped = false; in cma_heap_unmap_dma_buf()
|   137 if (!a->mapped) in cma_heap_dma_buf_begin_cpu_access()
|   158 if (!a->mapped) in cma_heap_dma_buf_end_cpu_access()
|
| A D | system_heap.c |
|   38 bool mapped; member
|   92 a->mapped = false; in system_heap_attach()
|   128 a->mapped = true; in system_heap_map_dma_buf()
|   138 a->mapped = false; in system_heap_unmap_dma_buf()
|   154 if (!a->mapped) in system_heap_dma_buf_begin_cpu_access()
|   175 if (!a->mapped) in system_heap_dma_buf_end_cpu_access()
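
Both dma-buf heaps keep the same per-attachment flag: map_dma_buf sets it, unmap_dma_buf clears it, and the CPU-access hooks use it to skip cache maintenance for attachments with no live mapping. A minimal sketch; the struct and helper names are hypothetical, while dma_sync_sgtable_for_cpu() is the real kernel API.

    #include <linux/dma-buf.h>
    #include <linux/dma-mapping.h>

    struct heap_attachment {
    	struct device *dev;
    	struct sg_table *table;
    	bool mapped;	/* true between map_dma_buf and unmap_dma_buf */
    };

    static void heap_sync_for_cpu(struct heap_attachment *a,
    			      enum dma_data_direction dir)
    {
    	if (!a->mapped)	/* cf. cma_heap.c:137, system_heap.c:154 */
    		return;

    	dma_sync_sgtable_for_cpu(a->dev, a->table, dir);
    }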
|
| /drivers/sh/intc/ |
| A D | virq.c |
|   149 struct intc_map_entry *mapped; in intc_subgroup_init_one() local
|   154 mapped = radix_tree_lookup(&d->tree, subgroup->parent_id); in intc_subgroup_init_one()
|   155 if (!mapped) { in intc_subgroup_init_one()
|   160 pirq = mapped - intc_irq_xlate; in intc_subgroup_init_one()
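
Line 160 recovers an array index by pointer subtraction: the radix tree stores pointers into the intc_irq_xlate[] table, so "entry minus table base" yields the IRQ number. Condensed with hypothetical names:

    struct map_entry { void *desc; };
    static struct map_entry irq_xlate[64];	/* stands in for intc_irq_xlate[] */

    static unsigned int entry_to_irq(const struct map_entry *mapped)
    {
    	return mapped - irq_xlate;	/* element index == IRQ number */
    }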
|
| /drivers/dax/ |
| A D | kmem.c |
|   74 int i, rc, mapped = 0; in dev_dax_kmem_probe() local
|   154 if (mapped) in dev_dax_kmem_probe()
|   186 if (mapped) in dev_dax_kmem_probe()
|   190 mapped++; in dev_dax_kmem_probe()
|
| /drivers/mtd/ |
| A D | mtdswap.c |
|   1023 unsigned int newblock, mapped; in mtdswap_writesect() local
|   1040 mapped = d->page_data[page]; in mtdswap_writesect()
|   1041 if (mapped <= BLOCK_MAX) { in mtdswap_writesect()
|   1046 d->revmap[mapped] = PAGE_UNDEF; in mtdswap_writesect()
|   1146 unsigned int mapped; in mtdswap_discard() local
|   1151 mapped = d->page_data[page]; in mtdswap_discard()
|   1152 if (mapped <= BLOCK_MAX) { in mtdswap_discard()
|   1157 d->revmap[mapped] = PAGE_UNDEF; in mtdswap_discard()
|   1159 } else if (mapped == BLOCK_ERROR) { in mtdswap_discard()
|   1207 mapped = 0; in mtdswap_show()
|   [all …]
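
The writesect/discard hits repeat one invalidation: page_data[] is the forward page-to-block map, revmap[] its inverse, and values above BLOCK_MAX are sentinels. Retiring a page must clear both directions. A sketch under assumed sentinel values (the driver's real constants differ):

    #define BLOCK_MAX	1000u			/* assumed highest valid block */
    #define BLOCK_UNDEF	(BLOCK_MAX + 1)
    #define PAGE_UNDEF	(~0u)

    static void swap_unmap_page(unsigned int *page_data, unsigned int *revmap,
    			    unsigned int page)
    {
    	unsigned int mapped = page_data[page];

    	if (mapped <= BLOCK_MAX) {		/* cf. lines 1041 and 1152 */
    		revmap[mapped] = PAGE_UNDEF;	/* block no longer owns a page */
    		page_data[page] = BLOCK_UNDEF;	/* page no longer has a block */
    	}
    }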
|
| /drivers/gpu/drm/xe/ |
| A D | xe_hmm.c |
|   143 xe_assert(vm->xe, !userptr->mapped); in xe_hmm_userptr_set_mapped()
|   144 userptr->mapped = true; in xe_hmm_userptr_set_mapped()
|   166 if (userptr->sg && userptr->mapped) in xe_hmm_userptr_unmap()
|   169 userptr->mapped = false; in xe_hmm_userptr_unmap()
|
| /drivers/block/rnbd/ |
| A D | README |
|   11 transport. After being mapped, the remote block devices can be accessed
|   39 mapped from the server side. After the session to the server machine is
|   40 established, the mapped device will appear on the client side under
|   50 When a device is mapped from the client, the server generates the path
|
| /drivers/xen/ |
| A D | xlate_mmu.c |
|   74 int mapped; member
|   129 info->mapped++; in remap_pte_fn()
|   166 data.mapped = 0; in xen_xlate_remap_gfn_array()
|   170 return err < 0 ? err : data.mapped; in xen_xlate_remap_gfn_array()
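
The xlate_mmu hits show a compact return convention: a per-PTE callback counts successes, and the caller returns either the error or the number of frames actually remapped. A condensed sketch with hypothetical names:

    struct remap_data {
    	int mapped;	/* frames remapped so far (line 74) */
    };

    static int remap_frames(struct remap_data *data, int nr, int (*one)(void))
    {
    	int i, err = 0;

    	data->mapped = 0;
    	for (i = 0; i < nr && !err; i++) {
    		err = one();		/* stands in for remap_pte_fn() */
    		if (!err)
    			data->mapped++;	/* cf. line 129 */
    	}

    	return err < 0 ? err : data->mapped;	/* cf. line 170 */
    }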
|
| /drivers/staging/media/ipu3/ |
| A D | ipu3-mmu.c |
|   304 size_t s_length, mapped = 0; in imgu_mmu_map_sg() local
|   320 ret = imgu_mmu_map(info, iova + mapped, phys, s_length); in imgu_mmu_map_sg()
|   324 mapped += s_length; in imgu_mmu_map_sg()
|   329 return mapped; in imgu_mmu_map_sg()
|   333 imgu_mmu_unmap(info, iova, mapped); in imgu_mmu_map_sg()
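
imgu_mmu_map_sg() has the classic accumulate-and-roll-back shape: each scatterlist entry grows 'mapped', and any failure unmaps exactly the prefix already mapped. A sketch under assumptions: struct mmu and the mmu_map()/mmu_unmap() prototypes are hypothetical stand-ins for the imgu helpers.

    #include <linux/scatterlist.h>

    struct mmu;	/* hypothetical; imgu uses struct imgu_mmu_info */
    int mmu_map(struct mmu *m, unsigned long iova, phys_addr_t pa, size_t len);
    void mmu_unmap(struct mmu *m, unsigned long iova, size_t len);

    static size_t map_sg(struct mmu *m, unsigned long iova,
    		     struct scatterlist *sgl, int nents)
    {
    	struct scatterlist *sg;
    	size_t mapped = 0;
    	int i;

    	for_each_sg(sgl, sg, nents, i) {
    		if (mmu_map(m, iova + mapped, sg_phys(sg), sg->length))
    			goto out_unmap;
    		mapped += sg->length;
    	}

    	return mapped;			/* cf. line 329 */

    out_unmap:
    	mmu_unmap(m, iova, mapped);	/* roll back the partial mapping */
    	return 0;
    }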
|
| /drivers/iommu/ |
| A D | io-pgtable-arm.c |
|   427 gfp_t gfp, size_t *mapped) in __arm_lpae_map() argument
|   445 *mapped += num_entries * size; in __arm_lpae_map()
|   478 cptep, gfp, mapped); in __arm_lpae_map()
|   575 ptep, gfp, mapped); in arm_lpae_map_pages()
|   1327 size_t size, mapped; in arm_lpae_run_tests() local
|   1361 GFP_KERNEL, &mapped)) in arm_lpae_run_tests()
|   1367 GFP_KERNEL, &mapped)) in arm_lpae_run_tests()
|   1389 IOMMU_WRITE, GFP_KERNEL, &mapped)) in arm_lpae_run_tests()
|   1402 mapped = 0; in arm_lpae_run_tests()
|   1408 GFP_KERNEL, &mapped)) in arm_lpae_run_tests()
|   [all …]
|
| /drivers/md/ |
| A D | dm-rq.c |
|   204 static void dm_done(struct request *clone, blk_status_t error, bool mapped) in dm_done() argument
|   213 if (mapped && rq_end_io) in dm_done()
|   253 bool mapped = true; in dm_softirq_done() local
|   267 mapped = false; in dm_softirq_done()
|   269 dm_done(clone, tio->error, mapped); in dm_softirq_done()
|
| /drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
| A D | vmm.c |
|   830 new->mapped = vma->mapped; in nvkm_vma_tail()
|   1014 vma->mapped ? 'M' : '-', in nvkm_vma_dump()
|   1257 vma->mapped = false; in nvkm_vmm_pfn_unmap()
|   1299 bool mapped = vma->mapped; in nvkm_vmm_pfn_map() local
|   1333 if (map != mapped) { in nvkm_vmm_pfn_map()
|   1342 if ((tmp->mapped = map)) in nvkm_vmm_pfn_map()
|   1355 if (!mapped) { in nvkm_vmm_pfn_map()
|   1364 if (mapped) { in nvkm_vmm_pfn_map()
|   1400 vma->mapped = false; in nvkm_vmm_unmap_region()
|   1565 vma->mapped = true; in nvkm_vmm_map_locked()
|   [all …]
|
| /drivers/mmc/host/ |
| A D | usdhi6rol0.c |
|   155 void *mapped; /* mapped page */ member
|   325 host->head_pg.mapped = host->pg.mapped; in usdhi6_blk_bounce()
|   327 host->pg.mapped = kmap(host->pg.page); in usdhi6_blk_bounce()
|   337 memcpy(host->bounce_buf + blk_head, host->pg.mapped, in usdhi6_blk_bounce()
|   369 host->pg.mapped = kmap(host->pg.page); in usdhi6_sg_map()
|   385 host->blk_page = host->pg.mapped; in usdhi6_sg_map()
|   407 memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head, in usdhi6_sg_unmap()
|   409 memcpy(host->pg.mapped, host->bounce_buf + blk_head, in usdhi6_sg_unmap()
|   445 host->blk_page = host->pg.mapped; in usdhi6_sg_advance()
|   507 host->pg.mapped = kmap(host->pg.page); in usdhi6_sg_advance()
|   [all …]
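
The usdhi6rol0 hits show the bounce step for a 512-byte block that straddles a page boundary: the tail of the previous kmap()ed page and the head of the freshly kmap()ed one are stitched together in a bounce buffer (lines 325-337). A sketch under assumptions: the struct and helper are hypothetical, pg->page is assumed to already point at the next page, and the eventual kunmap() of the old page is elided.

    #include <linux/highmem.h>	/* kmap(), PAGE_SIZE */
    #include <linux/string.h>

    struct pg_state {
    	struct page *page;
    	void *mapped;		/* kmap()ed address of 'page' */
    };

    static void blk_bounce(void *bounce_buf, struct pg_state *head,
    		       struct pg_state *pg, size_t blk_head)
    {
    	head->mapped = pg->mapped;	/* remember the old mapping */
    	pg->mapped = kmap(pg->page);	/* map the next page, cf. line 327 */

    	/* tail of the old page, then head of the new one */
    	memcpy(bounce_buf, head->mapped + PAGE_SIZE - blk_head, blk_head);
    	memcpy(bounce_buf + blk_head, pg->mapped, 512 - blk_head);
    }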
|
| /drivers/md/dm-vdo/ |
| A D | data-vio.c |
|   1334 (unsigned long long) data_vio->mapped.pbn, in enter_read_only_mode()
|   1472 bool compressed = vdo_is_state_compressed(data_vio->mapped.state); in complete_read()
|   1542 if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) { in read_block()
|   1549 if (vdo_is_state_compressed(data_vio->mapped.state)) { in read_block()
|   1551 read_endio, REQ_OP_READ, data_vio->mapped.pbn); in read_block()
|   1557 data_vio->mapped.pbn); in read_block()
|   1566 data_vio->mapped.pbn); in read_block()
|   1665 data_vio->decrement_updater.zpbn = data_vio->mapped; in journal_remapping()
|   1668 if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) in journal_remapping()
|   1675 if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) { in journal_remapping()
|   [all …]
|
| /drivers/firmware/ |
| A D | dmi-sysfs.c |
|   384 u8 __iomem *mapped; in dmi_sel_raw_read_phys32() local
|   387 mapped = dmi_remap(sel->access_method_address, sel->area_length); in dmi_sel_raw_read_phys32()
|   388 if (!mapped) in dmi_sel_raw_read_phys32()
|   393 *(buf++) = readb(mapped + pos++); in dmi_sel_raw_read_phys32()
|   397 dmi_unmap(mapped); in dmi_sel_raw_read_phys32()
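
dmi_sel_raw_read_phys32() follows the standard MMIO copy pattern: map the event-log window, copy it out byte by byte through readb() (I/O memory must not be dereferenced directly), then unmap. A minimal sketch in which plain ioremap()/iounmap() stand in for the dmi_remap()/dmi_unmap() wrappers the driver uses.

    #include <linux/io.h>	/* ioremap(), readb(), iounmap() */

    static ssize_t copy_event_log(u8 *buf, phys_addr_t addr, size_t len)
    {
    	u8 __iomem *mapped = ioremap(addr, len);
    	size_t pos;

    	if (!mapped)
    		return -EIO;

    	for (pos = 0; pos < len; pos++)
    		*(buf++) = readb(mapped + pos);	/* cf. line 393 */

    	iounmap(mapped);
    	return len;
    }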
|