Occurrences of the identifier `dma_list` across the kernel tree, grouped by directory and file; each entry gives the source line number, the matching snippet, and the enclosing function (with `local`/`member`/`argument` marking the symbol kind).

/drivers/crypto/intel/keembay/
ocs-hcu.c
    450  if (!dma_list)   in ocs_hcu_ll_dma_start()
    494  dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);   in ocs_hcu_dma_list_alloc()
    495  if (!dma_list)   in ocs_hcu_dma_list_alloc()
    503  kfree(dma_list);   in ocs_hcu_dma_list_alloc()
    509  return dma_list;   in ocs_hcu_dma_list_alloc()
    515  if (!dma_list)   in ocs_hcu_dma_list_free()
    519  sizeof(*dma_list->head) * dma_list->max_nents,   in ocs_hcu_dma_list_free()
    520  dma_list->head, dma_list->dma_addr);   in ocs_hcu_dma_list_free()
    522  kfree(dma_list);   in ocs_hcu_dma_list_free()
    537  if (!dma_list)   in ocs_hcu_dma_list_add_tail()
    [all …]
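The ocs-hcu.c fragments above outline an allocate/free pair for a hardware DMA linked list: a small kmalloc'd wrapper plus a coherent array of descriptors that is released with dma_free_coherent() sized by max_nents. Below is a minimal sketch of that shape; the field names head, dma_addr and max_nents come from the snippets, while the descriptor layout, the example_* names and the struct device parameter are assumptions for illustration.

```c
/*
 * Sketch only: the shape suggested by the ocs-hcu.c fragments, not the
 * driver's actual code.  The descriptor layout is a placeholder.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_ll_entry {		/* assumed HW descriptor layout */
	u32 src_addr;
	u32 src_len;
};

struct example_dma_list {
	struct example_ll_entry *head;	/* coherent array of descriptors */
	dma_addr_t dma_addr;		/* bus address of head[] */
	unsigned int max_nents;
};

static struct example_dma_list *example_dma_list_alloc(struct device *dev,
						       unsigned int max_nents)
{
	struct example_dma_list *dma_list;

	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	dma_list->head = dma_alloc_coherent(dev,
					    sizeof(*dma_list->head) * max_nents,
					    &dma_list->dma_addr, GFP_KERNEL);
	if (!dma_list->head) {
		kfree(dma_list);
		return NULL;
	}
	dma_list->max_nents = max_nents;

	return dma_list;
}

static void example_dma_list_free(struct device *dev,
				  struct example_dma_list *dma_list)
{
	if (!dma_list)
		return;

	dma_free_coherent(dev, sizeof(*dma_list->head) * dma_list->max_nents,
			  dma_list->head, dma_list->dma_addr);
	kfree(dma_list);
}
```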
|
ocs-hcu.h
    77   struct ocs_hcu_dma_list *dma_list);
    80   struct ocs_hcu_dma_list *dma_list,
    87   const struct ocs_hcu_dma_list *dma_list);
    91   const struct ocs_hcu_dma_list *dma_list,
    103  const struct ocs_hcu_dma_list *dma_list,
|
keembay-ocs-hcu-core.c
    79   struct ocs_hcu_dma_list *dma_list;   member
    209  if (rctx->dma_list) {   in kmb_ocs_hcu_dma_cleanup()
    210  ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);   in kmb_ocs_hcu_dma_cleanup()
    211  rctx->dma_list = NULL;   in kmb_ocs_hcu_dma_cleanup()
    295  rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);   in kmb_ocs_dma_prepare()
    296  if (!rctx->dma_list) {   in kmb_ocs_dma_prepare()
    303  rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,   in kmb_ocs_dma_prepare()
    328  rctx->dma_list,   in kmb_ocs_dma_prepare()
    445  rctx->dma_list, req->result, rctx->dig_sz);   in kmb_ocs_hcu_do_one_request()
    470  rctx->dma_list);   in kmb_ocs_hcu_do_one_request()
    [all …]
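keembay-ocs-hcu-core.c keeps the list in the request context (rctx->dma_list), builds it in kmb_ocs_dma_prepare() and tears it down in kmb_ocs_hcu_dma_cleanup(), always resetting the pointer to NULL. A minimal sketch of that lifecycle, assuming the ocs_hcu_dma_list_* helpers take roughly the arguments visible in the snippets; the scatterlist walk and the error label are illustrative, not the driver's exact code.

```c
/*
 * Sketch only: lifecycle implied by the fragments.  The ocs_hcu_dma_list_*
 * signatures are inferred from the calls above.
 */
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include "ocs-hcu.h"		/* declares the ocs_hcu_dma_list_* helpers */

struct example_rctx {
	struct ocs_hcu_dma_list *dma_list;
};

static int example_dma_prepare(struct ocs_hcu_dev *hcu_dev,
			       struct example_rctx *rctx,
			       struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, rc;

	rctx->dma_list = ocs_hcu_dma_list_alloc(hcu_dev, nents);
	if (!rctx->dma_list)
		return -ENOMEM;

	for_each_sg(sgl, sg, nents, i) {
		rc = ocs_hcu_dma_list_add_tail(hcu_dev, rctx->dma_list,
					       sg_dma_address(sg),
					       sg_dma_len(sg));
		if (rc)
			goto cleanup;
	}

	return 0;

cleanup:
	/* Mirror kmb_ocs_hcu_dma_cleanup(): free and NULL the pointer. */
	ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
	rctx->dma_list = NULL;
	return rc;
}
```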
|
ocs-aes.c
    424  dma_addr_t dma_list)   in dma_to_ocs_aes_ll()   argument
    427  iowrite32(dma_list,   in dma_to_ocs_aes_ll()
    433  dma_addr_t dma_list)   in dma_from_ocs_aes_ll()   argument
    436  iowrite32(dma_list,   in dma_from_ocs_aes_ll()
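In ocs-aes.c the list's bus address is simply programmed into the engine's source/destination link-list registers with iowrite32(), which implies the device consumes a 32-bit list pointer. A sketch of that idea with made-up register offsets; the real OCS AES register map is not shown in the fragments.

```c
/*
 * Sketch only: the register offsets are placeholders, not the OCS AES
 * register map.  The list address is written as a 32-bit value, as in
 * the fragments above.
 */
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_DMA_SRC_LL_REG	0x00	/* assumed offset */
#define EXAMPLE_DMA_DST_LL_REG	0x04	/* assumed offset */

static void example_dma_to_engine_ll(void __iomem *base, dma_addr_t dma_list)
{
	iowrite32(dma_list, base + EXAMPLE_DMA_SRC_LL_REG);
}

static void example_dma_from_engine_ll(void __iomem *base, dma_addr_t dma_list)
{
	iowrite32(dma_list, base + EXAMPLE_DMA_DST_LL_REG);
}
```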
|
/drivers/infiniband/hw/mthca/
mthca_allocator.c
    196  u64 *dma_list = NULL;   in mthca_buf_alloc()   local
    217  dma_list = kmalloc_array(npages, sizeof(*dma_list),   in mthca_buf_alloc()
    219  if (!dma_list)   in mthca_buf_alloc()
    223  dma_list[i] = t + i * (1 << shift);   in mthca_buf_alloc()
    229  dma_list = kmalloc_array(npages, sizeof(*dma_list),   in mthca_buf_alloc()
    231  if (!dma_list)   in mthca_buf_alloc()
    250  dma_list[i] = t;   in mthca_buf_alloc()
    258  dma_list, shift, npages,   in mthca_buf_alloc()
    266  kfree(dma_list);   in mthca_buf_alloc()
    274  kfree(dma_list);   in mthca_buf_alloc()
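mthca_buf_alloc() uses dma_list as a plain u64 array holding one bus address per page of the buffer, sized with kmalloc_array(); for a single contiguous (direct) allocation the entries are just page-sized offsets from one base address. A sketch of that direct case, with only dma_list taken from the snippets and the helper name being a placeholder:

```c
/*
 * Sketch only: the direct-allocation case suggested by the fragments, where
 * the buffer is one contiguous coherent region at bus address t and
 * dma_list[i] is a page-sized offset from it.
 */
#include <linux/slab.h>
#include <linux/types.h>

static u64 *example_build_dma_list(dma_addr_t t, int npages, int shift)
{
	u64 *dma_list;
	int i;

	dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	for (i = 0; i < npages; ++i)
		dma_list[i] = t + i * (1 << shift);	/* address of page i */

	return dma_list;
}
```

The mthca_eq.c and mlx4 eq.c fragments below follow the same allocate, fill, hand-to-firmware, kfree shape, except that each dma_list[i] holds the address of a separately allocated page.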
|
mthca_eq.c
    471  u64 *dma_list = NULL;   in mthca_create_eq()   local
    490  dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);   in mthca_create_eq()
    491  if (!dma_list)   in mthca_create_eq()
    505  dma_list[i] = t;   in mthca_create_eq()
    519  dma_list, PAGE_SHIFT, npages,   in mthca_create_eq()
    551  kfree(dma_list);   in mthca_create_eq()
    582  kfree(dma_list);   in mthca_create_eq()
|
/drivers/misc/genwqe/
card_utils.c
    238  dma_list[i] = 0x0;   in genwqe_unmap_pages()
    244  dma_addr_t *dma_list)   in genwqe_map_pages()   argument
    253  dma_list[i] = 0x0;   in genwqe_map_pages()
    266  dma_list[i] = daddr;   in genwqe_map_pages()
    271  genwqe_unmap_pages(cd, dma_list, num_pages);   in genwqe_map_pages()
    375  dma_addr_t *dma_list)   in genwqe_setup_sgl()   argument
    410  daddr = dma_list[p] + map_offs;   in genwqe_setup_sgl()
    607  m->dma_list = NULL;   in genwqe_user_vmap()
    630  if (m->dma_list)   in genwqe_user_vunmap()
    631  genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);   in genwqe_user_vunmap()
    [all …]
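genwqe's card_utils.c treats dma_list as a per-page array of bus addresses: entries are zeroed up front, filled by the mapping loop, and a partial failure unwinds through the unmap helper, which re-zeroes each slot. A sketch of that pattern using the generic DMA API; the real helpers take the genwqe card structure and carry extra bookkeeping, so names and error codes here are assumptions.

```c
/*
 * Sketch only: generic DMA API version of the map/unmap pattern implied by
 * the fragments, not the genwqe driver's actual helpers.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

static void example_unmap_pages(struct device *dev, dma_addr_t *dma_list,
				int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dma_list[i])
			dma_unmap_page(dev, dma_list[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;	/* slot is safe to unwind again */
	}
}

static int example_map_pages(struct device *dev, struct page **page_list,
			     int num_pages, dma_addr_t *dma_list)
{
	int i;

	for (i = 0; i < num_pages; i++)
		dma_list[i] = 0x0;	/* mark every slot unmapped first */

	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr = dma_map_page(dev, page_list[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

		if (dma_mapping_error(dev, daddr))
			goto err;
		dma_list[i] = daddr;
	}

	return 0;

err:
	example_unmap_pages(dev, dma_list, num_pages);
	return -EIO;
}
```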
|
card_base.h
    171  dma_addr_t *dma_list; /* list of dma addresses per page */   member
    372  dma_addr_t *dma_list);
|
card_dev.c
    965  &m->dma_list[page_offs]);   in ddcb_cmd_fixups()
|
/drivers/vfio/
vfio_iommu_type1.c
    67    struct rb_root dma_list;   member
    166   struct rb_node *node = iommu->dma_list.rb_node;   in vfio_find_dma()
    186   struct rb_node *node = iommu->dma_list.rb_node;   in vfio_find_dma_first_node()
    223   rb_insert_color(&new->node, &iommu->dma_list);   in vfio_link_dma()
    228   rb_erase(&old->node, &iommu->dma_list);   in vfio_unlink_dma()
    275   for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {   in vfio_iommu_populate_bitmap_full()
    286   for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {   in vfio_dma_bitmap_alloc_all()
    1712  n = rb_first(&iommu->dma_list);   in vfio_iommu_replay()
    2326  while ((node = rb_first(&iommu->dma_list)))   in vfio_iommu_unmap_unpin_all()
    2334  n = rb_first(&iommu->dma_list);   in vfio_iommu_unmap_unpin_reaccount()
    [all …]
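In vfio_iommu_type1.c, dma_list is not an array at all but the root of an rb-tree of IOVA mappings: vfio_link_dma()/vfio_unlink_dma() insert and erase nodes, and the walkers iterate it with rb_first()/rb_next(). The lookup suggested by vfio_find_dma() is an interval test against each node; a simplified sketch, with the struct and field names trimmed down from the real driver:

```c
/*
 * Sketch only: interval lookup over an rb-tree of IOVA mappings, as the
 * vfio_find_dma() fragment implies.  Struct and field names are simplified.
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct example_dma {
	struct rb_node node;
	dma_addr_t iova;
	size_t size;
};

static struct example_dma *example_find_dma(struct rb_root *dma_list,
					    dma_addr_t start, size_t size)
{
	struct rb_node *node = dma_list->rb_node;

	while (node) {
		struct example_dma *dma = rb_entry(node, struct example_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;	/* ranges overlap */
	}

	return NULL;
}
```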
|
/drivers/staging/vme_user/
vme.c
    890  struct vme_dma_list *dma_list;   in vme_new_dma_list()   local
    897  dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);   in vme_new_dma_list()
    898  if (!dma_list)   in vme_new_dma_list()
    901  INIT_LIST_HEAD(&dma_list->entries);   in vme_new_dma_list()
    902  dma_list->parent = list_entry(resource->entry,   in vme_new_dma_list()
    905  mutex_init(&dma_list->mtx);   in vme_new_dma_list()
    907  return dma_list;   in vme_new_dma_list()
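The vme.c fragments amount to the body of vme_new_dma_list(): allocate the list object, initialise its entry list and mutex, and link it back to the parent DMA resource. Assembled into one readable function, with error reporting and the resource-type check elided and the header name assumed:

```c
/*
 * Sketch only: the vme_new_dma_list() body pieced together from the
 * fragments above; checks and diagnostics are elided.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "vme_bridge.h"	/* assumed: provides the vme_dma_* structures */

static struct vme_dma_list *example_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_list *dma_list;

	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	INIT_LIST_HEAD(&dma_list->entries);
	dma_list->parent = list_entry(resource->entry,
				      struct vme_dma_resource, list);
	mutex_init(&dma_list->mtx);

	return dma_list;
}
```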
|
/drivers/net/ethernet/mellanox/mlx4/
eq.c
    977   u64 *dma_list = NULL;   in mlx4_create_eq()   local
    998   dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);   in mlx4_create_eq()
    999   if (!dma_list)   in mlx4_create_eq()
    1015  dma_list[i] = t;   in mlx4_create_eq()
    1033  err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);   in mlx4_create_eq()
    1053  kfree(dma_list);   in mlx4_create_eq()
    1082  kfree(dma_list);   in mlx4_create_eq()
|
/drivers/pci/endpoint/functions/
pci-epf-mhi.c
    155  struct list_head dma_list;   member
    461  list_splice_tail_init(&epf_mhi->dma_list, &head);   in pci_epf_mhi_dma_worker()
    479  list_add_tail(&transfer->node, &epf_mhi->dma_list);   in pci_epf_mhi_dma_async_callback()
    695  INIT_LIST_HEAD(&epf_mhi->dma_list);   in pci_epf_mhi_dma_init()
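pci-epf-mhi.c uses dma_list as a work queue: the DMA completion callback appends finished transfers to it, and the worker splices the whole list onto a private head before processing, so the lock is only held for the splice. A sketch of that producer/consumer pattern; the lock, work item and transfer structure names are assumptions, not the driver's real ones.

```c
/*
 * Sketch only: producer/consumer pattern implied by the fragments; the
 * structure, lock and work-item names are placeholders.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_transfer {
	struct list_head node;
};

struct example_epf_mhi {
	struct list_head dma_list;	/* completed transfers to reap */
	spinlock_t list_lock;
	struct work_struct dma_work;
};

/* DMA completion callback: queue the transfer and kick the worker. */
static void example_dma_async_callback(struct example_epf_mhi *epf_mhi,
				       struct example_transfer *transfer)
{
	unsigned long flags;

	spin_lock_irqsave(&epf_mhi->list_lock, flags);
	list_add_tail(&transfer->node, &epf_mhi->dma_list);
	spin_unlock_irqrestore(&epf_mhi->list_lock, flags);

	schedule_work(&epf_mhi->dma_work);
}

/* Worker: splice the whole list under the lock, then process it unlocked. */
static void example_dma_worker(struct work_struct *work)
{
	struct example_epf_mhi *epf_mhi =
		container_of(work, struct example_epf_mhi, dma_work);
	struct example_transfer *transfer, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&epf_mhi->list_lock, flags);
	list_splice_tail_init(&epf_mhi->dma_list, &head);
	spin_unlock_irqrestore(&epf_mhi->list_lock, flags);

	list_for_each_entry_safe(transfer, tmp, &head, node) {
		list_del(&transfer->node);
		/* complete the transfer: unmap buffers, notify the consumer */
	}
}
```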
|