/drivers/xen/privcmd.c
    643  if (copy_from_user(&kdata, udata, sizeof(kdata)))  in privcmd_ioctl_dm_op()
    650  if (kdata.num == 0)  in privcmd_ioctl_dm_op()
    705  rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);  in privcmd_ioctl_dm_op()
    745  if (copy_from_user(&kdata, udata, sizeof(kdata)))  in privcmd_ioctl_mmap_resource()
    753  if (!!kdata.addr != !!kdata.num)  in privcmd_ioctl_mmap_resource()
    758  xdata.id = kdata.id;  in privcmd_ioctl_mmap_resource()
    760  if (!kdata.addr && !kdata.num) {  in privcmd_ioctl_mmap_resource()
    816  rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);  in privcmd_ioctl_mmap_resource()
    856  if (copy_from_user(&kdata, udata, sizeof(kdata)))  in privcmd_ioctl_pcidev_get_gsi()
    863  kdata.gsi = rc;  in privcmd_ioctl_pcidev_get_gsi()
    [all …]
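These privcmd hits show the simplest shape of the kdata idiom: the ioctl argument is a small fixed-size struct, copied onto the kernel stack in one shot and validated before anything acts on it. A minimal sketch of that shape; the argument layout (`struct dm_op_args`) is illustrative, not the real privcmd UAPI, and the buffer bouncing before the hypercall is elided:

```c
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative argument layout; the real struct lives in the privcmd UAPI. */
struct dm_op_args {
	unsigned int dom;	/* target domain */
	unsigned int num;	/* number of user buffers */
	void __user *ubufs;	/* array of buffer descriptors */
};

static long dm_op_sketch(void __user *udata)
{
	struct dm_op_args kdata;

	/* One fixed-size copy; a short read means a bad user pointer. */
	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* Validate before allocating or touching anything else. */
	if (kdata.num == 0)
		return -EINVAL;

	/* ... bounce the kdata.num user buffers into kernel memory, then
	 * issue the hypercall on the validated kernel-resident copy ... */
	return 0;
}
```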
|
/drivers/dma-buf/dma-heap.c
    129  char *kdata = stack_kdata;  in dma_heap_ioctl() local
    154  kdata = kmalloc(ksize, GFP_KERNEL);  in dma_heap_ioctl()
    155  if (!kdata)  in dma_heap_ioctl()
    159  if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {  in dma_heap_ioctl()
    166  memset(kdata + in_size, 0, ksize - in_size);  in dma_heap_ioctl()
    170  ret = dma_heap_ioctl_allocate(file, kdata);  in dma_heap_ioctl()
    177  if (copy_to_user((void __user *)arg, kdata, out_size) != 0)  in dma_heap_ioctl()
    180  if (kdata != stack_kdata)  in dma_heap_ioctl()
    181  kfree(kdata);  in dma_heap_ioctl()
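dma_heap_ioctl() shows the full marshalling pattern around kdata: a small on-stack buffer for the common case, kmalloc() for oversized commands, a zero-fill of the tail so handlers reading the kernel's full struct size see defined values, and a copy back of only the output bytes. A condensed sketch, assuming in_size/out_size/ksize were already derived from the command word and with the per-command dispatch elided:

```c
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long ioctl_marshal_sketch(void __user *uarg, unsigned int in_size,
				 unsigned int out_size, unsigned int ksize)
{
	char stack_kdata[128];		/* illustrative size */
	char *kdata = stack_kdata;
	long ret = 0;

	/* Fall back to the heap only when the command payload is large. */
	if (ksize > sizeof(stack_kdata)) {
		kdata = kmalloc(ksize, GFP_KERNEL);
		if (!kdata)
			return -ENOMEM;
	}

	if (copy_from_user(kdata, uarg, in_size)) {
		ret = -EFAULT;
		goto out;
	}

	/* Zero the tail so code reading the full ksize struct sees
	 * well-defined values even for an older, shorter user struct. */
	if (ksize > in_size)
		memset(kdata + in_size, 0, ksize - in_size);

	/* ... dispatch to the per-command handler operating on kdata ... */

	if (copy_to_user(uarg, kdata, out_size))
		ret = -EFAULT;
out:
	if (kdata != stack_kdata)
		kfree(kdata);
	return ret;
}
```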
|
/drivers/gpu/drm/drm_ioctl.c
    796  return func(dev, kdata, file_priv);  in drm_ioctl_kernel()
    823  char *kdata = NULL;  in drm_ioctl() local
    876  kdata = stack_kdata;  in drm_ioctl()
    878  kdata = kmalloc(ksize, GFP_KERNEL);  in drm_ioctl()
    879  if (!kdata) {  in drm_ioctl()
    885  if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {  in drm_ioctl()
    891  memset(kdata + in_size, 0, ksize - in_size);  in drm_ioctl()
    893  retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags);  in drm_ioctl()
    894  if (copy_to_user((void __user *)arg, kdata, out_size) != 0)  in drm_ioctl()
    905  if (kdata != stack_kdata)  in drm_ioctl()
    [all …]
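drm_ioctl() fills the same kind of kdata buffer, but the in_size/out_size pair it copies with is derived from the direction bits encoded in the ioctl command word. A simplified sketch of that derivation (the real function also reconciles these sizes against the driver's ioctl table, which is elided here):

```c
#include <linux/ioctl.h>

static void ioctl_copy_sizes_sketch(unsigned int cmd,
				    unsigned int *in_size,
				    unsigned int *out_size)
{
	unsigned int size = _IOC_SIZE(cmd);	/* payload size in the command */

	*in_size  = (cmd & IOC_IN)  ? size : 0;	/* bytes copied from userspace */
	*out_size = (cmd & IOC_OUT) ? size : 0;	/* bytes copied back */
}
```

Only in_size bytes are trusted from userspace; the gap up to the kernel's ksize is zeroed (line 891 above) before drm_ioctl_kernel() hands kdata to the handler.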
|
/drivers/hid/bpf/hid_bpf_struct_ops.c
    151  void *kdata, const void *udata)  in hid_bpf_ops_init_member() argument
    158  khid_bpf_ops = (struct hid_bpf_ops *)kdata;  in hid_bpf_ops_init_member()
    180  static int hid_bpf_reg(void *kdata, struct bpf_link *link)  in hid_bpf_reg() argument
    182  struct hid_bpf_ops *ops = kdata;  in hid_bpf_reg()
    239  static void hid_bpf_unreg(void *kdata, struct bpf_link *link)  in hid_bpf_unreg() argument
    241  struct hid_bpf_ops *ops = kdata;  in hid_bpf_unreg()
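Here kdata is not an ioctl bounce buffer but the kernel-resident copy of a BPF struct_ops map: the BPF core hands it to each callback, and the subsystem casts it to its own ops type. A sketch of the three callbacks' shape, assuming an illustrative `struct my_ops` with one scalar member in place of the real `struct hid_bpf_ops`:

```c
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct my_ops {				/* illustrative stand-in for hid_bpf_ops */
	int hid_id;			/* scalar member userspace may set */
	int (*device_event)(void *ctx);	/* filled in by the BPF core */
};

static int my_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	struct my_ops *kops = (struct my_ops *)kdata;
	const struct my_ops *uops = udata;
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct my_ops, hid_id):
		kops->hid_id = uops->hid_id;	/* copy scalar from user copy */
		return 1;			/* member handled here */
	}
	return 0;	/* 0 = let the BPF core apply default handling */
}

static int my_ops_reg(void *kdata, struct bpf_link *link)
{
	struct my_ops *ops = kdata;

	/* Attach point: the real driver looks up the HID device here. */
	return ops->hid_id >= 0 ? 0 : -EINVAL;
}

static void my_ops_unreg(void *kdata, struct bpf_link *link)
{
	struct my_ops *ops = kdata;

	ops->device_event = NULL;	/* illustrative teardown only */
}
```

Function pointers in the map are wired up by the BPF core itself; init_member only copies the plain data members the kernel side needs to see.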
|
/drivers/gpu/drm/radeon/radeon_cs.c
    109  r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];  in radeon_cs_parser_relocs()
    350  if (p->chunks[i].kdata == NULL) {  in radeon_cs_parser_init()
    353  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in radeon_cs_parser_init()
    357  p->cs_flags = p->chunks[i].kdata[0];  in radeon_cs_parser_init()
    359  ring = p->chunks[i].kdata[1];  in radeon_cs_parser_init()
    361  priority = (s32)p->chunks[i].kdata[2];  in radeon_cs_parser_init()
    458  kvfree(parser->chunks[i].kdata);  in radeon_cs_parser_fini()
    662  if (ib_chunk->kdata)  in radeon_cs_ib_fill()
    663  memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);  in radeon_cs_ib_fill()
    886  (u64)relocs_chunk->kdata[idx + 3] << 32;  in radeon_cs_packet_next_reloc()
    [all …]
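radeon copies each command-stream chunk into a kernel kdata buffer and then reads the flags chunk as raw u32 words, with the word meanings the snippet shows (word 0 flags, word 1 ring, word 2 signed priority). A sketch of that sequence; the surrounding parser state and chunk bookkeeping are elided:

```c
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int flags_chunk_sketch(u32 **kdata_out, u32 *cs_flags, u32 *ring,
			      s32 *priority, const void __user *cdata,
			      size_t size)
{
	u32 *kdata = kvmalloc(size, GFP_KERNEL);

	if (kdata == NULL)
		return -ENOMEM;
	if (copy_from_user(kdata, cdata, size)) {
		kvfree(kdata);
		return -EFAULT;
	}

	*cs_flags = kdata[0];		/* word 0: CS flags */
	*ring     = kdata[1];		/* word 1: target ring */
	*priority = (s32)kdata[2];	/* word 2: signed priority */

	/* Kept for later passes; released with kvfree(), as in
	 * radeon_cs_parser_fini() (line 458 above). */
	*kdata_out = kdata;
	return 0;
}
```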
|
/drivers/gpu/drm/radeon/radeon.h
    1011  uint32_t *kdata;  member
    1051  if (ibc->kdata)  in radeon_get_ib_value()
    1052  return ibc->kdata[idx];  in radeon_get_ib_value()
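The header entry shows where kdata pays off at parse time: radeon_get_ib_value() reads IB words from the kernel copy when one exists. A sketch of that accessor with assumed minimal struct shapes; the fallback of reading the mapped IB is an assumption about the elided else branch:

```c
#include <linux/types.h>

struct cs_chunk_sketch {
	u32 *kdata;	/* kernel copy of the chunk, or NULL */
};

static inline u32 get_ib_value_sketch(const struct cs_chunk_sketch *ibc,
				      const u32 *mapped_ib, int idx)
{
	if (ibc->kdata)
		return ibc->kdata[idx];	/* bounced copy (lines 1051-1052) */
	return mapped_ib[idx];		/* assumed fallback: mapped IB */
}
```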
|
/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
    229  if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_pass1()
    235  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in amdgpu_cs_pass1()
    247  ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);  in amdgpu_cs_pass1()
    256  ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,  in amdgpu_cs_pass1()
    270  ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);  in amdgpu_cs_pass1()
    340  kvfree(p->chunks[i].kdata);  in amdgpu_cs_pass1()
    355  struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;  in amdgpu_cs_p2_ib()
    412  struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;  in amdgpu_cs_p2_dependencies()
    484  struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;  in amdgpu_cs_p2_syncobj_in()
    522  struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;  in amdgpu_cs_p2_syncobj_out()
    [all …]
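amdgpu splits the work across two passes: pass 1 copies each chunk from userspace into kdata, and pass 2 reinterprets the raw buffer as the chunk-id-specific UAPI struct. A sketch of one pass-2 handler; the range check is illustrative of the validation every field still needs, since kdata holds untrusted user input:

```c
#include <linux/errno.h>
#include <drm/amdgpu_drm.h>

struct chunk_sketch {
	void *kdata;		/* raw chunk payload copied in pass 1 */
};

static int p2_ib_sketch(struct chunk_sketch *chunk)
{
	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;

	/* kdata came from copy_from_user(), so every field is untrusted. */
	if (chunk_ib->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;

	/* ... map the IB and queue it on the requested ring ... */
	return 0;
}
```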
|
/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
    40  void *kdata;  member
|
/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
    872  key = kdata + offset;  in nfp_fl_ct_add_offload()
    880  key = kdata + offset;  in nfp_fl_ct_add_offload()
    893  key = kdata + offset;  in nfp_fl_ct_add_offload()
    916  key = kdata + offset;  in nfp_fl_ct_add_offload()
    926  key = kdata + offset;  in nfp_fl_ct_add_offload()
    942  key = kdata + offset;  in nfp_fl_ct_add_offload()
    953  key = kdata + offset;  in nfp_fl_ct_add_offload()
    964  key = kdata + offset;  in nfp_fl_ct_add_offload()
    975  key = kdata + offset;  in nfp_fl_ct_add_offload()
    986  key = kdata + offset;  in nfp_fl_ct_add_offload()
    [all …]
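The repeated `key = kdata + offset` lines reflect a different use of the name: here kdata is one flat hardware-offload key buffer, and each match section is written at its precomputed offset before the offset advances to the next section. A sketch of that layout idiom; the section struct and field choice are illustrative, not the NFP firmware format:

```c
#include <linux/types.h>

struct ipv4_section {		/* illustrative match section */
	__be32 saddr;
	__be32 daddr;
};

static void write_key_section_sketch(void *kdata, u32 offset,
				     __be32 saddr, __be32 daddr)
{
	/* void-pointer arithmetic is a GNU C extension the kernel relies on */
	struct ipv4_section *key = kdata + offset;

	key->saddr = saddr;
	key->daddr = daddr;
	/* the caller advances offset by the section size it reserved */
}
```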
|
/drivers/accel/habanalabs/common/habanalabs_ioctl.c
    1233  char *kdata = NULL;  in _hl_ioctl() local
    1257  kdata = stack_kdata;  in _hl_ioctl()
    1259  kdata = kzalloc(asize, GFP_KERNEL);  in _hl_ioctl()
    1260  if (!kdata) {  in _hl_ioctl()
    1268  if (copy_from_user(kdata, (void __user *)arg, usize)) {  in _hl_ioctl()
    1274  retcode = func(hpriv, kdata);  in _hl_ioctl()
    1276  if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))  in _hl_ioctl()
    1285  if (kdata != stack_kdata)  in _hl_ioctl()
    1286  kfree(kdata);  in _hl_ioctl()
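_hl_ioctl() is the same marshalling idiom with two small twists: the heap path uses kzalloc(), so the buffer arrives pre-zeroed and needs no separate tail memset, and the copy back is gated on the command's output-direction bit. A short sketch of both choices:

```c
#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void *hl_alloc_kdata_sketch(size_t asize)
{
	/* kzalloc() pre-zeroes, so the tail beyond the user-supplied usize
	 * bytes is already 0 (contrast the kmalloc+memset path above). */
	return kzalloc(asize, GFP_KERNEL);
}

static long hl_copy_back_sketch(unsigned int cmd, void __user *uarg,
				const void *kdata, unsigned int usize)
{
	/* Mirrors line 1276: only out-direction commands copy results back. */
	if ((cmd & IOC_OUT) && copy_to_user(uarg, kdata, usize))
		return -EFAULT;
	return 0;
}
```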
|
/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
    3250  char *kdata = NULL;  in kfd_ioctl() local
    3317  kdata = stack_kdata;  in kfd_ioctl()
    3319  kdata = kmalloc(asize, GFP_KERNEL);  in kfd_ioctl()
    3320  if (!kdata) {  in kfd_ioctl()
    3326  memset(kdata + usize, 0, asize - usize);  in kfd_ioctl()
    3330  if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {  in kfd_ioctl()
    3335  memset(kdata, 0, usize);  in kfd_ioctl()
    3338  retcode = func(filep, process, kdata);  in kfd_ioctl()
    3341  if (copy_to_user((void __user *)arg, kdata, usize) != 0)  in kfd_ioctl()
    3349  if (kdata != stack_kdata)  in kfd_ioctl()
    [all …]
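kfd_ioctl() handles both transfer directions explicitly: write-direction commands copy usize bytes in and zero the tail up to the kernel's asize, while read-only commands zero the whole buffer so the handler never sees stale memory. A sketch of that initialization step; the exact direction test is assumed to follow the usual _IOC encoding:

```c
#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int kdata_init_sketch(char *kdata, unsigned int cmd,
			     unsigned int usize, unsigned int asize,
			     void __user *uarg)
{
	if (cmd & IOC_IN) {		/* userspace supplies input */
		if (copy_from_user(kdata, uarg, usize))
			return -EFAULT;
		/* Newer kernel struct, older userspace: zero the tail. */
		memset(kdata + usize, 0, asize - usize);
	} else {
		/* Output-only command: hand the handler a clean buffer. */
		memset(kdata, 0, usize);
	}
	return 0;
}
```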
|