| /linux/kernel/ |
| capability.c |
    147  struct __user_cap_data_struct kdata[2];  in SYSCALL_DEFINE2() local
    168  kdata[0].effective = pE.val; kdata[1].effective = pE.val >> 32;  in SYSCALL_DEFINE2()
    169  kdata[0].permitted = pP.val; kdata[1].permitted = pP.val >> 32;  in SYSCALL_DEFINE2()
    170  kdata[0].inheritable = pI.val; kdata[1].inheritable = pI.val >> 32;  in SYSCALL_DEFINE2()
    191  if (copy_to_user(dataptr, kdata, tocopy * sizeof(kdata[0])))  in SYSCALL_DEFINE2()
    222  struct __user_cap_data_struct kdata[2] = { { 0, }, };  in SYSCALL_DEFINE2() local
    241  if (copybytes > sizeof(kdata))  in SYSCALL_DEFINE2()
    244  if (copy_from_user(&kdata, data, copybytes))  in SYSCALL_DEFINE2()
    247  effective = mk_kernel_cap(kdata[0].effective, kdata[1].effective);  in SYSCALL_DEFINE2()
    248  permitted = mk_kernel_cap(kdata[0].permitted, kdata[1].permitted);  in SYSCALL_DEFINE2()
    [all …]
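The capability.c hits show sys_capget() and sys_capset() translating between the kernel's 64-bit capability sets and the legacy two-element, 32-bits-per-word user ABI. A minimal stand-alone sketch of the same split/recombine arithmetic (plain C that mirrors, but does not reuse, the kernel's mk_kernel_cap()):

    #include <stdint.h>
    #include <stdio.h>

    /* Legacy 32-bit halves, shaped like struct __user_cap_data_struct. */
    struct cap_word { uint32_t effective, permitted, inheritable; };

    /* Split a 64-bit effective set into the two words userspace sees. */
    static void split_cap(uint64_t eff, struct cap_word out[2])
    {
        out[0].effective = (uint32_t)eff;           /* low 32 bits  */
        out[1].effective = (uint32_t)(eff >> 32);   /* high 32 bits */
    }

    /* Recombine the two words, as capset() does before storing the set. */
    static uint64_t combine_cap(const struct cap_word in[2])
    {
        return (uint64_t)in[0].effective | ((uint64_t)in[1].effective << 32);
    }

    int main(void)
    {
        struct cap_word w[2] = { { 0 } };
        uint64_t eff = (1ULL << 38) | (1ULL << 7);  /* two arbitrary capability bits */

        split_cap(eff, w);
        printf("low=%#x high=%#x roundtrip ok=%d\n",
               w[0].effective, w[1].effective, combine_cap(w) == eff);
        return 0;
    }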
|
| /linux/drivers/xen/ |
| privcmd.c |
    643  if (copy_from_user(&kdata, udata, sizeof(kdata)))  in privcmd_ioctl_dm_op()
    650  if (kdata.num == 0)  in privcmd_ioctl_dm_op()
    705  rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);  in privcmd_ioctl_dm_op()
    745  if (copy_from_user(&kdata, udata, sizeof(kdata)))  in privcmd_ioctl_mmap_resource()
    753  if (!!kdata.addr != !!kdata.num)  in privcmd_ioctl_mmap_resource()
    758  xdata.id = kdata.id;  in privcmd_ioctl_mmap_resource()
    760  if (!kdata.addr && !kdata.num) {  in privcmd_ioctl_mmap_resource()
    816  rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);  in privcmd_ioctl_mmap_resource()
    856  if (copy_from_user(&kdata, udata, sizeof(kdata)))  in privcmd_ioctl_pcidev_get_gsi()
    863  kdata.gsi = rc;  in privcmd_ioctl_pcidev_get_gsi()
    [all …]
|
| /linux/kernel/trace/ |
| trace_hwlat.c |
    167  if (!kdata->kthread)  in trace_hwlat_callback()
    178  kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;  in trace_hwlat_callback()
    182  kdata->nmi_count++;  in trace_hwlat_callback()
    216  kdata->nmi_total_ts = 0;  in get_sample()
    217  kdata->nmi_count = 0;  in get_sample()
    287  if (kdata->nmi_total_ts)  in get_sample()
    295  s.nmi_count = kdata->nmi_count;  in get_sample()
    402  kthread = kdata->kthread;  in stop_single_kthread()
    408  kdata->kthread = NULL;  in stop_single_kthread()
    429  if (kdata->kthread)  in start_single_kthread()
    [all …]
|
| /linux/net/ipv4/ |
| bpf_tcp_ca.c |
    212  void *kdata, const void *udata)  in bpf_tcp_ca_init_member() argument
    219  tcp_ca = (struct tcp_congestion_ops *)kdata;  in bpf_tcp_ca_init_member()
    238  static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)  in bpf_tcp_ca_reg() argument
    240  return tcp_register_congestion_control(kdata);  in bpf_tcp_ca_reg()
    243  static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link)  in bpf_tcp_ca_unreg() argument
    245  tcp_unregister_congestion_control(kdata);  in bpf_tcp_ca_unreg()
    248  static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link)  in bpf_tcp_ca_update() argument
    250  return tcp_update_congestion_control(kdata, old_kdata);  in bpf_tcp_ca_update()
    253  static int bpf_tcp_ca_validate(void *kdata)  in bpf_tcp_ca_validate() argument
    255  return tcp_validate_congestion_control(kdata);  in bpf_tcp_ca_validate()
|
| /linux/drivers/dma-buf/ |
| dma-heap.c |
    129  char *kdata = stack_kdata;  in dma_heap_ioctl() local
    154  kdata = kmalloc(ksize, GFP_KERNEL);  in dma_heap_ioctl()
    155  if (!kdata)  in dma_heap_ioctl()
    159  if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {  in dma_heap_ioctl()
    166  memset(kdata + in_size, 0, ksize - in_size);  in dma_heap_ioctl()
    170  ret = dma_heap_ioctl_allocate(file, kdata);  in dma_heap_ioctl()
    177  if (copy_to_user((void __user *)arg, kdata, out_size) != 0)  in dma_heap_ioctl()
    180  if (kdata != stack_kdata)  in dma_heap_ioctl()
    181  kfree(kdata);  in dma_heap_ioctl()
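The dma-heap.c hits show the kernel side of a common ioctl marshaling pattern: the argument struct is copied into kdata (a small stack buffer, or a kmalloc'd one for larger commands), the unsupplied tail is zeroed, the handler runs on the kernel copy, and the result is copied back out. A minimal user-space sketch that exercises this path with DMA_HEAP_IOCTL_ALLOC, assuming a "system" heap is exposed at /dev/dma_heap/system:

    /* A minimal sketch of exercising dma_heap_ioctl() from user space.
     * Assumes a "system" heap is exposed at /dev/dma_heap/system. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
        struct dma_heap_allocation_data alloc;
        int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

        if (heap_fd < 0) {
            perror("open");
            return 1;
        }

        memset(&alloc, 0, sizeof(alloc));
        alloc.len = 4096;                    /* one page */
        alloc.fd_flags = O_RDWR | O_CLOEXEC;

        /* The kernel copies this struct into kdata, fills alloc.fd on success,
         * and copies it back out (the copy_to_user() hit at line 177 above). */
        if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
            perror("DMA_HEAP_IOCTL_ALLOC");
            close(heap_fd);
            return 1;
        }

        printf("got dma-buf fd %u\n", alloc.fd);
        close(alloc.fd);
        close(heap_fd);
        return 0;
    }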
|
| /linux/tools/testing/selftests/bpf/bpf_test_no_cfi/ |
| bpf_test_no_cfi.c |
    20  void *kdata, const void *udata)  in dummy_init_member() argument
    25  static int dummy_reg(void *kdata, struct bpf_link *link)  in dummy_reg() argument
    30  static void dummy_unreg(void *kdata, struct bpf_link *link)  in dummy_unreg() argument
|
| /linux/drivers/gpu/drm/ |
| drm_ioctl.c |
    745  return func(dev, kdata, file_priv);  in drm_ioctl_kernel()
    772  char *kdata = NULL;  in drm_ioctl() local
    825  kdata = stack_kdata;  in drm_ioctl()
    827  kdata = kmalloc(ksize, GFP_KERNEL);  in drm_ioctl()
    828  if (!kdata) {  in drm_ioctl()
    834  if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {  in drm_ioctl()
    840  memset(kdata + in_size, 0, ksize - in_size);  in drm_ioctl()
    842  retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags);  in drm_ioctl()
    843  if (copy_to_user((void __user *)arg, kdata, out_size) != 0)  in drm_ioctl()
    854  if (kdata != stack_kdata)  in drm_ioctl()
    [all …]
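drm_ioctl() applies the same marshaling as dma-heap above, but sizes the copies from the ioctl number and the driver's ioctl table: only the direction-flagged portion of the argument is copied into or out of kdata, and anything the caller did not supply is zero-filled. A user-space sketch that drives this path with DRM_IOCTL_VERSION, assuming /dev/dri/card0 exists and the UAPI header is installed as <drm/drm.h>:

    /* A small sketch of driving drm_ioctl() from user space with
     * DRM_IOCTL_VERSION. Assumes /dev/dri/card0 exists and the UAPI header
     * is available as <drm/drm.h>. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/drm.h>

    int main(void)
    {
        struct drm_version v;
        int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* First pass: name pointer left NULL, so only the lengths and version
         * numbers come back through the kdata copy-out. */
        memset(&v, 0, sizeof(v));
        if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) {
            perror("DRM_IOCTL_VERSION");
            return 1;
        }

        /* Second pass: with name_len known, fetch the driver name itself. */
        v.name = calloc(1, v.name_len + 1);
        if (!v.name || ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) {
            perror("DRM_IOCTL_VERSION");
            return 1;
        }

        printf("driver %s %d.%d.%d\n", v.name,
               v.version_major, v.version_minor, v.version_patchlevel);
        free(v.name);
        close(fd);
        return 0;
    }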
|
| /linux/drivers/hid/bpf/ |
| hid_bpf_struct_ops.c |
    151  void *kdata, const void *udata)  in hid_bpf_ops_init_member() argument
    158  khid_bpf_ops = (struct hid_bpf_ops *)kdata;  in hid_bpf_ops_init_member()
    180  static int hid_bpf_reg(void *kdata, struct bpf_link *link)  in hid_bpf_reg() argument
    182  struct hid_bpf_ops *ops = kdata;  in hid_bpf_reg()
    239  static void hid_bpf_unreg(void *kdata, struct bpf_link *link)  in hid_bpf_unreg() argument
    241  struct hid_bpf_ops *ops = kdata;  in hid_bpf_unreg()
|
| /linux/drivers/gpu/drm/radeon/ |
| radeon_cs.c |
    109  r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];  in radeon_cs_parser_relocs()
    353  if (p->chunks[i].kdata == NULL) {  in radeon_cs_parser_init()
    356  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in radeon_cs_parser_init()
    360  p->cs_flags = p->chunks[i].kdata[0];  in radeon_cs_parser_init()
    362  ring = p->chunks[i].kdata[1];  in radeon_cs_parser_init()
    364  priority = (s32)p->chunks[i].kdata[2];  in radeon_cs_parser_init()
    458  kvfree(parser->chunks[i].kdata);  in radeon_cs_parser_fini()
    662  if (ib_chunk->kdata)  in radeon_cs_ib_fill()
    663  memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);  in radeon_cs_ib_fill()
    886  (u64)relocs_chunk->kdata[idx + 3] << 32;  in radeon_cs_packet_next_reloc()
    [all …]
|
| radeon.h |
    1013  uint32_t *kdata;  member
    1053  if (ibc->kdata)  in radeon_get_ib_value()
    1054  return ibc->kdata[idx];  in radeon_get_ib_value()
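radeon.h keeps kdata as the per-chunk kernel copy of the user command stream, and radeon_get_ib_value() prefers that copy over the mapped indirect buffer. A simplified, self-contained sketch of the same accessor pattern, with illustrative struct names rather than the driver's:

    /* Read a command-stream dword from the kernel-side copy (kdata) when one
     * was made, otherwise from the mapped indirect buffer. */
    #include <stdint.h>
    #include <stddef.h>

    struct cs_chunk {
        uint32_t *kdata;        /* kernel copy of the chunk, may be NULL */
        uint32_t length_dw;
    };

    struct cs_parser {
        struct cs_chunk *chunk_ib;
        uint32_t *ib_ptr;       /* mapping of the indirect buffer itself */
    };

    static inline uint32_t get_ib_value(const struct cs_parser *p, size_t idx)
    {
        const struct cs_chunk *ibc = p->chunk_ib;

        if (ibc->kdata)
            return ibc->kdata[idx];     /* validated kernel-side copy */
        return p->ib_ptr[idx];          /* fall back to the mapped IB */
    }

    int main(void)
    {
        uint32_t copy[4] = { 0xC0DE0001, 2, 3, 4 };
        struct cs_chunk chunk = { .kdata = copy, .length_dw = 4 };
        struct cs_parser p = { .chunk_ib = &chunk, .ib_ptr = NULL };

        return get_ib_value(&p, 0) == 0xC0DE0001 ? 0 : 1;
    }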
|
| /linux/kernel/bpf/ |
| bpf_struct_ops.c |
    600  void *udata, *kdata;  in bpf_struct_ops_map_update_elem() local
    640  kdata = &kvalue->data;  in bpf_struct_ops_map_update_elem()
    654  *(void **)(kdata + moff) = BPF_MODULE_OWNER;  in bpf_struct_ops_map_update_elem()
    658  err = st_ops->init_member(t, member, kdata, udata);  in bpf_struct_ops_map_update_elem()
    734  *(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();  in bpf_struct_ops_map_update_elem()
    741  err = st_ops->validate(kdata);  in bpf_struct_ops_map_update_elem()
    762  err = st_ops->reg(kdata, NULL);  in bpf_struct_ops_map_update_elem()
    1019  bool bpf_struct_ops_get(const void *kdata)  in bpf_struct_ops_get() argument
    1025  kvalue = container_of(kdata, struct bpf_struct_ops_value, data);  in bpf_struct_ops_get()
    1032  void bpf_struct_ops_put(const void *kdata)  in bpf_struct_ops_put() argument
    [all …]
|
| /linux/tools/testing/selftests/bpf/bpf_testmod/ |
| bpf_testmod.c |
    1074  void *kdata, const void *udata)  in bpf_testmod_ops_init_member() argument
    1082  ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;  in bpf_testmod_ops_init_member()
    1097  static int bpf_dummy_reg(void *kdata, struct bpf_link *link)  in bpf_dummy_reg() argument
    1099  struct bpf_testmod_ops *ops = kdata;  in bpf_dummy_reg()
    1112  static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)  in bpf_dummy_unreg() argument
    1153  static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)  in bpf_dummy_reg2() argument
    1155  struct bpf_testmod_ops2 *ops = kdata;  in bpf_dummy_reg2()
    1267  static int st_ops_reg(void *kdata, struct bpf_link *link)  in st_ops_reg() argument
    1277  st_ops = kdata;  in st_ops_reg()
    1284  static void st_ops_unreg(void *kdata, struct bpf_link *link)  in st_ops_unreg() argument
    [all …]
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_cs.c |
    229  if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_pass1()
    235  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in amdgpu_cs_pass1()
    247  ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);  in amdgpu_cs_pass1()
    256  ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,  in amdgpu_cs_pass1()
    270  ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);  in amdgpu_cs_pass1()
    321  kvfree(p->chunks[i].kdata);  in amdgpu_cs_pass1()
    336  struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;  in amdgpu_cs_p2_ib()
    389  struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;  in amdgpu_cs_p2_dependencies()
    461  struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;  in amdgpu_cs_p2_syncobj_in()
    499  struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;  in amdgpu_cs_p2_syncobj_out()
    [all …]
|
| amdgpu_cs.h |
    40  void *kdata;  member
|
| /linux/arch/arm64/kernel/ |
| ptrace.c |
    2030  *kdata = reg;  in compat_ptrace_hbp_get_resource_info()
    2037  u32 *kdata)  in compat_ptrace_hbp_get() argument
    2046  *kdata = (u32)addr;  in compat_ptrace_hbp_get()
    2049  *kdata = ctrl;  in compat_ptrace_hbp_get()
    2058  u32 *kdata)  in compat_ptrace_hbp_set() argument
    2066  addr = *kdata;  in compat_ptrace_hbp_set()
    2069  ctrl = *kdata;  in compat_ptrace_hbp_set()
    2080  u32 kdata;  in compat_ptrace_gethbpregs() local
    2094  ret = put_user(kdata, data);  in compat_ptrace_gethbpregs()
    2103  u32 kdata = 0;  in compat_ptrace_sethbpregs() local
    [all …]
|
| /linux/net/bpf/ |
| bpf_dummy_struct_ops.c |
    270  void *kdata, const void *udata)  in bpf_dummy_init_member() argument
    275  static int bpf_dummy_reg(void *kdata, struct bpf_link *link)  in bpf_dummy_reg() argument
    280  static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)  in bpf_dummy_unreg() argument
|
| /linux/drivers/net/ethernet/netronome/nfp/flower/ |
| conntrack.c |
    872  key = kdata + offset;  in nfp_fl_ct_add_offload()
    880  key = kdata + offset;  in nfp_fl_ct_add_offload()
    893  key = kdata + offset;  in nfp_fl_ct_add_offload()
    916  key = kdata + offset;  in nfp_fl_ct_add_offload()
    926  key = kdata + offset;  in nfp_fl_ct_add_offload()
    942  key = kdata + offset;  in nfp_fl_ct_add_offload()
    953  key = kdata + offset;  in nfp_fl_ct_add_offload()
    964  key = kdata + offset;  in nfp_fl_ct_add_offload()
    975  key = kdata + offset;  in nfp_fl_ct_add_offload()
    986  key = kdata + offset;  in nfp_fl_ct_add_offload()
    [all …]
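Every hit in nfp_fl_ct_add_offload() is the same move: the flat key buffer kdata is pre-sized, then each match section is written at its own running offset. A generic, self-contained sketch of that layout pattern; the section structs are illustrative placeholders, not the NFP firmware layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct meta_tci   { uint16_t tci, flags; };
    struct ipv4_addrs { uint32_t saddr, daddr; };
    struct tports     { uint16_t sport, dport; };

    /* Write each key section at its running offset inside one flat buffer. */
    static size_t emit_key(uint8_t *kdata)
    {
        size_t offset = 0;
        void *key;

        key = kdata + offset;                       /* metadata/TCI section  */
        memcpy(key, &(struct meta_tci){ .tci = 0x0123 }, sizeof(struct meta_tci));
        offset += sizeof(struct meta_tci);

        key = kdata + offset;                       /* IPv4 address section  */
        memcpy(key, &(struct ipv4_addrs){ .saddr = 0x0a000001, .daddr = 0x0a000002 },
               sizeof(struct ipv4_addrs));
        offset += sizeof(struct ipv4_addrs);

        key = kdata + offset;                       /* transport port section */
        memcpy(key, &(struct tports){ .sport = 1234, .dport = 80 }, sizeof(struct tports));
        offset += sizeof(struct tports);

        return offset;                              /* total key length written */
    }

    int main(void)
    {
        uint8_t kdata[64] = { 0 };

        printf("key length: %zu bytes\n", emit_key(kdata));
        return 0;
    }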
|
| /linux/drivers/accel/habanalabs/common/ |
| habanalabs_ioctl.c |
    1235  char *kdata = NULL;  in _hl_ioctl() local
    1259  kdata = stack_kdata;  in _hl_ioctl()
    1261  kdata = kzalloc(asize, GFP_KERNEL);  in _hl_ioctl()
    1262  if (!kdata) {  in _hl_ioctl()
    1270  if (copy_from_user(kdata, (void __user *)arg, usize)) {  in _hl_ioctl()
    1276  retcode = func(hpriv, kdata);  in _hl_ioctl()
    1278  if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))  in _hl_ioctl()
    1290  if (kdata != stack_kdata)  in _hl_ioctl()
    1291  kfree(kdata);  in _hl_ioctl()
|
| /linux/drivers/gpu/drm/amd/amdkfd/ |
| kfd_chardev.c |
    3241  char *kdata = NULL;  in kfd_ioctl() local
    3308  kdata = stack_kdata;  in kfd_ioctl()
    3310  kdata = kmalloc(asize, GFP_KERNEL);  in kfd_ioctl()
    3311  if (!kdata) {  in kfd_ioctl()
    3317  memset(kdata + usize, 0, asize - usize);  in kfd_ioctl()
    3321  if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {  in kfd_ioctl()
    3326  memset(kdata, 0, usize);  in kfd_ioctl()
    3329  retcode = func(filep, process, kdata);  in kfd_ioctl()
    3332  if (copy_to_user((void __user *)arg, kdata, usize) != 0)  in kfd_ioctl()
    3340  if (kdata != stack_kdata)  in kfd_ioctl()
    [all …]
|
| /linux/include/linux/ |
| bpf.h |
    1755  void *kdata, const void *udata);
    1756  int (*reg)(void *kdata, struct bpf_link *link);
    1757  void (*unreg)(void *kdata, struct bpf_link *link);
    1758  int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
    1759  int (*validate)(void *kdata);
    1818  bool bpf_struct_ops_get(const void *kdata);
    1819  void bpf_struct_ops_put(const void *kdata);
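These bpf.h prototypes are the contract that the struct_ops providers in this listing (bpf_tcp_ca.c, hid_bpf_struct_ops.c, ext.c, the test modules) fill in: kdata is always the kernel-side copy of the ops structure being installed, while udata is the image supplied from user space. A hedged, skeletal sketch of what a subsystem typically provides; my_register(), my_unregister() and my_replace() are placeholders for the subsystem's own API, and registration details (verifier ops, cfi_stubs, register_bpf_struct_ops()) are elided:

    #include <linux/bpf.h>
    #include <linux/btf.h>

    /* Placeholder subsystem API (assumed, not real kernel symbols). */
    int my_register(void *ops);
    void my_unregister(void *ops);
    int my_replace(void *new_ops, void *old_ops);

    static int my_ops_init_member(const struct btf_type *t,
                                  const struct btf_member *member,
                                  void *kdata, const void *udata)
    {
        /* Copy non-function members (flags, thresholds, ...) from the user
         * image (udata) into the kernel copy (kdata); function pointers are
         * filled in later by the struct_ops core through BPF trampolines. */
        return 0;
    }

    static int my_ops_reg(void *kdata, struct bpf_link *link)
    {
        return my_register(kdata);           /* kdata is the assembled ops struct */
    }

    static void my_ops_unreg(void *kdata, struct bpf_link *link)
    {
        my_unregister(kdata);
    }

    static int my_ops_update(void *kdata, void *old_kdata, struct bpf_link *link)
    {
        return my_replace(kdata, old_kdata); /* atomically swap old ops for new */
    }

    static int my_ops_validate(void *kdata)
    {
        return 0;                            /* sanity-check the assembled ops */
    }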
|
| /linux/kernel/sched/ |
| ext.c |
    5433  void *kdata, const void *udata)  in bpf_scx_init_member() argument
    5436  struct sched_ext_ops *ops = kdata;  in bpf_scx_init_member()
    5503  static int bpf_scx_reg(void *kdata, struct bpf_link *link)  in bpf_scx_reg() argument
    5505  return scx_ops_enable(kdata, link);  in bpf_scx_reg()
    5508  static void bpf_scx_unreg(void *kdata, struct bpf_link *link)  in bpf_scx_unreg() argument
    5527  static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)  in bpf_scx_update() argument
    5539  static int bpf_scx_validate(void *kdata)  in bpf_scx_validate() argument
|
| /linux/tools/power/pm-graph/ |
| sleepgraph.py |
    606  def defaultKprobe(self, name, kdata):  argument
    607  k = kdata
|