Identifier search results for `size` under `kernel/`, grouped by directory. Each entry lists the matching source line, the code excerpt, and the enclosing function (with `member`, `local`, or `argument` marking what the hit declares); `[all …]` means the tool truncated the list.

**/kernel/dma/**

`mapping.c`
     34  size_t size;  [member]
     53  WARN_ON(this->size != match->size ||  [in dmam_match()]
    111  dr->size = size;  [in dmam_alloc_attrs()]
    508  size, attrs);  [in dma_get_sgtable_attrs()]
    511  size, attrs);  [in dma_get_sgtable_attrs()]
    697  size = PAGE_ALIGN(size);  [in __dma_alloc_pages()]
    714  size, dir, gfp, 0);  [in dma_alloc_pages()]
    728  size = PAGE_ALIGN(size);  [in __dma_free_pages()]
    965  size_t size = SIZE_MAX;  [in dma_max_mapping_size(), local]
    974  return size;  [in dma_max_mapping_size()]
    [all …]

`direct.c`
     76  return dma_addr + size - 1 <=  [in dma_coherent_ok()]
    100  size_t size)  [in __dma_direct_free_pages(), argument]
    102  if (swiotlb_free(dev, page, size))  [in __dma_direct_free_pages()]
    104  dma_free_contiguous(dev, page, size);  [in __dma_direct_free_pages()]
    112  swiotlb_free(dev, page, size);  [in dma_direct_alloc_swiotlb()]
    126  WARN_ON_ONCE(!PAGE_ALIGNED(size));  [in __dma_direct_alloc_pages()]
    201  arch_dma_prep_coherent(page, size);  [in dma_direct_alloc_no_mapping()]
    215  size = PAGE_ALIGN(size);  [in dma_direct_alloc()]
    293  memset(ret, 0, size);  [in dma_direct_alloc()]
    374  memset(ret, 0, size);  [in dma_direct_alloc_pages()]
    [all …]

`coherent.c`
     17  int size;  [member]
     42  int pages = size >> PAGE_SHIFT;  [in dma_init_coherent_memory()]
     45  if (!size)  [in dma_init_coherent_memory()]
     62  dma_mem->size = pages;  [in dma_init_coherent_memory()]
     73  &phys_addr, size / SZ_1M);  [in dma_init_coherent_memory()]
    145  int order = get_order(size);  [in __dma_alloc_from_coherent()]
    152  if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))  [in __dma_alloc_from_coherent()]
    166  memset(ret, 0, size);  [in __dma_alloc_from_coherent()]
    302  size_t size, int *ret)  [in dma_mmap_from_global_coherent(), argument]
    308  vaddr, size, ret);  [in dma_mmap_from_global_coherent()]
    [all …]

`direct.h`
     14  void *cpu_addr, dma_addr_t dma_addr, size_t size,
     18  void *cpu_addr, dma_addr_t dma_addr, size_t size,
     64  arch_sync_dma_for_device(paddr, size, dir);  [in dma_direct_sync_single_for_device()]
     73  arch_sync_dma_for_cpu(paddr, size, dir);  [in dma_direct_sync_single_for_cpu()]
     77  swiotlb_sync_single_for_cpu(dev, paddr, size, dir);  [in dma_direct_sync_single_for_cpu()]
     80  arch_dma_mark_clean(paddr, size);  [in dma_direct_sync_single_for_cpu()]
     93  return swiotlb_map(dev, phys, size, dir, attrs);  [in dma_direct_map_page()]
     97  dma_kmalloc_needs_bounce(dev, size, dir)) {  [in dma_direct_map_page()]
    101  return swiotlb_map(dev, phys, size, dir, attrs);  [in dma_direct_map_page()]
    110  arch_sync_dma_for_device(phys, size, dir);  [in dma_direct_map_page()]
    [all …]

`ops_helpers.c`
     20  void *cpu_addr, dma_addr_t dma_addr, size_t size,  [in dma_common_get_sgtable(), argument]
     28  sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);  [in dma_common_get_sgtable()]
     36  void *cpu_addr, dma_addr_t dma_addr, size_t size,  [in dma_common_mmap(), argument]
     41  unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;  [in dma_common_mmap()]
     68  page = dma_alloc_contiguous(dev, size, gfp);  [in dma_common_alloc_pages()]
     78  *dma_handle = ops->map_page(dev, page, 0, size, dir,  [in dma_common_alloc_pages()]
     81  dma_free_contiguous(dev, page, size);  [in dma_common_alloc_pages()]
     85  memset(page_address(page), 0, size);  [in dma_common_alloc_pages()]
     95  iommu_dma_unmap_page(dev, dma_handle, size, dir,  [in dma_common_free_pages()]
     98  ops->unmap_page(dev, dma_handle, size, dir,  [in dma_common_free_pages()]
    [all …]

`debug.h`
     13  size_t offset, size_t size,
     18  size_t size, int direction);
     35  size_t size, int direction,
     40  size_t size, int direction);
     43  dma_addr_t dma_handle, size_t size,
     48  size_t size, int direction);
     59  size_t offset, size_t size,  [in debug_dma_map_page(), argument]
     66  size_t size, int direction)  [in debug_dma_unmap_page(), argument]
     94  size_t size, int direction,  [in debug_dma_map_resource(), argument]
    108  size_t size, int direction)  [in debug_dma_sync_single_for_cpu(), argument]
    [all …]

`pool.c`
     49  pool_size_dma += size;  [in dma_atomic_pool_size_add()]
     51  pool_size_dma32 += size;  [in dma_atomic_pool_size_add()]
     53  pool_size_kernel += size;  [in dma_atomic_pool_size_add()]
     58  unsigned long size;  [in cma_in_zone(), local]
     66  size = cma_get_size(cma);  [in cma_in_zone()]
     67  if (!size)  [in cma_in_zone()]
     71  end = cma_get_base(cma) + size - 1;  [in cma_in_zone()]
    247  addr = gen_pool_alloc(pool, size);  [in __dma_alloc_from_pool()]
    253  gen_pool_free(pool, addr, size);  [in __dma_alloc_from_pool()]
    261  memset(*cpu_addr, 0, size);  [in __dma_alloc_from_pool()]
    [all …]

`contiguous.c`
    255  dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)  [in dma_contiguous_early_fixup(), argument]
    282  ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,  [in dma_contiguous_reserve_area()]
    333  unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);  [in cma_alloc_aligned()]
    363  return cma_alloc_aligned(dev->cma_area, size, gfp);  [in dma_alloc_contiguous()]
    364  if (size <= PAGE_SIZE)  [in dma_alloc_contiguous()]
    373  page = cma_alloc_aligned(cma, size, gfp);  [in dma_alloc_contiguous()]
    380  page = cma_alloc_aligned(cma, size, gfp);  [in dma_alloc_contiguous()]
    405  unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;  [in dma_free_contiguous()]
    428  __free_pages(page, get_order(size));  [in dma_free_contiguous()]
    487  dma_contiguous_early_fixup(rmem->base, rmem->size);  [in rmem_cma_setup()]
    [all …]

`debug.c`
    981  if (ref->size != entry->size) {  [in check_unmap()]
    986  ref->dev_addr, entry->size, ref->size);  [in check_unmap()]
   1109  if (ref->size > entry->size) {  [in check_sync()]
   1224  entry->size = size;  [in debug_dma_map_page()]
   1284  .size = size,  [in debug_dma_unmap_page()]
   1415  entry->size = size;  [in debug_dma_alloc_coherent()]
   1429  .size = size,  [in debug_dma_free_coherent()]
   1461  entry->size = size;  [in debug_dma_map_resource()]
   1476  .size = size,  [in debug_dma_unmap_resource()]
   1497  ref.size = size;  [in debug_dma_sync_single_for_cpu()]
    [all …]
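The hits above all sit behind the driver-facing DMA API: `mapping.c` dispatches, `direct.c` allocates, `direct.h` handles the swiotlb bounce path, and `debug.c` tracks every mapping's `size` so unmaps can be cross-checked. A minimal driver-side sketch of the two allocation styles these files implement (the `dev` pointer and error values are illustrative; the API calls themselves are the standard kernel ones):

```c
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch: coherent allocation vs. streaming mapping. */
static int demo_dma(struct device *dev)
{
	size_t size = 4096;
	dma_addr_t handle, dma;
	void *cpu, *buf;

	/* Coherent: CPU and device share the buffer for its lifetime. */
	cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;
	/* ... hand "handle" to the device ... */
	dma_free_coherent(dev, size, cpu, handle);

	/* Streaming: map an existing buffer for a single transfer. */
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	dma = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}
	/* ... run the transfer, then tear the mapping down ... */
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}
```

With CONFIG_DMA_API_DEBUG, the `debug_dma_*` entries listed above record `(dev_addr, size)` for each of these calls and warn if the unmap's size disagrees (see `check_unmap()`).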
**/kernel/kcsan/**

`core.c`
    115  size_t size,  [in find_watchpoint(), argument]
    349  switch (size) {  [in read_instrumented_memory()]
    413  return reorder_access->ptr == ptr && reorder_access->size == size &&  [in find_reorder_access()]
    434  reorder_access->size = size;  [in set_reorder_access()]
    456  size_t size,  [in kcsan_found_watchpoint(), argument]
    728  if (unlikely(size == 0))  [in check_access()]
    777  size = READ_ONCE(reorder_access->size);  [in check_access()]
    778  if (size)  [in check_access()]
    918  sa->size = size;  [in kcsan_begin_scoped_access()]
    969  sa->size = 0; \
    [all …]

`report.c`
     31  size_t size;  [member]
    459  get_access_type(ai->access_type), ai->ptr, ai->size,  [in print_report()]
    463  get_access_type(ai->access_type), ai->ptr, ai->size,  [in print_report()]
    472  if (ai->size <= 8) {  [in print_report()]
    473  int hex_len = ai->size * 2;  [in print_report()]
    504  other_info->ai.size = 0;  [in release_report()]
    565  } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&  [in set_other_info_task_blocking()]
    591  WARN_ON(other_info->ai.size);  [in prepare_report_producer()]
    609  while (!other_info->ai.size) { /* Await valid @other_info. */  [in prepare_report_consumer()]
    621  (unsigned long)ai->ptr, ai->size))  {  [in prepare_report_consumer()]
    [all …]
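KCSAN's watchpoints are keyed on `(ptr, size)` pairs, as `find_watchpoint()` above shows: an armed watchpoint covers an address range, and any concurrent access overlapping that range is reported. A minimal sketch of the kind of race it flags and the marked-access form that silences it (the shared variable and functions are illustrative):

```c
#include <linux/compiler.h>

static int shared_counter;

/* Plain concurrent read-modify-write: KCSAN arms a watchpoint on
 * (&shared_counter, sizeof(int)) and reports if another CPU touches
 * that range while the watchpoint is live. */
static void racy_update(void)
{
	shared_counter++;
}

/* Marked accesses document the concurrency and avoid the report. */
static void marked_update(void)
{
	WRITE_ONCE(shared_counter, READ_ONCE(shared_counter) + 1);
}
```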
**/kernel/**

`stacktrace.c`
     61  buf += size;  [in stack_trace_snprint()]
     62  size = 0;  [in stack_trace_snprint()]
     65  size -= generated;  [in stack_trace_snprint()]
     77  unsigned int size;  [member]
     86  if (c->len >= c->size)  [in stack_trace_consume_entry()]
    118  .size = size,  [in stack_trace_save()]
    142  .size = size,  [in stack_trace_save_tsk()]
    171  .size = size,  [in stack_trace_save_regs()]
    198  .size = size,  [in stack_trace_save_tsk_reliable()]
    228  .size = size,  [in stack_trace_save_user()]
    [all …]

`iomem.c`
     13  return (__force void *)ioremap_cache(offset, size);  [in arch_memremap_wb()]
     15  return (__force void *)ioremap(offset, size);  [in arch_memremap_wb()]
     71  int is_ram = region_intersects(offset, size,  [in memremap()]
     80  &offset, (unsigned long) size);  [in memremap()]
     93  addr = try_ram_remap(offset, size, flags);  [in memremap()]
     95  addr = arch_memremap_wb(offset, size, flags);  [in memremap()]
    106  &offset, (unsigned long) size);  [in memremap()]
    111  addr = ioremap_wt(offset, size);  [in memremap()]
    114  addr = ioremap_wc(offset, size);  [in memremap()]
    138  size_t size, unsigned long flags)  [in devm_memremap(), argument]
    [all …]

`regset.c`
      8  unsigned int size,  [in __regset_get(), argument]
     16  if (size > regset->n * regset->size)  [in __regset_get()]
     17  size = regset->n * regset->size;  [in __regset_get()]
     19  to_free = p = kvzalloc(size, GFP_KERNEL);  [in __regset_get()]
     24  (struct membuf){.p = p, .left = size});  [in __regset_get()]
     30  return size - res;  [in __regset_get()]
     35  unsigned int size,  [in regset_get(), argument]
     38  return __regset_get(target, regset, size, &data);  [in regset_get()]
     44  unsigned int size,  [in regset_get_alloc(), argument]
     48  return __regset_get(target, regset, size, data);  [in regset_get_alloc()]
    [all …]

`kexec_handover.c`
    445  phys_addr_t size;  [in scratch_size_update(), local]
    452  size = size * scratch_scale / 100;  [in scratch_size_update()]
    457  size = size * scratch_scale / 100 - scratch_size_lowmem;  [in scratch_size_update()]
    463  phys_addr_t size;  [in scratch_size_node(), local]
    468  size = size * scratch_scale / 100;  [in scratch_size_node()]
    487  phys_addr_t addr, size;  [in kho_reserve_scratch(), local]
    506  size = scratch_size_lowmem;  [in kho_reserve_scratch()]
    513  kho_scratch[i].size = size;  [in kho_reserve_scratch()]
    523  kho_scratch[i].size = size;  [in kho_reserve_scratch()]
    535  kho_scratch[i].size = size;  [in kho_reserve_scratch()]
    [all …]

`kcov.c`
     60  unsigned int size;  [member]
     78  unsigned int size;  [member]
    142  if (area->size == size) {  [in kcov_remote_area_get()]
    155  area->size = size;  [in kcov_remote_area_put()]
    334  switch (size) {  [in __sanitizer_cov_trace_switch()]
    496  size = kcov->size * sizeof(unsigned long);  [in kcov_mmap()]
    713  size = arg;  [in kcov_ioctl()]
    714  if (size < 2 || size > INT_MAX / sizeof(unsigned long))  [in kcov_ioctl()]
    726  kcov->size = size;  [in kcov_ioctl()]
    856  unsigned int size;  [in kcov_remote_start(), local]
    [all …]
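In `stacktrace.c`, every `stack_trace_save_*` variant fills a caller-supplied array and treats `size` as its capacity, returning how many entries were actually stored. A minimal in-kernel sketch of that contract (buffer length and indent values are arbitrary; the two API calls are the standard ones from `<linux/stacktrace.h>`):

```c
#include <linux/kernel.h>
#include <linux/stacktrace.h>

/* Sketch: capture up to 16 return addresses from the current task
 * and print them. "size" is the capacity of "entries"; the return
 * value is the number of slots actually filled. */
static void dump_current_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
	stack_trace_print(entries, nr, 4 /* indent spaces */);
}
```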
**/kernel/module/**

`stats.c`
    277  unsigned int len, size, count_failed = 0;  [in read_file_mod_stats(), local]
    299  size = MAX_PREAMBLE + min((unsigned int)(floads + fbecoming),  [in read_file_mod_stats()]
    301  buf = kzalloc(size, GFP_KERNEL);  [in read_file_mod_stats()]
    306  len = scnprintf(buf, size, "%25s\t%u\n", "Mods ever loaded", live_mod_count);  [in read_file_mod_stats()]
    331  len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod size",  [in read_file_mod_stats()]
    336  len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Average mod text size",  [in read_file_mod_stats()]
    349  len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail kread bytes",  [in read_file_mod_stats()]
    361  len += scnprintf(buf + len, size - len, "%25s\t%lu\n", "Avg fail decomp bytes",  [in read_file_mod_stats()]
    379  len += scnprintf(buf + len, size - len, "Duplicate failed modules:\n");  [in read_file_mod_stats()]
    380  len += scnprintf(buf + len, size - len, "%25s\t%15s\t%25s\n",  [in read_file_mod_stats()]
    [all …]

`decompress.c`
     77  if (len == size)  [in module_gzip_header_len()]
     86  const void *buf, size_t size)  [in module_gzip_decompress(), argument]
    101  s.avail_in = size - gzip_hdr_len;  [in module_gzip_decompress()]
    150  const void *buf, size_t size)  [in module_xz_decompress(), argument]
    159  if (size < sizeof(signature) ||  [in module_xz_decompress()]
    169  xz_buf.in_size = size;  [in module_xz_decompress()]
    208  const void *buf, size_t size)  [in module_zstd_decompress(), argument]
    221  if (size < sizeof(signature) ||  [in module_zstd_decompress()]
    229  zstd_buf.size = size;  [in module_zstd_decompress()]
    267  zstd_dec.size = PAGE_SIZE;  [in module_zstd_decompress()]
    [all …]

`livepatch.c`
     20  unsigned int size, symndx;  [in copy_module_elf(), local]
     23  size = sizeof(*mod->klp_info);  [in copy_module_elf()]
     24  mod->klp_info = kmalloc(size, GFP_KERNEL);  [in copy_module_elf()]
     29  size = sizeof(mod->klp_info->hdr);  [in copy_module_elf()]
     30  memcpy(&mod->klp_info->hdr, info->hdr, size);  [in copy_module_elf()]
     33  size = sizeof(*info->sechdrs) * info->hdr->e_shnum;  [in copy_module_elf()]
     34  mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL);  [in copy_module_elf()]
     41  size = info->sechdrs[info->hdr->e_shstrndx].sh_size;  [in copy_module_elf()]
     42  mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL);  [in copy_module_elf()]
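Each `module_*_decompress()` function above begins the same way: check that the blob is at least as large as the format's magic, then compare the magic bytes. A hypothetical helper in that spirit (the function name is illustrative; the magic byte sequences are the standard gzip, xz, and zstd ones):

```c
#include <linux/string.h>
#include <linux/types.h>

static const u8 gzip_magic[] = { 0x1f, 0x8b };
static const u8 xz_magic[]   = { 0xfd, '7', 'z', 'X', 'Z', 0x00 };
static const u8 zstd_magic[] = { 0x28, 0xb5, 0x2f, 0xfd };

/* Sketch: identify a compressed module image by its leading magic,
 * guarding every memcmp() with a size check as decompress.c does. */
static const char *module_compression(const void *buf, size_t size)
{
	if (size >= sizeof(gzip_magic) &&
	    !memcmp(buf, gzip_magic, sizeof(gzip_magic)))
		return "gzip";
	if (size >= sizeof(xz_magic) &&
	    !memcmp(buf, xz_magic, sizeof(xz_magic)))
		return "xz";
	if (size >= sizeof(zstd_magic) &&
	    !memcmp(buf, zstd_magic, sizeof(zstd_magic)))
		return "zstd";
	return NULL;
}
```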
**/kernel/events/**

`ring_buffer.c`
    206  size, backward)))  [in __perf_output_begin()]
    223  head += size;  [in __perf_output_begin()]
    225  head -= size;  [in __perf_output_begin()]
    424  handle->size = 0;  [in perf_aux_output_begin()]
    504  rb->aux_head += size;  [in perf_aux_output_end()]
    550  if (size > handle->size)  [in perf_aux_output_skip()]
    553  rb->aux_head += size;  [in perf_aux_output_skip()]
    562  handle->size -= size;  [in perf_aux_output_skip()]
    839  unsigned long size;  [in rb_alloc(), local]
    921  unsigned long size;  [in rb_alloc(), local]
    [all …]
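The pattern in these hits is transactional cursor accounting: `__perf_output_begin()` advances `head` by `size` (or subtracts it for backward-writing buffers), and `perf_aux_output_skip()` refuses to consume more bytes than the handle has left. A toy model of just that accounting, with all names illustrative rather than the real perf structures:

```c
#include <linux/errno.h>

struct toy_handle {
	unsigned long head;	/* write cursor into the buffer */
	unsigned long size;	/* bytes remaining in this transaction */
};

/* Sketch: skip "size" bytes of output, mirroring the bounds check
 * and the paired head/size updates visible in perf_aux_output_skip(). */
static int toy_output_skip(struct toy_handle *h, unsigned long size)
{
	if (size > h->size)
		return -ENOSPC;
	h->head += size;
	h->size -= size;
	return 0;
}
```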
**/kernel/trace/**

`trace_events_inject.c`
    142  int size = 0;  [in trace_get_entry_size(), local]
    146  if (field->size + field->offset > size)  [in trace_get_entry_size()]
    147  size = field->size + field->offset;  [in trace_get_entry_size()]
    150  return size;  [in trace_get_entry_size()]
    177  str_loc -= field->offset + field->size;  [in trace_alloc_entry()]
    189  *size = entry_size + 1;  [in trace_alloc_entry()]
    247  switch (field->size) {  [in parse_entry()]
    289  int err = -ENODEV, size;  [in event_inject_write(), local]
    305  size = parse_entry(buf, call, &entry);  [in event_inject_write()]
    306  if (size < 0)  [in event_inject_write()]
    [all …]

`fprobe.c`
    346  int size, curr;  [in fprobe_return(), local]
    369  curr += size;  [in fprobe_return()]
    423  int size;  [member]
    435  if (alist->index < alist->size)  [in fprobe_addr_list_add()]
    444  alist->size *= 2;  [in fprobe_addr_list_add()]
    546  size_t size;  [member]
    586  size_t size)  [in get_ips_from_filter(), argument]
    589  .index = 0, .size = size, .addrs = addrs, .mods = mods};  [in get_ips_from_filter()]
    618  int size, i;  [in fprobe_init(), local]
    626  fp->entry_data_size = size;  [in fprobe_init()]
    [all …]

`bpf_trace.c`
    180  memset(dst, 0, size);  [in bpf_probe_read_user_common()]
    217  memset(dst, 0, size);  [in bpf_probe_read_user_str_common()]
    267  memset(dst, 0, size);  [in bpf_probe_read_kernel_str_common()]
    327  u32, size)  [in BPF_CALL_3(), argument]
    596  memset(buf, 0, size);  [in BPF_CALL_4()]
    664  .size = size,  [in BPF_CALL_5()]
    719  .size = ctx_size,  [in bpf_event_output()]
    727  .size = meta_size,  [in bpf_event_output()]
   1526  if (off % size != 0)  [in kprobe_prog_is_valid_access()]
   2014  if (size != 8)  [in pe_prog_is_valid_access()]
    [all …]
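The repeated `memset(dst, 0, size)` hits in `bpf_trace.c` show the contract of the `bpf_probe_read_*` helpers: on a failed read, the destination is zero-filled rather than left partially initialized, so BPF programs never see stale buffer contents. A sketch of that pattern, using the real `copy_from_user_nofault()` primitive (the wrapper name is illustrative):

```c
#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Sketch: faultless user read with the "zero on failure" guarantee
 * seen in bpf_probe_read_user_common() above. */
static long probe_read_or_zero(void *dst, u32 size, const void __user *src)
{
	long ret = copy_from_user_nofault(dst, src, size);

	if (ret < 0)
		memset(dst, 0, size);	/* failed read: hand back zeroes */
	return ret;
}
```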
**/kernel/debug/kdb/**

`kdb_support.c`
    266  int kdb_getarea_size(void *res, unsigned long addr, size_t size)  [in kdb_getarea_size(), argument]
    328  memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);  [in kdb_getphys()]
    352  switch (size) {  [in kdb_getphysword()]
    369  if (size <= sizeof(*word)) {  [in kdb_getphysword()]
    378  kdb_func_printf("bad width %zu\n", size);  [in kdb_getphysword()]
    401  switch (size) {  [in kdb_getword()]
    418  if (size <= sizeof(*word)) {  [in kdb_getword()]
    427  kdb_func_printf("bad width %zu\n", size);  [in kdb_getword()]
    449  switch (size) {  [in kdb_putword()]
    463  if (size <= sizeof(word)) {  [in kdb_putword()]
    [all …]
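`kdb_getword()`, `kdb_getphysword()`, and `kdb_putword()` all dispatch on `size` to pick a machine access width and reject anything else (that is the "bad width %zu" path). A simplified sketch of the dispatch, without kdb's fault handling; the function name is illustrative:

```c
#include <linux/types.h>
#include <linux/errno.h>

/* Sketch: read "size" bytes at "addr" into a word, width-dispatched
 * like kdb_getword(). Unsupported widths fail instead of misreading. */
static int toy_getword(unsigned long *word, void *addr, size_t size)
{
	switch (size) {
	case 1:
		*word = *(u8 *)addr;
		return 0;
	case 2:
		*word = *(u16 *)addr;
		return 0;
	case 4:
		*word = *(u32 *)addr;
		return 0;
	case 8:
		/* Only valid where the result fits an unsigned long,
		 * mirroring the "size <= sizeof(*word)" guard above. */
		if (size <= sizeof(*word)) {
			*word = *(u64 *)addr;
			return 0;
		}
		return -EINVAL;
	default:
		return -EINVAL;	/* kdb prints "bad width %zu" here */
	}
}
```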
**/kernel/bpf/**

`local_storage.c`
    188  u32 size;  [in bpf_percpu_cgroup_storage_copy(), local]
    201  size = round_up(_map->value_size, 8);  [in bpf_percpu_cgroup_storage_copy()]
    205  off += size;  [in bpf_percpu_cgroup_storage_copy()]
    217  u32 size;  [in bpf_percpu_cgroup_storage_update(), local]
    238  value + off, size);  [in bpf_percpu_cgroup_storage_update()]
    239  off += size;  [in bpf_percpu_cgroup_storage_update()]
    363  u32 offset, size;  [in cgroup_storage_check_btf(), local]
    477  size_t size;  [in bpf_cgroup_storage_calculate_size(), local]
    484  size = map->value_size;  [in bpf_cgroup_storage_calculate_size()]
    489  return size;  [in bpf_cgroup_storage_calculate_size()]
    [all …]
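The `round_up(_map->value_size, 8)` / `off += size` pairing in `bpf_percpu_cgroup_storage_copy()` suggests the layout: each CPU's value occupies a slot rounded up to 8 bytes, and the slots are packed back to back into the user-visible buffer. A sketch of that copy-out loop under those assumptions (all names illustrative):

```c
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>

/* Sketch: flatten a per-cpu value into one contiguous buffer, one
 * 8-byte-aligned slot per possible CPU. */
static void copy_percpu_values(void *out, void __percpu *pptr,
			       u32 value_size)
{
	u32 size = round_up(value_size, 8);
	u32 off = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		memcpy(out + off, per_cpu_ptr(pptr, cpu), value_size);
		off += size;	/* keep 8-byte alignment between slots */
	}
}
```

The caller would size `out` as `num_possible_cpus() * round_up(value_size, 8)` for this layout to hold.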