| /include/trace/events/ |
| osnoise.h | 113 __field( u64, start ) 121 __entry->start = start; 140 __field( u64, start ) 147 __entry->start = start; 166 __field( u64, start ) 176 __entry->start = start; 192 TP_ARGS(start, duration), 195 __field( u64, start ) 200 __entry->start = start; 217 __field( u64, start ) [all …]
|
| intel_ifs.h | 13 TP_PROTO(int batch, int start, int stop, u64 status), 15 TP_ARGS(batch, start, stop, status), 20 __field( u16, start ) 26 __entry->start = start; 33 __entry->start,
|
| damon.h | 50 __field(unsigned long, start) 61 __entry->start = r->ar.start; 71 __entry->start, __entry->end, 102 __field(unsigned long, start) 111 __entry->start = r->ar.start; 119 __entry->start, __entry->end,
|
| vmalloc.h | 65 TP_PROTO(unsigned long start, unsigned long end, 68 TP_ARGS(start, end, npurged), 71 __field(unsigned long, start) 77 __entry->start = start; 83 __entry->start, __entry->end, __entry->npurged)
|
| btrfs.h | 301 __entry->start = map->start; 338 __entry->start = start; 663 __entry->start = start; 745 __entry->start = start; 1213 __entry->start = start; 1410 __entry->start = start; 1671 __entry->start = start; 2076 __entry->start = start; 2109 __entry->start = start; 2143 __entry->start = start; [all …]
|
| fsi_master_aspeed.h | 76 TP_PROTO(bool start), 77 TP_ARGS(start), 79 __field(bool, start) 82 __entry->start = start; 84 TP_printk("%s", __entry->start ? "start" : "end")
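All of the trace-event entries above follow the same three-step pattern: __field() declares a slot in the trace record, the assign step copies the probe argument into it (__entry->start = start), and TP_printk() formats it for the trace buffer. As a rough userspace analogy of what the fsi_master_aspeed event above records (plain C with illustrative names, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the record declared with __field(bool, start). */
    struct trace_record {
        bool start;
    };

    /* Probe step: the equivalent of __entry->start = start. */
    static void record_event(struct trace_record *rec, bool start)
    {
        rec->start = start;
    }

    int main(void)
    {
        struct trace_record rec;

        record_event(&rec, true);
        /* Output step: TP_printk("%s", __entry->start ? "start" : "end"). */
        printf("%s\n", rec.start ? "start" : "end");
        return 0;
    }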
|
| /include/linux/ |
| ioport.h | 22 resource_size_t start; member 159 .start = (_start), \ 285 res->start = start; in resource_set_range() 308 return r1->start <= r2->start && r1->end >= r2->end; in resource_contains() 314 return r1->start <= r2->end && r1->end >= r2->start; in resource_overlaps() 322 r->start = max(r1->start, r2->start); in resource_intersection() 332 r->start = min(r1->start, r2->start); in resource_union() 352 resource_size_t start, 357 #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) argument 358 #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) argument [all …]
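The resource helpers above all treat [start, end] as a closed interval: resource_contains() and resource_overlaps() compare both endpoints, while resource_intersection() and resource_union() take the larger/smaller of the starts and ends. A minimal userspace sketch of the same comparisons (plain integers instead of resource_size_t; this is not the kernel implementation):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct res { uint64_t start, end; };   /* closed interval [start, end] */

    static bool res_contains(const struct res *r1, const struct res *r2)
    {
        return r1->start <= r2->start && r1->end >= r2->end;
    }

    static bool res_overlaps(const struct res *r1, const struct res *r2)
    {
        return r1->start <= r2->end && r1->end >= r2->start;
    }

    /* Intersection keeps the larger start and the smaller end. */
    static void res_intersection(const struct res *r1, const struct res *r2,
                                 struct res *out)
    {
        out->start = r1->start > r2->start ? r1->start : r2->start;
        out->end   = r1->end   < r2->end   ? r1->end   : r2->end;
    }

    int main(void)
    {
        struct res a = { 0x100, 0x1ff }, b = { 0x180, 0x2ff }, i;

        res_intersection(&a, &b, &i);
        printf("overlaps=%d contains=%d intersection=[%#llx, %#llx]\n",
               res_overlaps(&a, &b), res_contains(&a, &b),
               (unsigned long long)i.start, (unsigned long long)i.end);
        return 0;
    }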
|
| range.h | 7 u64 start; member 13 return range->end - range->start + 1; in range_len() 20 return r1->start <= r2->start && r1->end >= r2->end; in range_contains() 27 return r1->start <= r2->end && r1->end >= r2->start; in range_overlaps() 31 u64 start, u64 end); 35 u64 start, u64 end); 37 void subtract_range(struct range *range, int az, u64 start, u64 end); 45 .start = (_start), \
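struct range likewise uses an inclusive end, which is why range_len() returns end - start + 1: the 0x1000-byte range {start = 0x1000, end = 0x1fff} has length 0x1000, not 0xfff. A tiny userspace sketch of that convention (mirroring the helper shown above, not the kernel header itself):

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };   /* end is inclusive */

    static uint64_t range_len(const struct range *r)
    {
        return r->end - r->start + 1;
    }

    int main(void)
    {
        struct range r = { .start = 0x1000, .end = 0x1fff };

        /* Prints 4096: the closed interval covers 0x1000..0x1fff. */
        printf("%llu\n", (unsigned long long)range_len(&r));
        return 0;
    }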
|
| firmware-map.h | 17 int firmware_map_add_early(u64 start, u64 end, const char *type); 18 int firmware_map_add_hotplug(u64 start, u64 end, const char *type); 19 int firmware_map_remove(u64 start, u64 end, const char *type); 23 static inline int firmware_map_add_early(u64 start, u64 end, const char *type) in firmware_map_add_early() argument 28 static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type) in firmware_map_add_hotplug() argument 33 static inline int firmware_map_remove(u64 start, u64 end, const char *type) in firmware_map_remove() argument
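The second half of the firmware-map.h entry shows a common kernel idiom: when the relevant config option is enabled the functions are real declarations, otherwise static inline stubs with the same signature compile away, so callers never need their own #ifdef. A userspace sketch of that pattern, with a made-up feature name:

    #include <stdio.h>

    /* Build with -DHAVE_FEATURE to get the "real" implementation. */
    #ifdef HAVE_FEATURE
    int feature_map_add(unsigned long long start, unsigned long long end)
    {
        printf("mapped [%#llx, %#llx]\n", start, end);
        return 0;
    }
    #else
    /* Compiled-out variant: same signature, does nothing. */
    static inline int feature_map_add(unsigned long long start,
                                      unsigned long long end)
    {
        (void)start;
        (void)end;
        return 0;
    }
    #endif

    int main(void)
    {
        /* Call sites look identical whether or not the feature is built in. */
        return feature_map_add(0x1000, 0x1fff);
    }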
|
| mmu_notifier.h | 102 unsigned long start, 112 unsigned long start, 200 unsigned long start, 264 unsigned long start; member 379 unsigned long start, 406 unsigned long start, in mmu_notifier_clear_flush_young() argument 415 unsigned long start, in mmu_notifier_clear_young() argument 503 range->start = start; in mmu_notifier_range_init() 569 unsigned long start; member 577 range->start = start; in _mmu_notifier_range_init() [all …]
|
| interval_tree_generic.h | 42 ITTYPE start = ITSTART(node), last = ITLAST(node); \ 51 if (start < ITSTART(parent)) \ 91 if (start <= left->ITSUBTREE) { \ 105 if (start <= ITLAST(node)) /* Cond2 */ \ 116 ITTYPE start, ITTYPE last) \ 137 if (node->ITSUBTREE < start) \ 144 return ITPREFIX ## _subtree_search(node, start, last); \ 148 ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ 162 if (start <= right->ITSUBTREE) \ 164 start, last); \ [all …]
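The generic interval tree also works on closed intervals: a node [ITSTART, ITLAST] matches a query [start, last] when each interval begins no later than the other one ends (the snippet's Cond2 check, start <= ITLAST(node), is one half of that test), and whole subtrees are pruned when their maximum last value (ITSUBTREE) is below the query start. A userspace sketch of just the overlap predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Closed intervals [start, last], as in interval_tree_generic.h. */
    struct ival { unsigned long start, last; };

    static bool ival_overlaps(const struct ival *node,
                              unsigned long start, unsigned long last)
    {
        /* node begins before the query ends, and ends after it begins */
        return node->start <= last && node->last >= start;
    }

    int main(void)
    {
        struct ival node = { .start = 10, .last = 20 };

        /* Touching at one point still matches, because 'last' is inclusive. */
        printf("%d %d\n", ival_overlaps(&node, 20, 30),
                          ival_overlaps(&node, 21, 30));
        return 0;
    }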
|
| bitmap.h | 189 unsigned long start, 209 unsigned long start, in bitmap_find_next_zero_area() argument 226 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) argument 471 __set_bit(start, map); in bitmap_set() 473 *map |= GENMASK(start + nbits - 1, start); in bitmap_set() 480 __bitmap_set(map, start, nbits); in bitmap_set() 487 __clear_bit(start, map); in bitmap_clear() 489 *map &= ~GENMASK(start + nbits - 1, start); in bitmap_clear() 769 size_t index = BIT_WORD(start); in bitmap_read() 815 offset = start % BITS_PER_LONG; in bitmap_write() [all …]
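bitmap_set() and bitmap_clear() special-case regions that fit inside a single word: the mask covering bits start..start+nbits-1 is built with GENMASK(start + nbits - 1, start) and then OR-ed in or AND-ed out; larger regions fall through to __bitmap_set()/__bitmap_clear(). A userspace sketch of that single-word fast path (the GENMASK definition below is equivalent to the kernel's):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
    /* Bits l..h set, for h < BITS_PER_LONG. */
    #define GENMASK(h, l) \
        ((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

    static void word_bitmap_set(unsigned long *map, unsigned int start,
                                unsigned int nbits)
    {
        *map |= GENMASK(start + nbits - 1, start);
    }

    static void word_bitmap_clear(unsigned long *map, unsigned int start,
                                  unsigned int nbits)
    {
        *map &= ~GENMASK(start + nbits - 1, start);
    }

    int main(void)
    {
        unsigned long map = 0;

        word_bitmap_set(&map, 4, 8);     /* sets bits 4..11 */
        printf("%#lx\n", map);           /* 0xff0 */
        word_bitmap_clear(&map, 6, 4);   /* clears bits 6..9 */
        printf("%#lx\n", map);           /* 0xc30 */
        return 0;
    }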
|
| numa.h | 37 int memory_add_physaddr_to_nid(u64 start); 41 int phys_to_target_node(u64 start); 44 int numa_fill_memblks(u64 start, u64 end); 57 static inline int memory_add_physaddr_to_nid(u64 start) in memory_add_physaddr_to_nid() argument 61 static inline int phys_to_target_node(u64 start) in phys_to_target_node() argument
|
| vhost_iotlb.h | 10 u64 start; member 33 int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last, 35 int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last, 37 void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last); 46 vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last); 48 vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last);
|
| kasan.h | 69 int kasan_add_zero_shadow(void *start, unsigned long size); 70 void kasan_remove_zero_shadow(void *start, unsigned long size); 84 static inline void kasan_remove_zero_shadow(void *start, in kasan_remove_zero_shadow() argument 576 static inline int kasan_populate_vmalloc(unsigned long start, in kasan_populate_vmalloc() argument 581 static inline void kasan_release_vmalloc(unsigned long start, in kasan_release_vmalloc() argument 596 return __kasan_unpoison_vmalloc(start, size, flags); in kasan_unpoison_vmalloc() 597 return (void *)start; in kasan_unpoison_vmalloc() 605 __kasan_poison_vmalloc(start, size); in kasan_poison_vmalloc() 612 static inline int kasan_populate_vmalloc(unsigned long start, in kasan_populate_vmalloc() argument 617 static inline void kasan_release_vmalloc(unsigned long start, in kasan_release_vmalloc() argument [all …]
|
| numa_memblks.h | 14 u64 start; member 24 int __init numa_add_memblk(int nodeid, u64 start, u64 end); 25 int __init numa_add_reserved_memblk(int nid, u64 start, u64 end); 54 extern int phys_to_target_node(u64 start); 56 extern int memory_add_physaddr_to_nid(u64 start);
|
| memory_hotplug.h | 90 bool mhp_range_allowed(u64 start, u64 size, bool need_mapping); 141 extern int arch_add_memory(int nid, u64 start, u64 size, 282 extern int remove_memory(u64 start, u64 size); 283 extern void __remove_memory(u64 start, u64 size); 284 extern int offline_and_remove_memory(u64 start, u64 size); 295 static inline int remove_memory(u64 start, u64 size) in remove_memory() argument 300 static inline void __remove_memory(u64 start, u64 size) {} in __remove_memory() argument 309 extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); 312 extern int add_memory_driver_managed(int nid, u64 start, u64 size, 332 extern int arch_create_linear_mapping(int nid, u64 start, u64 size, [all …]
|
| if_tunnel.h | 14 #define for_each_ip_tunnel_rcu(pos, start) \ argument 15 for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))
|
| kmsan.h | 141 int __must_check kmsan_vmap_pages_range_noflush(unsigned long start, 155 void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end); 181 void kmsan_iounmap_page_range(unsigned long start, unsigned long end); 350 unsigned long start, unsigned long end, pgprot_t prot, in kmsan_vmap_pages_range_noflush() argument 356 static inline void kmsan_vunmap_range_noflush(unsigned long start, in kmsan_vunmap_range_noflush() argument 361 static inline int __must_check kmsan_ioremap_page_range(unsigned long start, in kmsan_ioremap_page_range() argument 370 static inline void kmsan_iounmap_page_range(unsigned long start, in kmsan_iounmap_page_range() argument
|
| apple-gmux.h | 126 ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR); in apple_gmux_detect() 127 ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR); in apple_gmux_detect() 128 ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE); in apple_gmux_detect() 130 if (apple_gmux_is_indexed(res->start)) in apple_gmux_detect() 137 if (res && apple_gmux_is_mmio(res->start)) in apple_gmux_detect()
|
| /include/uapi/linux/netfilter/ |
| nfnetlink_compat.h | 51 #define NFA_NEST_END(skb, start) \ argument 52 ({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ 54 #define NFA_NEST_CANCEL(skb, start) \ argument 55 ({ if (start) \ 56 skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
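NFA_NEST_END() back-patches the nested attribute's length once its payload is in place: nfa_len becomes the distance from the attribute header (start) to the current skb tail, while NFA_NEST_CANCEL() simply trims the buffer back to start. A userspace sketch of the same back-patching idea on a plain byte buffer (illustrative names, not the netfilter API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct buf {
        unsigned char data[64];
        unsigned char *tail;      /* next free byte, like skb_tail_pointer() */
    };

    /* Reserve a 2-byte length header and remember where it lives ("start"). */
    static unsigned char *nest_begin(struct buf *b)
    {
        unsigned char *start = b->tail;

        b->tail += 2;             /* length gets filled in later */
        return start;
    }

    /* Back-patch the length now that the payload size is known. */
    static void nest_end(struct buf *b, unsigned char *start)
    {
        uint16_t len = (uint16_t)(b->tail - start);

        memcpy(start, &len, sizeof(len));
    }

    int main(void)
    {
        struct buf b;
        uint16_t len;

        b.tail = b.data;
        unsigned char *start = nest_begin(&b);
        memcpy(b.tail, "payload", 7);
        b.tail += 7;
        nest_end(&b, start);

        memcpy(&len, start, sizeof(len));
        printf("nested length: %u\n", len);   /* 9: 2-byte header + 7 bytes */
        return 0;
    }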
|
| /include/drm/ |
| drm_vma_manager.h | 69 unsigned long start, 97 unsigned long start, in drm_vma_offset_exact_lookup_locked() argument 102 node = drm_vma_offset_lookup_locked(mgr, start, pages); in drm_vma_offset_exact_lookup_locked() 103 return (node && node->vm_node.start == start) ? node : NULL; in drm_vma_offset_exact_lookup_locked() 171 return node->vm_node.start; in drm_vma_node_start() 205 return ((__u64)node->vm_node.start) << PAGE_SHIFT; in drm_vma_node_offset_addr()
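Here start is a page index: drm_vma_node_start() returns it directly, the exact-lookup helper only accepts a node whose vm_node.start equals the requested page, and drm_vma_node_offset_addr() converts it to a byte offset by shifting left by PAGE_SHIFT. A small sketch of that page-to-byte arithmetic (PAGE_SHIFT hard-coded to 12 here; it is architecture-dependent in the kernel):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages assumed for this sketch */

    /* Byte offset for a node whose range starts at the given page index. */
    static uint64_t node_offset_addr(unsigned long start_page)
    {
        return (uint64_t)start_page << PAGE_SHIFT;
    }

    int main(void)
    {
        /* A node starting at page 0x2000 maps at byte offset 0x2000000. */
        printf("%#llx\n", (unsigned long long)node_offset_addr(0x2000));
        return 0;
    }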
|
| /include/asm-generic/ |
| cacheflush.h | 36 unsigned long start, in flush_cache_range() argument 71 static inline void flush_icache_range(unsigned long start, unsigned long end) in flush_icache_range() argument 89 static inline void flush_cache_vmap(unsigned long start, unsigned long end) in flush_cache_vmap() argument 95 static inline void flush_cache_vmap_early(unsigned long start, unsigned long end) in flush_cache_vmap_early() argument 101 static inline void flush_cache_vunmap(unsigned long start, unsigned long end) in flush_cache_vunmap() argument
|
| /include/uapi/mtd/ |
| mtd-abi.h | 27 __u32 start; member 32 __u64 start; member 37 __u32 start; member 43 __u64 start; member 85 __u64 start; member 131 __u64 start; member 194 __u32 start; member
|
| /include/net/ |
| fib_rules.h | 16 kuid_t start; member 143 return range->start != 0 && range->end != 0; in fib_rule_port_range_set() 149 return ntohs(port) >= a->start && in fib_rule_port_inrange() 156 if ((range->start ^ ntohs(port)) & port_mask) in fib_rule_port_match() 166 return a->start != 0 && a->end != 0 && a->end < 0xffff && in fib_rule_port_range_valid() 167 a->start <= a->end; in fib_rule_port_range_valid() 173 return a->start == b->start && in fib_rule_port_range_compare() 180 return range->start != range->end; in fib_rule_port_is_range()
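The port-range helpers above compare in host byte order (hence the ntohs() calls): fib_rule_port_inrange() is a plain inclusive range check, and the check visible in fib_rule_port_match() additionally supports a mask, rejecting a port that disagrees with start on any bit selected by port_mask. A userspace sketch of those two checks (simplified names, not the kernel header):

    #include <arpa/inet.h>   /* ntohs(), htons() */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct port_range { uint16_t start, end; };   /* host byte order */

    /* Inclusive range check, as in fib_rule_port_inrange(). */
    static bool port_inrange(const struct port_range *a, uint16_t port_be)
    {
        uint16_t p = ntohs(port_be);

        return p >= a->start && p <= a->end;
    }

    /*
     * Masked match: the port matches when it agrees with 'start' on every
     * bit selected by port_mask, i.e. ((start ^ port) & mask) == 0.
     */
    static bool port_mask_match(uint16_t start, uint16_t port_be,
                                uint16_t port_mask)
    {
        return ((start ^ ntohs(port_be)) & port_mask) == 0;
    }

    int main(void)
    {
        struct port_range r = { .start = 8000, .end = 8080 };

        printf("%d %d\n", port_inrange(&r, htons(8080)),
                          port_inrange(&r, htons(8081)));
        /* Mask 0xfff0 groups ports 8080..8095 (0x1f90..0x1f9f) together. */
        printf("%d %d\n",
               port_mask_match(8080, htons(8095), 0xfff0),
               port_mask_match(8080, htons(8096), 0xfff0));
        return 0;
    }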
|