/lib/kunit/
executor_test.c
     47  .start = subsuite, .end = &subsuite[2],  in filter_suites_test()
     57  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);  in filter_suites_test()
     66  KUNIT_ASSERT_EQ(test, got.end - got.start, 1);  in filter_suites_test()
     73  .start = subsuite, .end = &subsuite[2],  in filter_suites_test_glob_test()
     83  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);  in filter_suites_test_glob_test()
     90  KUNIT_ASSERT_EQ(test, got.end - got.start, 1);  in filter_suites_test_glob_test()
    102  .start = subsuite, .end = &subsuite[2],  in filter_suites_to_empty_test()
    154  .start = subsuite, .end = &subsuite[2],  in filter_attr_test()
    173  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);  in filter_attr_test()
    192  .start = subsuite, .end = &subsuite[2],  in filter_attr_empty_test()
    [all …]
executor.c
    156  kfree(suite_set.start);  in kunit_free_suite_set()
    178  const size_t max = suite_set->end - suite_set->start;  in kunit_filter_suites()
    207  filtered_suite = suite_set->start[i];  in kunit_filter_suites()
    248  filtered.start = copy_start;  in kunit_filter_suites()
    278  size_t num_suites = suite_set->end - suite_set->start;  in kunit_exec_run_tests()
    321  num_suites = suite_set.end - suite_set.start;  in kunit_merge_suite_sets()
    322  suite_size = sizeof(suite_set.start);  in kunit_merge_suite_sets()
    337  total_suite_set.start = total_suite_start;  in kunit_merge_suite_sets()
    377  if (!suite_set.start)  in kunit_run_all_tests()
    393  kfree(suite_set.start);  in kunit_run_all_tests()
    [all …]
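Most of the executor.c hits touch KUnit's suite_set, which describes a group of suites as a start/end pointer pair; the count at lines 178 and 278 is simply end - start. A minimal userspace sketch of that pointer-pair pattern (the struct names below are illustrative stand-ins, not the KUnit types):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for a KUnit-style suite set: a run of suites
 * described by a start/end pointer pair rather than pointer-plus-count. */
struct suite {
        const char *name;
};

struct suite_set {
        struct suite **start;   /* first element */
        struct suite **end;     /* one past the last element */
};

int main(void)
{
        struct suite a = { "a" }, b = { "b" }, c = { "c" };
        struct suite *suites[] = { &a, &b, &c };
        struct suite_set set = {
                .start = suites,
                .end = suites + sizeof(suites) / sizeof(suites[0]),
        };

        /* The element count falls out of pointer arithmetic, the same
         * "end - start" seen at executor.c:178 and :278 above. */
        size_t num = set.end - set.start;

        printf("%zu suites\n", num);
        for (struct suite **p = set.start; p < set.end; p++)
                printf("  %s\n", (*p)->name);
        return 0;
}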
/lib/
bitmap-str.c
    213  unsigned int start;  [member]
    222  unsigned int start;  in bitmap_set_region()  [local]
    224  for (start = r->start; start <= r->end; start += r->group_len)  in bitmap_set_region()
    225  bitmap_set(bitmap, start, min(r->end - start + 1, r->off));  in bitmap_set_region()
    289  while (start <= end && __end_of_region(*end))  in bitmap_find_region_reverse()
    300  r->start = 0;  in bitmap_parse_region()
    307  str = bitmap_getnum(str, &r->start, lastbit);  in bitmap_parse_region()
    338  r->end = r->start;  in bitmap_parse_region()
    446  if (start > end || __end_of_region(*end))  in bitmap_get_x32_reverse()
    483  end = bitmap_find_region_reverse(start, end);  in bitmap_parse()
    [all …]
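Lines 224-225 are the loop that applies a bitmap region spec of the form start-end:off/group_len: for every group_len-wide group between start and end, the first off bits get set. A userspace sketch of that same loop over a byte-backed bitmap (the helper names are mine, not the kernel API):

#include <stdio.h>
#include <string.h>

#define NBITS 32

/* Rough mirror of the region struct bitmap_parse_region() fills in:
 * "start-end:off/group_len" sets the first off bits of every
 * group_len-wide group between start and end. */
struct region {
        unsigned int start, off, group_len, end;
};

static void set_bit_(unsigned char *map, unsigned int bit)
{
        map[bit / 8] |= 1u << (bit % 8);
}

static void bitmap_set(unsigned char *map, unsigned int start, unsigned int len)
{
        while (len--)
                set_bit_(map, start++);
}

/* Same shape as the loop at bitmap-str.c:224-225. */
static void bitmap_set_region(const struct region *r, unsigned char *map)
{
        unsigned int start, left;

        for (start = r->start; start <= r->end; start += r->group_len) {
                left = r->end - start + 1;
                bitmap_set(map, start, left < r->off ? left : r->off);
        }
}

int main(void)
{
        unsigned char map[NBITS / 8] = { 0 };
        /* "0-15:2/4" sets bits 0,1, 4,5, 8,9, 12,13. */
        struct region r = { .start = 0, .end = 15, .off = 2, .group_len = 4 };

        bitmap_set_region(&r, map);
        for (unsigned int i = 0; i < NBITS; i++)
                putchar((map[i / 8] >> (i % 8)) & 1 ? '1' : '0');
        putchar('\n');
        return 0;
}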
test_hmm.c
     394  start = cmd->addr;  in dmirror_read()
     396  if (end < start)  in dmirror_read()
     462  if (end < start)  in dmirror_write()
     515  devmem->pagemap.range.start = res->start;  in dmirror_allocate_chunk()
     732  unsigned long start = args->start;  in dmirror_migrate_finalize_and_map()  [local]
     778  if (end < start)  in dmirror_exclusive()
     829  unsigned long start = args->start;  in dmirror_devmem_fault_alloc_and_copy()  [local]
     890  if (end < start)  in dmirror_migrate_to_system()
     951  if (end < start)  in dmirror_migrate_to_device()
    1098  range->start, range->end - range->start,  in dmirror_range_snapshot()
    [all …]
interval_tree_test.c
     52  nodes[i].start = a;  in init()
    113  results += search(&root, start, last);  in search_check()
    133  unsigned long start, last;  in intersection_range_check()  [local]
    163  start = 0UL;  in intersection_range_check()
    167  start = (prandom_u32_state(&rnd) >> 4) % last;  in intersection_range_check()
    175  if (start <= node->last && last >= node->start)  in intersection_range_check()
    243  unsigned long start, last;  in span_iteration_check()  [local]
    270  start = 0UL;  in span_iteration_check()
    274  start = (prandom_u32_state(&rnd) >> 4) % last;  in span_iteration_check()
    277  mas_span.first_index = start;  in span_iteration_check()
    [all …]
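Line 175 is the closed-interval overlap test the brute-force checker uses to validate the tree query: [start, last] and [node->start, node->last] intersect exactly when each one starts no later than the other ends. A small standalone illustration of that check:

#include <stdbool.h>
#include <stdio.h>

/* Closed interval [start, last], as in an interval-tree node. */
struct node {
        unsigned long start, last;
};

/* Two closed intervals overlap iff each starts no later than the other
 * ends -- the same test as interval_tree_test.c:175. */
static bool overlaps(unsigned long start, unsigned long last,
                     const struct node *node)
{
        return start <= node->last && last >= node->start;
}

int main(void)
{
        struct node nodes[] = { { 0, 9 }, { 20, 29 }, { 25, 40 } };
        unsigned long start = 8, last = 22;
        int hits = 0;

        /* Brute-force count: the reference the test compares the rbtree
         * search result against. */
        for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
                if (overlaps(start, last, &nodes[i]))
                        hits++;

        printf("query [%lu,%lu] overlaps %d interval(s)\n", start, last, hits);
        return 0;
}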
find_bit.c
     51  #define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \  [argument]
     53  unsigned long mask, idx, tmp, sz = (size), __start = (start); \
    157  return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);  in _find_next_bit()
    193  unsigned long nbits, unsigned long start)  in _find_next_and_bit()  [argument]
    195  return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);  in _find_next_and_bit()
    202  unsigned long nbits, unsigned long start)  in _find_next_andnot_bit()  [argument]
    204  return FIND_NEXT_BIT(addr1[idx] & ~addr2[idx], /* nop */, nbits, start);  in _find_next_andnot_bit()
    211  unsigned long nbits, unsigned long start)  in _find_next_or_bit()  [argument]
    213  return FIND_NEXT_BIT(addr1[idx] | addr2[idx], /* nop */, nbits, start);  in _find_next_or_bit()
    220  unsigned long start)  in _find_next_zero_bit()  [argument]
    [all …]
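Every FIND_NEXT_BIT() expansion above boils down to the same idea: mask off the bits below start in the first word, then skip whole words until a nonzero one shows up. A plain-C userspace sketch of that shape (it relies on the GCC/Clang __builtin_ctzl and is not the generated kernel code):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Portable sketch of the find-next-bit idea: drop the bits below 'start'
 * in the first word with a mask, then walk whole words until one is
 * nonzero and take its lowest set bit. */
static unsigned long find_next_bit(const unsigned long *addr,
                                   unsigned long nbits, unsigned long start)
{
        unsigned long idx, tmp, mask;

        if (start >= nbits)
                return nbits;

        idx = start / BITS_PER_LONG;
        mask = ~0UL << (start % BITS_PER_LONG);  /* ignore bits below start */
        tmp = addr[idx] & mask;

        while (!tmp) {
                if (++idx * BITS_PER_LONG >= nbits)
                        return nbits;
                tmp = addr[idx];
        }

        start = idx * BITS_PER_LONG + __builtin_ctzl(tmp);
        return start < nbits ? start : nbits;
}

int main(void)
{
        unsigned long map[4] = { 0 };
        unsigned long nbits = sizeof(map) * 8;

        map[0] |= 1UL << 5;
        map[1] |= 1UL << 3;     /* bit BITS_PER_LONG + 3 */

        printf("%lu\n", find_next_bit(map, nbits, 0));  /* 5 */
        printf("%lu\n", find_next_bit(map, nbits, 6));  /* BITS_PER_LONG + 3 */
        printf("%lu\n", find_next_bit(map, nbits, 3 * BITS_PER_LONG));  /* nbits: none */
        return 0;
}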
genalloc.c
     87  unsigned long *p = map + BIT_WORD(start);  in bitmap_set_ll()
     88  const unsigned long size = start + nr;  in bitmap_set_ll()
    123  unsigned long *p = map + BIT_WORD(start);  in bitmap_clear_ll()
    124  const unsigned long size = start + nr;  in bitmap_clear_ll()
    557  unsigned long end = start + size - 1;  in gen_pool_has_addr()
    562  if (start >= chunk->start_addr && start <= chunk->end_addr) {  in gen_pool_has_addr()
    648  unsigned long start, unsigned int nr, void *data,  in gen_pool_first_fit()  [argument]
    667  unsigned long start, unsigned int nr, void *data,  in gen_pool_first_fit_align()  [argument]
    695  unsigned long start, unsigned int nr, void *data,  in gen_pool_fixed_alloc()  [argument]
    710  start + offset_bit, nr, 0);  in gen_pool_fixed_alloc()
    [all …]
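The gen_pool_first_fit() family (lines 648 onwards) delegates the actual search to bitmap_find_next_zero_area(): find nr consecutive clear bits at or after start. A naive userspace sketch of that first-fit search (helper names are mine, not the kernel's):

#include <stdio.h>

#define POOL_BITS 64

static int test_bit(const unsigned char *map, unsigned int bit)
{
        return (map[bit / 8] >> (bit % 8)) & 1;
}

static void set_bit_(unsigned char *map, unsigned int bit)
{
        map[bit / 8] |= 1u << (bit % 8);
}

/* Naive first-fit: find 'nr' consecutive clear bits at or after 'start',
 * the job gen_pool_first_fit() hands to bitmap_find_next_zero_area(). */
static unsigned int find_zero_area(const unsigned char *map, unsigned int size,
                                   unsigned int start, unsigned int nr)
{
        unsigned int run = 0, i;

        for (i = start; i < size; i++) {
                run = test_bit(map, i) ? 0 : run + 1;
                if (run == nr)
                        return i - nr + 1;
        }
        return size;    /* no fit */
}

int main(void)
{
        unsigned char map[POOL_BITS / 8] = { 0 };
        unsigned int i, pos;

        /* Mark bits 0-9 and 12-19 allocated, leaving a 2-bit hole at 10. */
        for (i = 0; i < 10; i++)
                set_bit_(map, i);
        for (i = 12; i < 20; i++)
                set_bit_(map, i);

        pos = find_zero_area(map, POOL_BITS, 0, 4);     /* skips the 2-bit hole */
        printf("first fit for 4 bits: %u\n", pos);

        for (i = pos; i < pos + 4; i++)                 /* commit the allocation */
                set_bit_(map, i);
        printf("next fit for 4 bits: %u\n", find_zero_area(map, POOL_BITS, 0, 4));
        return 0;
}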
logic_iomem.c
     94  if (rreg->res->start > offset)  in ioremap()
    111  offs = rreg->ops->map(offset - found->res->start,  in ioremap()
    257  unsigned long offs, start;  in memset_io()  [local]
    264  start = (unsigned long)addr & AREA_MASK;  in memset_io()
    267  area->ops->set(area->priv, start, value, size);  in memset_io()
    272  area->ops->write(area->priv, start + offs, 1, value);  in memset_io()
    281  unsigned long offs, start;  in memcpy_fromio()  [local]
    288  start = (unsigned long)addr & AREA_MASK;  in memcpy_fromio()
    304  unsigned long offs, start;  in memcpy_toio()  [local]
    311  start = (unsigned long)addr & AREA_MASK;  in memcpy_toio()
    [all …]
seq_buf.c
    117  const char *start, *lf;  in seq_buf_do_printk()  [local]
    122  start = seq_buf_str(s);  in seq_buf_do_printk()
    123  while ((lf = strchr(start, '\n'))) {  in seq_buf_do_printk()
    124  int len = lf - start + 1;  in seq_buf_do_printk()
    126  printk("%s%.*s", lvl, len, start);  in seq_buf_do_printk()
    127  start = ++lf;  in seq_buf_do_printk()
    131  if (start < s->buffer + s->len)  in seq_buf_do_printk()
    132  printk("%s%s\n", lvl, start);  in seq_buf_do_printk()
    365  if (len <= start)
    368  len -= start;
    [all …]
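Lines 122-132 are essentially the whole seq_buf_do_printk() loop: print the buffer one newline-terminated chunk at a time with a level prefix, then flush any unterminated tail with an added newline. A userspace sketch of the same loop, with printf standing in for printk:

#include <stdio.h>
#include <string.h>

/* Userspace sketch of the loop shown above: emit a multi-line,
 * NUL-terminated buffer one line at a time, each prefixed with a log
 * level, and terminate a trailing partial line ourselves. */
static void do_printk(const char *lvl, const char *buf, size_t len)
{
        const char *start = buf, *lf;

        while ((lf = strchr(start, '\n'))) {
                int n = lf - start + 1;         /* include the '\n' */

                printf("%s%.*s", lvl, n, start);
                start = ++lf;
        }

        /* Anything left had no trailing newline; add one. */
        if (start < buf + len)
                printf("%s%s\n", lvl, start);
}

int main(void)
{
        const char msg[] = "first line\nsecond line\nunterminated tail";

        do_printk("<6>", msg, strlen(msg));
        return 0;
}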
devres.c
    154  if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {  in __devm_ioremap_resource()
    159  dest_ptr = __devm_ioremap(dev, res->start, size, type);  in __devm_ioremap_resource()
    161  devm_release_mem_region(dev, res->start, size);  in __devm_ioremap_resource()
    355  resource_size_t start;  [member]
    363  arch_io_free_memtype_wc(this->start, this->size);  in devm_arch_io_free_memtype_wc_release()
    376  int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,  in devm_arch_io_reserve_memtype_wc()  [argument]
    387  ret = arch_io_reserve_memtype_wc(start, size);  in devm_arch_io_reserve_memtype_wc()
    393  dr->start = start;  in devm_arch_io_reserve_memtype_wc()
test_objpool.c
     36  struct rw_semaphore start;  [member]
    139  init_rwsem(&data->start);  in ot_init_data()
    220  ktime_t start;  in ot_thread_worker()  [local]
    223  down_read(&test->data.start);  in ot_thread_worker()
    224  up_read(&test->data.start);  in ot_thread_worker()
    225  start = ktime_get();  in ot_thread_worker()
    350  ktime_t start;  in ot_start_sync()  [local]
    389  start = ktime_get();  in ot_start_sync()
    390  up_write(&test->data.start);  in ot_start_sync()
    537  ktime_t start;  in ot_start_async()  [local]
    [all …]
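These hits show a start gate built from an rwsem: each worker does down_read()/up_read() on data->start (lines 223-224) while the controller holds it for write, so the up_write() at line 390 releases all workers at essentially the same moment, right after both sides take ktime_get() timestamps. A pthread sketch of the same gate (compile with -pthread; all names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* "Start gate" pattern: the controller holds a rwlock for write, workers
 * block on a read lock, and releasing the write lock starts every worker
 * at (nearly) the same moment. */

#define NR_WORKERS 4

static pthread_rwlock_t start_gate = PTHREAD_RWLOCK_INITIALIZER;

static void *worker(void *arg)
{
        long id = (long)arg;

        /* Equivalent of down_read()/up_read() on the gate. */
        pthread_rwlock_rdlock(&start_gate);
        pthread_rwlock_unlock(&start_gate);

        printf("worker %ld released\n", id);
        return NULL;
}

int main(void)
{
        pthread_t threads[NR_WORKERS];
        long i;

        /* Hold the gate closed before any worker is created. */
        pthread_rwlock_wrlock(&start_gate);

        for (i = 0; i < NR_WORKERS; i++)
                pthread_create(&threads[i], NULL, worker, (void *)i);

        sleep(1);                       /* let all workers queue up on the gate */
        puts("opening the gate");
        pthread_rwlock_unlock(&start_gate);     /* like up_write(): everyone runs */

        for (i = 0; i < NR_WORKERS; i++)
                pthread_join(threads[i], NULL);
        return 0;
}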
interval_tree.c
      7  #define START(node) ((node)->start)
     44  } while (cur && (state->nodes[0]->last >= cur->start ||  in interval_tree_span_iter_next_gap()
     45  state->nodes[0]->last + 1 == cur->start));  in interval_tree_span_iter_next_gap()
     66  if (iter->nodes[1]->start > first_index) {  in interval_tree_span_iter_first()
     69  iter->last_hole = iter->nodes[1]->start - 1;  in interval_tree_span_iter_first()
    118  iter->last_hole = iter->nodes[1]->start - 1;  in interval_tree_span_iter_next()
iommu-helper.c
     10  unsigned long start, unsigned int nr,  in iommu_area_alloc()  [argument]
     19  index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);  in iommu_area_alloc()
     22  start = ALIGN(shift + index, boundary_size) - shift;  in iommu_area_alloc()
string.c
    821  if (*start != value)  in check_bytes8()
    822  return (void *)start;  in check_bytes8()
    823  start++;  in check_bytes8()
    845  return check_bytes8(start, value, bytes);  in memchr_inv()
    859  prefix = (unsigned long)start % 8;  in memchr_inv()
    864  r = check_bytes8(start, value, prefix);  in memchr_inv()
    867  start += prefix;  in memchr_inv()
    874  if (*(u64 *)start != value64)  in memchr_inv()
    875  return check_bytes8(start, value, 8);  in memchr_inv()
    876  start += 8;  in memchr_inv()
    [all …]
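These string.c hits are the core of memchr_inv(): check the unaligned prefix byte by byte with check_bytes8(), then compare eight bytes at a time against the target value replicated across a u64, falling back to check_bytes8() for any word that differs and for the tail. A simplified userspace sketch of that shape (it skips the kernel's alignment fix-up and uses memcpy for the wide loads):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-by-byte scan, as in check_bytes8(): return the first byte that
 * differs from 'value', or NULL if all match. */
static const unsigned char *check_bytes8(const unsigned char *start,
                                         unsigned char value, size_t bytes)
{
        while (bytes) {
                if (*start != value)
                        return start;
                start++;
                bytes--;
        }
        return NULL;
}

/* Sketch of the memchr_inv() idea: compare whole 64-bit words against the
 * replicated byte value, and drop to the byte loop only for a differing
 * word and for the tail. */
static const void *memchr_inv_sketch(const void *buf, unsigned char value,
                                     size_t bytes)
{
        const unsigned char *start = buf;
        uint64_t value64;

        memset(&value64, value, sizeof(value64));       /* replicate the byte */

        while (bytes >= 8) {
                uint64_t word;

                memcpy(&word, start, 8);                /* unaligned-safe load */
                if (word != value64)
                        return check_bytes8(start, value, 8);
                start += 8;
                bytes -= 8;
        }
        return check_bytes8(start, value, bytes);
}

int main(void)
{
        unsigned char buf[32];
        const unsigned char *hit;

        memset(buf, 0, sizeof(buf));
        hit = memchr_inv_sketch(buf, 0, sizeof(buf));
        printf("all zero: %s\n", hit ? "no" : "yes");

        buf[19] = 0xff;
        hit = memchr_inv_sketch(buf, 0, sizeof(buf));
        printf("stray byte at offset %td\n", hit - buf);
        return 0;
}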
asn1_encoder.c
     92  int start = 7 + 7 + 7 + 7;  in asn1_encode_oid_digit()  [local]
    105  while (oid >> start == 0)  in asn1_encode_oid_digit()
    106  start -= 7;  in asn1_encode_oid_digit()
    108  while (start > 0 && *data_len > 0) {  in asn1_encode_oid_digit()
    111  byte = oid >> start;  in asn1_encode_oid_digit()
    112  oid = oid - (byte << start);  in asn1_encode_oid_digit()
    113  start -= 7;  in asn1_encode_oid_digit()
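asn1_encode_oid_digit() emits one OID sub-identifier in base 128: start begins at 28 (the highest 7-bit group of a u32), leading all-zero groups are skipped, and every group except the last is written with the continuation bit (0x80) set. A standalone sketch of that encoding (the fast path for small values and the error handling are mine, not a copy of the kernel function):

#include <stdint.h>
#include <stdio.h>

/* Split an OID sub-identifier into 7-bit groups, most significant first,
 * setting bit 7 on every group except the last. */
static int encode_oid_digit(uint8_t **data, int *data_len, uint32_t oid)
{
        int start = 7 + 7 + 7 + 7;      /* highest 7-bit group of a u32 */

        /* Single-group values fit in one byte with the top bit clear. */
        if (oid < 128) {
                if (*data_len < 1)
                        return -1;
                *(*data)++ = oid;
                (*data_len)--;
                return 0;
        }

        /* Skip leading all-zero groups. */
        while (oid >> start == 0)
                start -= 7;

        /* Non-final groups carry the continuation bit. */
        while (start > 0 && *data_len > 0) {
                uint8_t byte = oid >> start;

                oid -= (uint32_t)byte << start;
                start -= 7;
                *(*data)++ = byte | 0x80;
                (*data_len)--;
        }

        /* Final group: low 7 bits, continuation bit clear. */
        if (*data_len < 1)
                return -1;
        *(*data)++ = oid;
        (*data_len)--;
        return 0;
}

int main(void)
{
        uint8_t buf[8], *p = buf;
        int len = sizeof(buf);

        /* 113549 (the "rsadsi" arc of 1.2.840.113549) encodes as 86 F7 0D. */
        encode_oid_digit(&p, &len, 113549);
        for (uint8_t *q = buf; q < p; q++)
                printf("%02X ", *q);
        putchar('\n');
        return 0;
}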
/lib/pldmfw/
pldmfw_private.h
    142  #define pldm_first_desc_tlv(start) \  [argument]
    143  ((const struct __pldmfw_desc_tlv *)(start))
    165  #define pldm_for_each_desc_tlv(i, desc, start, count) \  [argument]
    166  for ((i) = 0, (desc) = pldm_first_desc_tlv(start); \
    177  #define pldm_first_record(start) \  [argument]
    178  ((const struct __pldmfw_record_info *)(start))
    199  #define pldm_for_each_record(i, record, start, count) \  [argument]
    200  for ((i) = 0, (record) = pldm_first_record(start); \
    211  #define pldm_first_component(start) \  [argument]
    212  ((const struct __pldmfw_component_info *)(start))
    [all …]
/lib/zstd/common/
bitstream.h
     88  const char* start;  [member]
    243  bitD->start = (const char*)srcBuffer;  in BIT_initDStream()
    244  bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);  in BIT_initDStream()
    253  bitD->ptr = bitD->start;  in BIT_initDStream()
    254  bitD->bitContainer = *(const BYTE*)(bitD->start);  in BIT_initDStream()
    289  return bitContainer >> start;  in BIT_getUpperBits()
    305  return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];  in BIT_getMiddleBits()
    373  assert(bitD->ptr >= bitD->start);  in BIT_reloadDStream_internal()
    407  assert(bitD->ptr >= bitD->start);  in BIT_reloadDStream()
    412  if (bitD->ptr == bitD->start) {  in BIT_reloadDStream()
    [all …]
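BIT_getUpperBits() and BIT_getMiddleBits() (lines 289 and 305) extract bit fields from the 64-bit container the zstd bitstream reader keeps in a register; BIT_mask[] in the real header is a lookup table of (1 << n) - 1 values. A minimal sketch of the two helpers with the mask computed inline instead of looked up:

#include <stdint.h>
#include <stdio.h>

/* get_upper_bits(): keep everything from bit 'start' up.
 * get_middle_bits(): pull out an nbBits-wide field starting at 'start';
 * the & 63 keeps the shift amount inside the 64-bit register, mirroring
 * the regMask used in the original. */
static uint64_t get_upper_bits(uint64_t container, unsigned start)
{
        return container >> start;
}

static uint64_t get_middle_bits(uint64_t container, unsigned start, unsigned nbBits)
{
        unsigned reg_mask = 63;

        return (container >> (start & reg_mask)) & ((1ULL << nbBits) - 1);
}

int main(void)
{
        uint64_t container = 0xABCD1234u;

        /* The 8-bit field at bit offset 8 of 0xABCD1234 is 0x12. */
        printf("0x%llX\n", (unsigned long long)get_middle_bits(container, 8, 8));
        printf("0x%llX\n", (unsigned long long)get_upper_bits(container, 16));
        return 0;
}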
/lib/tests/
usercopy_kunit.c
     49  size_t start, end, i, zero_start, zero_end;  in usercopy_test_check_nonzero_user()  [local]
     64  start = PAGE_SIZE - (size / 2);  in usercopy_test_check_nonzero_user()
     66  kmem += start;  in usercopy_test_check_nonzero_user()
     67  umem += start;  in usercopy_test_check_nonzero_user()
     92  for (start = 0; start <= size; start++) {  in usercopy_test_check_nonzero_user()
     93  for (end = start; end <= size; end++) {  in usercopy_test_check_nonzero_user()
     94  size_t len = end - start;  in usercopy_test_check_nonzero_user()
     95  int retval = check_zeroed_user(umem + start, len);  in usercopy_test_check_nonzero_user()
     96  int expected = is_zeroed(kmem + start, len);  in usercopy_test_check_nonzero_user()
    100  retval, expected, start, end);  in usercopy_test_check_nonzero_user()
/lib/raid6/
neon.h
      4  void raid6_neon1_xor_syndrome_real(int disks, int start, int stop,
      7  void raid6_neon2_xor_syndrome_real(int disks, int start, int stop,
     10  void raid6_neon4_xor_syndrome_real(int disks, int start, int stop,
     13  void raid6_neon8_xor_syndrome_real(int disks, int start, int stop,
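The xor_syndrome() routines prototyped here (and implemented per ISA in avx512.c, avx2.c, sse2.c, rvv.c and loongarch_simd.c below) update P/Q parity for a partial-stripe write: only data blocks start..stop contribute a delta, blocks below start add only extra multiply-by-2 rounds to the Q accumulator, and the resulting P/Q delta is XORed into the existing parity. A byte-at-a-time sketch of that loop structure, assuming the GF(2^8) generator with the 0x1d reduction polynomial used by the kernel's RAID-6 code (the SIMD versions do the same thing one vector register at a time):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* GF(2^8) multiply by 2: shift left and reduce with the 0x1d polynomial
 * when the top bit was set. */
static uint8_t gf_mul2(uint8_t v)
{
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/* Byte-wise sketch of the xor_syndrome() family: dptr[0..disks-3] are data
 * blocks, dptr[disks-2] is P, dptr[disks-1] is Q.  Only blocks start..stop
 * carry a delta; the P/Q delta they imply is folded into existing P/Q. */
static void xor_syndrome(int disks, int start, int stop,
                         size_t bytes, uint8_t **dptr)
{
        uint8_t *p = dptr[disks - 2], *q = dptr[disks - 1];
        int z, z0 = stop;
        size_t d;

        for (d = 0; d < bytes; d++) {
                uint8_t wp = dptr[z0][d], wq = dptr[z0][d];

                for (z = z0 - 1; z >= start; z--) {
                        uint8_t wd = dptr[z][d];

                        wp ^= wd;               /* P delta: plain XOR */
                        wq = gf_mul2(wq) ^ wd;  /* Q delta: Horner step */
                }
                for (z = start - 1; z >= 0; z--)        /* lower blocks: x2 only */
                        wq = gf_mul2(wq);

                p[d] ^= wp;
                q[d] ^= wq;
        }
}

int main(void)
{
        enum { DISKS = 6, BYTES = 8 };  /* 4 data disks + P + Q */
        static uint8_t blocks[DISKS][BYTES];
        uint8_t *dptr[DISKS];
        int i;

        for (i = 0; i < DISKS; i++)
                dptr[i] = blocks[i];

        /* Pretend data disks 1 and 2 changed: dptr[1..2] hold old^new data. */
        memset(blocks[1], 0x11, BYTES);
        memset(blocks[2], 0x22, BYTES);

        xor_syndrome(DISKS, 1, 2, BYTES, dptr);
        printf("P[0]=%02x Q[0]=%02x\n", dptr[DISKS - 2][0], dptr[DISKS - 1][0]);
        return 0;
}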
avx512.c
     99  static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,  in raid6_avx5121_xor_syndrome()  [argument]
    122  for (z = z0-1 ; z >= start ; z--) {  in raid6_avx5121_xor_syndrome()
    136  for (z = start-1 ; z >= 0 ; z--) {  in raid6_avx5121_xor_syndrome()
    231  static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,  in raid6_avx5122_xor_syndrome()  [argument]
    258  for (z = z0-1 ; z >= start ; z--) {  in raid6_avx5122_xor_syndrome()
    281  for (z = start-1 ; z >= 0 ; z--) {  in raid6_avx5122_xor_syndrome()
    421  static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,  in raid6_avx5124_xor_syndrome()  [argument]
    456  for (z = z0-1 ; z >= start ; z--) {  in raid6_avx5124_xor_syndrome()
    505  for (z = start-1 ; z >= 0 ; z--) {  in raid6_avx5124_xor_syndrome()
avx2.c
     82  static void raid6_avx21_xor_syndrome(int disks, int start, int stop,  in raid6_avx21_xor_syndrome()  [argument]
    102  for (z = z0-1 ; z >= start ; z--) {  in raid6_avx21_xor_syndrome()
    113  for (z = start-1 ; z >= 0 ; z--) {  in raid6_avx21_xor_syndrome()
    192  static void raid6_avx22_xor_syndrome(int disks, int start, int stop,  in raid6_avx22_xor_syndrome()  [argument]
    215  for (z = z0-1 ; z >= start ; z--) {  in raid6_avx22_xor_syndrome()
    235  for (z = start-1 ; z >= 0 ; z--) {  in raid6_avx22_xor_syndrome()
    353  static void raid6_avx24_xor_syndrome(int disks, int start, int stop,  in raid6_avx24_xor_syndrome()  [argument]
    382  for (z = z0-1 ; z >= start ; z--) {  in raid6_avx24_xor_syndrome()
    424  for (z = start-1 ; z >= 0 ; z--) {  in raid6_avx24_xor_syndrome()
sse2.c
     87  static void raid6_sse21_xor_syndrome(int disks, int start, int stop,  in raid6_sse21_xor_syndrome()  [argument]
    107  for ( z = z0-1 ; z >= start ; z-- ) {  in raid6_sse21_xor_syndrome()
    118  for ( z = start-1 ; z >= 0 ; z-- ) {  in raid6_sse21_xor_syndrome()
    198  static void raid6_sse22_xor_syndrome(int disks, int start, int stop,  in raid6_sse22_xor_syndrome()  [argument]
    221  for ( z = z0-1 ; z >= start ; z-- ) {  in raid6_sse22_xor_syndrome()
    240  for ( z = start-1 ; z >= 0 ; z-- ) {  in raid6_sse22_xor_syndrome()
    364  static void raid6_sse24_xor_syndrome(int disks, int start, int stop,  in raid6_sse24_xor_syndrome()  [argument]
    393  for ( z = z0-1 ; z >= start ; z-- ) {  in raid6_sse24_xor_syndrome()
    432  for ( z = start-1 ; z >= 0 ; z-- ) {  in raid6_sse24_xor_syndrome()
rvv.c
     98  static void raid6_rvv1_xor_syndrome_real(int disks, int start, int stop,  in raid6_rvv1_xor_syndrome_real()  [argument]
    130  for (z = z0 - 1; z >= start; z--) {  in raid6_rvv1_xor_syndrome_real()
    157  for (z = start - 1; z >= 0; z--) {  in raid6_rvv1_xor_syndrome_real()
    288  static void raid6_rvv2_xor_syndrome_real(int disks, int start, int stop,  in raid6_rvv2_xor_syndrome_real()  [argument]
    326  for (z = z0 - 1; z >= start; z--) {  in raid6_rvv2_xor_syndrome_real()
    362  for (z = start - 1; z >= 0; z--) {  in raid6_rvv2_xor_syndrome_real()
    542  static void raid6_rvv4_xor_syndrome_real(int disks, int start, int stop,  in raid6_rvv4_xor_syndrome_real()  [argument]
    588  for (z = z0 - 1; z >= start; z--) {  in raid6_rvv4_xor_syndrome_real()
    642  for (z = start - 1; z >= 0; z--) {  in raid6_rvv4_xor_syndrome_real()
    982  for (z = z0 - 1; z >= start; z--) {  in raid6_rvv8_xor_syndrome_real()
    [all …]
loongarch_simd.c
    114  static void raid6_lsx_xor_syndrome(int disks, int start, int stop,  in raid6_lsx_xor_syndrome()  [argument]
    145  for (z = z0-1; z >= start; z--) {  in raid6_lsx_xor_syndrome()
    184  for (z = start-1; z >= 0; z--) {  in raid6_lsx_xor_syndrome()
    323  static void raid6_lasx_xor_syndrome(int disks, int start, int stop,  in raid6_lasx_xor_syndrome()  [argument]
    350  for (z = z0-1; z >= start; z--) {  in raid6_lasx_xor_syndrome()
    375  for (z = start-1; z >= 0; z--) {  in raid6_lasx_xor_syndrome()
/lib/dim/
dim.c
     57  bool dim_calc_stats(const struct dim_sample *start,  in dim_calc_stats()  [argument]
     62  u32 delta_us = ktime_us_delta(end->time, start->time);  in dim_calc_stats()
     63  u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);  in dim_calc_stats()
     65  start->byte_ctr);  in dim_calc_stats()
     67  start->comp_ctr);  in dim_calc_stats()
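Line 63 uses BIT_GAP(BITS_PER_TYPE(u32), ...) to take the difference of two free-running u32 counters safely across wraparound, and line 62 does the same for the timestamps via ktime_us_delta(). A small sketch of the idea with a trimmed-down sample struct (field names here are illustrative, not the exact struct dim_sample layout):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe gap between two free-running u32 counters: unsigned
 * subtraction modulo 2^32 gives the right answer even when the counter
 * wrapped between the two samples. */
static uint32_t counter_gap(uint32_t end, uint32_t start)
{
        return end - start;     /* modulo-2^32 arithmetic */
}

/* Trimmed-down stand-in for the two samples being compared: a timestamp
 * plus packet/byte counters taken at the start and end of an interval. */
struct sample {
        uint64_t time_us;
        uint32_t pkt_ctr;
        uint32_t byte_ctr;
};

int main(void)
{
        struct sample start = { .time_us = 1000, .pkt_ctr = 0xFFFFFFF0u, .byte_ctr = 1000 };
        struct sample end   = { .time_us = 6000, .pkt_ctr = 0x00000010u, .byte_ctr = 42000 };

        uint32_t delta_us = (uint32_t)(end.time_us - start.time_us);
        uint32_t npkts  = counter_gap(end.pkt_ctr, start.pkt_ctr);      /* 0x20 despite the wrap */
        uint32_t nbytes = counter_gap(end.byte_ctr, start.byte_ctr);

        /* Per-interval rates, scaled to events per millisecond as a stand-in
         * for the rate fields the real code fills in. */
        printf("pkts=%u bytes=%u ppms=%u bpms=%u\n",
               npkts, nbytes, npkts * 1000 / delta_us, nbytes * 1000 / delta_us);
        return 0;
}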