/xen/xen/xsm/flask/ss/

context.h
      38  memset(&c->range, 0, sizeof(c->range));  in mls_context_init()
      48  dst->range.level[0].sens = src->range.level[0].sens;  in mls_context_cpy()
      49  rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);  in mls_context_cpy()
      53  dst->range.level[1].sens = src->range.level[1].sens;  in mls_context_cpy()
      54  rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);  in mls_context_cpy()
      71  dst->range.level[0].sens = src->range.level[0].sens;  in mls_context_cpy_low()
      76  dst->range.level[1].sens = src->range.level[0].sens;  in mls_context_cpy_low()
      89  return ((c1->range.level[0].sens == c2->range.level[0].sens) &&  in mls_context_cmp()
      90  ebitmap_cmp(&c1->range.level[0].cat,&c2->range.level[0].cat) &&  in mls_context_cmp()
      91  (c1->range.level[1].sens == c2->range.level[1].sens) &&  in mls_context_cmp()
    [all …]
|
mls.c
      43  int index_sens = context->range.level[l].sens;  in mls_compute_context_len()
      49  e = &context->range.level[l].cat;  in mls_compute_context_len()
     114  e = &context->range.level[l].cat;  in mls_sid_to_context()
     218  if ( !mls_range_isvalid(p, &c->range) )  in mls_context_isvalid()
     230  if ( !mls_range_contains(usrdatum->range, c->range) )  in mls_context_isvalid()
     366  context->range.level[1].sens = context->range.level[0].sens;  in mls_context_to_sid()
     368  &context->range.level[0].cat);  in mls_context_to_sid()
     389  context->range.level[l].sens = range->level[l].sens;  in mls_range_set()
     391  &range->level[l].cat);  in mls_range_set()
     438  ebitmap_destroy(&c->range.level[l].cat);  in mls_convert_context()
    [all …]
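The accesses above pin down the shape of the MLS types: a range is a pair of levels (low and high), each a sensitivity plus a category bitmap, and mls_context_cpy_low() deliberately copies the source's low level into both slots, collapsing the range to a single level. A minimal sketch of those types, with names assumed to be close to (but not copied from) the Flask security server's:

    #include <stdint.h>

    /* Stand-in for Flask's extensible bitmap of category bits. */
    struct ebitmap { uint64_t maps[16]; };

    struct mls_level {
        uint32_t sens;              /* sensitivity, e.g. s0..s15 */
        struct ebitmap cat;         /* category set, e.g. c0..c255 */
    };

    struct mls_range {
        struct mls_level level[2];  /* [0] = low/effective, [1] = high */
    };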
|
/xen/xen/arch/x86/x86_64/

mmconf-fam10h.c
      44  struct range {  in get_fam10h_pci_mmconf_base()  (struct)
      46  } range[8];  in get_fam10h_pci_mmconf_base()  (local)
     100  if (range[j - 1].start < start)  in get_fam10h_pci_mmconf_base()
     102  range[j] = range[j - 1];  in get_fam10h_pci_mmconf_base()
     104  range[j].start = start;  in get_fam10h_pci_mmconf_base()
     105  range[j].end = end;  in get_fam10h_pci_mmconf_base()
     116  if (range[hi_mmio_num - 1].end < start)  in get_fam10h_pci_mmconf_base()
     118  if (range[0].start > start + SIZE)  in get_fam10h_pci_mmconf_base()
     122  start = (range[0].start & MASK) - UNIT;  in get_fam10h_pci_mmconf_base()
     130  start = (range[i - 1].end + UNIT) & MASK;  in get_fam10h_pci_mmconf_base()
    [all …]
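What these fragments show is an insertion sort of up to eight occupied high-MMIO ranges, followed by a probe for an aligned, SIZE-byte hole below, between, or above them. A standalone sketch of that gap search; find_hole, base, limit, align and size are hypothetical names, not Xen's:

    #include <stddef.h>
    #include <stdint.h>

    struct range { uint64_t start, end; };   /* end is inclusive */

    /* Return an align-aligned, size-byte hole that avoids every busy
     * range (sorted by start), or 0 if none fits below limit. */
    static uint64_t find_hole(const struct range *busy, size_t n,
                              uint64_t base, uint64_t limit,
                              uint64_t align, uint64_t size)
    {
        uint64_t start = base;           /* assumed already aligned */

        for (size_t i = 0; i <= n; i++) {
            uint64_t next = (i < n) ? busy[i].start : limit;

            if (next >= start + size)
                return start;            /* gap before busy[i] fits */
            if (i < n)                   /* skip past busy[i], realign up */
                start = (busy[i].end + align) & ~(align - 1);
        }
        return 0;
    }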
|
machine_kexec.c
      14  int machine_kexec_get_xen(xen_kexec_range_t *range)  in machine_kexec_get_xen()  (argument)
      16  range->start = virt_to_maddr(_start);  in machine_kexec_get_xen()
      17  range->size = virt_to_maddr(_end) - (unsigned long)range->start;  in machine_kexec_get_xen()
|
/xen/xen/common/

rangeset.c
      16  struct range {  (struct)
      48  struct range *x = NULL, *y;  in find_range()
      80  struct rangeset *r, struct range *x, struct range *y)  in insert_range()
      99  struct range *x;  in alloc_range()
     118  struct range *x, *y;  in rangeset_add_range()
     186  struct range *x, *y, *t;  in rangeset_remove_range()
     254  struct range *x;  in rangeset_contains_range()
     273  struct range *x;  in rangeset_overlaps_range()
     293  struct range *x;  in rangeset_report_ranges()
     453  struct range *x;  in rangeset_purge()
    [all …]
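As the function names suggest, a rangeset stores a resource set (e.g. I/O ports or memory frames) as sorted, non-overlapping inclusive intervals and answers add/remove/contains/overlaps queries. A minimal standalone sketch of the core idea; Xen's real version additionally does per-domain bookkeeping, locking, and allocation from the Xen heap:

    #include <stdbool.h>
    #include <stdlib.h>

    /* One inclusive interval in a list kept sorted by s. */
    struct range {
        unsigned long s, e;
        struct range *next;
    };

    /* Insert [s, e], merging every range it overlaps or abuts.
     * (Wrap-around at ULONG_MAX is ignored for brevity.) */
    static void add_range(struct range **head, unsigned long s, unsigned long e)
    {
        struct range **pp = head, *x;

        while (*pp && (*pp)->e + 1 < s)      /* strictly before [s, e] */
            pp = &(*pp)->next;

        while ((x = *pp) && x->s <= e + 1) { /* touches [s, e]: absorb */
            if (x->s < s) s = x->s;
            if (x->e > e) e = x->e;
            *pp = x->next;
            free(x);
        }

        x = malloc(sizeof(*x));
        x->s = s; x->e = e; x->next = *pp;
        *pp = x;
    }

    /* Is [s, e] entirely covered by a single stored range? */
    static bool contains_range(const struct range *head,
                               unsigned long s, unsigned long e)
    {
        for (const struct range *x = head; x && x->s <= s; x = x->next)
            if (e <= x->e)
                return true;
        return false;
    }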
|
kexec.c
     628  range->start = range->size = 0;  in kexec_get_reserve()
     634  int nr = range->nr;  in kexec_get_cpu()
     656  range->start = range->size = 0;  in kexec_get_cpu()
     664  range->size = VMCOREINFO_BYTES;  in kexec_get_vmcoreinfo()
     672  switch ( range->range )  in kexec_get_range_internal()
     675  ret = kexec_get_reserve(range);  in kexec_get_range_internal()
     678  ret = kexec_get_cpu(range);  in kexec_get_range_internal()
     684  ret = machine_kexec_get(range);  in kexec_get_range_internal()
     693  xen_kexec_range_t range;  in kexec_get_range()  (local)
     710  xen_kexec_range_t range;  in kexec_get_range_compat()  (local)
    [all …]
|
unlzma.c
      68  uint32_t range;  (member)
     109  rc->range = 0xFFFFFFFF;  in rc_init()
     129  rc->range <<= 8;  in rc_do_normalize()
     134  if (rc->range < (1 << RC_TOP_BITS))  in rc_normalize()
     145  rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);  in rc_is_bit_0_helper()
     157  rc->range = rc->bound;  in rc_update_bit_0()
     162  rc->range -= rc->bound;  in rc_update_bit_1()
     185  rc->range >>= 1;  in rc_direct_bit()
     186  if (rc->code >= rc->range) {  in rc_direct_bit()
     187  rc->code -= rc->range;  in rc_direct_bit()
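These are the classic moves of a binary range decoder (xz's dec_lzma2.c, further down, uses the same scheme under different constant names): keep a 32-bit window [code, code + range), split it in proportion to an adaptive probability, and renormalize byte-wise when range gets small. A condensed sketch of one bit decode, with illustrative names and input bounds checks omitted:

    #include <stdint.h>

    #define MODEL_TOTAL_BITS 11      /* probabilities are out of 2048 */
    #define MOVE_BITS        5       /* adaptation speed */
    #define TOP              (1u << 24)

    struct rc {
        uint32_t range, code;
        const uint8_t *in;           /* compressed input stream */
    };

    static void rc_normalize(struct rc *rc)
    {
        if (rc->range < TOP) {       /* keep >= 24 bits of precision */
            rc->range <<= 8;
            rc->code = (rc->code << 8) | *rc->in++;
        }
    }

    /* Decode one bit against the adaptive probability *prob. */
    static int rc_bit(struct rc *rc, uint16_t *prob)
    {
        uint32_t bound;

        rc_normalize(rc);
        bound = (rc->range >> MODEL_TOTAL_BITS) * *prob;
        if (rc->code < bound) {      /* the '0' sub-interval */
            rc->range = bound;
            *prob += ((1 << MODEL_TOTAL_BITS) - *prob) >> MOVE_BITS;
            return 0;
        }
        rc->range -= bound;          /* the '1' sub-interval */
        rc->code -= bound;
        *prob -= *prob >> MOVE_BITS;
        return 1;
    }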
|
/xen/xen/arch/x86/hvm/

mtrr.c
     534  list_del(&range->list);  in hvm_destroy_cacheattr_region_list()
     535  xfree(range);  in hvm_destroy_cacheattr_region_list()
     549  list_for_each_entry_rcu ( range,  in hvm_get_mem_pinned_cacheattr()
     556  rc = range->type;  in hvm_get_mem_pinned_cacheattr()
     594  list_for_each_entry ( range,  in hvm_set_mem_pinned_cacheattr()
     597  if ( range->start == gfn_start && range->end == gfn_end )  in hvm_set_mem_pinned_cacheattr()
     602  type = range->type;  in hvm_set_mem_pinned_cacheattr()
     641  list_for_each_entry_rcu ( range,  in hvm_set_mem_pinned_cacheattr()
     645  if ( range->start == gfn_start && range->end == gfn_end )  in hvm_set_mem_pinned_cacheattr()
     647  range->type = type;  in hvm_set_mem_pinned_cacheattr()
    [all …]
|
/xen/xen/arch/x86/include/asm/

amd.h
     124  #define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)  (argument)
     125  #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)  (argument)
     126  #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)  (argument)
|
/xen/xen/drivers/passthrough/amd/

iommu_acpi.c
      49  struct acpi_ivhd_device_range range;  (member)
     486  dev_length = sizeof(*range);  in parse_ivhd_device_range()
     500  first_bdf = range->start.header.id;  in parse_ivhd_device_range()
     507  last_bdf = range->end.header.id;  in parse_ivhd_device_range()
     565  dev_length = sizeof(*range);  in parse_ivhd_device_alias_range()
     579  first_bdf = range->alias.header.id;  in parse_ivhd_device_alias_range()
     586  last_bdf = range->end.header.id;  in parse_ivhd_device_alias_range()
     593  alias_id = range->alias.used_id;  in parse_ivhd_device_alias_range()
     642  dev_length = sizeof(*range);  in parse_ivhd_device_extended_range()
     663  last_bdf = range->end.header.id;  in parse_ivhd_device_extended_range()
    [all …]
|
/xen/xen/arch/arm/arm64/

insn.c
     161  long range)  in branch_imm_common()  (argument)
     167  return range;  in branch_imm_common()
     172  if (offset < -range || offset >= range) {  in branch_imm_common()
     174  return range;  in branch_imm_common()
|
/xen/xen/arch/x86/

machine_kexec.c
     198  int machine_kexec_get(xen_kexec_range_t *range)  in machine_kexec_get()  (argument)
     200  if (range->range != KEXEC_RANGE_MA_XEN)  in machine_kexec_get()
     202  return machine_kexec_get_xen(range);  in machine_kexec_get()
|
/xen/xen/include/xen/

livepatch.h
     113  long range = ARCH_LIVEPATCH_RANGE;  in livepatch_verify_distance()  (local)
     119  if ( offset < -range || offset >= range )  in livepatch_verify_distance()
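Both branch_imm_common() above and livepatch_verify_distance() apply the same asymmetric test: a two's-complement immediate can encode -range but not +range, so valid displacements are [-range, range). A sketch of the check for an n-bit signed branch immediate scaled by 4, as on arm64, where n = 26 gives the familiar +/-128 MiB reach (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static bool branch_reachable(uintptr_t pc, uintptr_t dest, unsigned int n)
    {
        long range = 1L << (n + 1);      /* 2^(n-1) words * 4 bytes */
        long offset = (long)(dest - pc);

        return offset >= -range && offset < range;
    }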
|
kexec.h
      57  int machine_kexec_get(xen_kexec_range_t *range);
      58  int machine_kexec_get_xen(xen_kexec_range_t *range);
|
/xen/docs/man/

xen-pci-device-reservations.7.pod
      17  records reservations made within the device ID range in order to avoid
      24  =item 1. A vendor may request a range of device IDs by submitting a patch to
      27  =item 2. Vendor allocations should be in the range 0xc000-0xfffe to reduce the
      30  =item 3. The vendor is responsible for allocations within the range and should
      38  range | vendor/product
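So the man page documents a reservation process: vendors carve private device IDs for the Xen platform PCI device out of 0xc000-0xfffe and record them in the table whose header appears on line 38. A purely hypothetical entry in that table would read:

    range           | vendor/product
    ----------------+----------------------------------
    0xc000-0xc0ff   | Example Corp. (paravirtual NIC)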
|
/xen/tools/libs/ctrl/

xc_kexec.c
      39  int xc_kexec_get_range(xc_interface *xch, int range, int nr,  in xc_kexec_get_range()  (argument)
      52  get_range->range = range;  in xc_kexec_get_range()
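This is the libxc wrapper over the KEXEC_CMD_kexec_get_range path seen in kexec.c above. A hypothetical caller, with error handling trimmed, might query the crash-kernel reservation like so:

    #include <inttypes.h>
    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint64_t start, size;

        /* Ask Xen for the machine-address range reserved for kexec. */
        if (xc_kexec_get_range(xch, KEXEC_RANGE_MA_CRASH, 0,
                               &size, &start) == 0)
            printf("crash area: %#" PRIx64 " + %#" PRIx64 "\n", start, size);

        xc_interface_close(xch);
        return 0;
    }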
|
/xen/tools/xl/

check-xl-vcpupin-parse
     203  range=$((nr_cpus - cpua))
     204  cpub=$(($RANDOM % range))
     252  range=$((nr_nodes - nodea))
     253  nodeb=$(($RANDOM % range))
|
check-xl-vcpupin-parse.data-example
      29  # A few attempts of pinning to a random range of cpus
      44  # A few attempts of pinning to a random range of nodes
|
/xen/tools/xenmon/

xenmon.py
     188  for i in range(0, NDOMAINS):
     198  for i in range(0, NDOMAINS):
     224  for x in range(0, NDOMAINS):
     304  for i in range(0, NSAMPLES):
     310  for i in range(0, NDOMAINS):
     371  for dom in range(0, NDOMAINS):
     578  for dom in range(0, NDOMAINS):
     593  for i in range(0, NSAMPLES):
     599  for i in range(0, NDOMAINS):
     632  for dom in range(0, NDOMAINS):
    [all …]
|
/xen/xen/arch/x86/mm/

mem_sharing.c
    1456  unsigned long start = range->opaque ?: range->first_gfn;  in range_share()
    1458  while ( range->last_gfn >= start )  in range_share()
    1490  range->opaque = start;  in range_share()
    2153  if ( mso.u.range._pad[0] || mso.u.range._pad[1] ||  in mem_sharing_memop()
    2154  mso.u.range._pad[2] )  in mem_sharing_memop()
    2163  if ( mso.u.range.opaque &&  in mem_sharing_memop()
    2164  (mso.u.range.opaque < mso.u.range.first_gfn ||  in mem_sharing_memop()
    2165  mso.u.range.opaque > mso.u.range.last_gfn) )  in mem_sharing_memop()
    2210  max_cgfn < mso.u.range.last_gfn )  in mem_sharing_memop()
    2217  rc = range_share(d, cd, &mso.u.range);  in mem_sharing_memop()
    [all …]
|
/xen/xen/arch/

Kconfig
       9  range 1 16383
      26  range 2 64
|
/xen/xen/tools/kconfig/tests/rand_nested_choice/

__init__.py
      13  for i in range(20):
|
/xen/xen/common/xz/

dec_lzma2.c
      96  uint32_t range;  (member)
     457  rc->range = (uint32_t)-1;  in rc_reset()
     497  if (rc->range < RC_TOP_VALUE) {  in rc_normalize()
     498  rc->range <<= RC_SHIFT_BITS;  in rc_normalize()
     520  bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;  in rc_bit()
     522  rc->range = bound;  in rc_bit()
     526  rc->range -= bound;  in rc_bit()
     576  rc->range >>= 1;  in rc_direct()
     577  rc->code -= rc->range;  in rc_direct()
     579  rc->code += rc->range & mask;  in rc_direct()
|
/xen/xen/arch/x86/cpu/mtrr/

generic.c
     347  int block=-1, range;  in set_fixed_ranges()  (local)
     350  for (range=0; range < fixed_range_blocks[block].ranges; range++)  in set_fixed_ranges()
     351  set_fixed_range(fixed_range_blocks[block].base_msr + range,  in set_fixed_ranges()
|
/xen/docs/features/

qemu-deprivilege.pandoc
      38  ## Setting up a group and userid range
      48  `xen-qemuuser-range-base` with the first UID. For example, under
      51  adduser --system --uid 131072 --group --no-create-home xen-qemuuser-range-base
      61  for the range base (as is done above) will result in all UIDs being
|