/xen-4.10.0-shim-comet/tools/libxl/

check-xl-vcpupin-parse.data-example
      14  nodes:all*0*cpumap: all
      15  all,nodes:all*0*cpumap: all
      35  nodes:1*0*cpumap: 8-15
      36  nodes:0*0*cpumap: 0-7
      37  nodes:0*0*cpumap: 0-7
      38  nodes:0*0*cpumap: 0-7
      45  nodes:1-1*0*cpumap: 8-15
      46  nodes:1-1*0*cpumap: 8-15
      47  nodes:0-1*0*cpumap: all
      48  nodes:0-0*0*cpumap: 0-7
    [all …]

libxl.c
     450  uint32_t *nodes;  in libxl_get_pci_topology() (local)
     462  nodes = libxl__zalloc(gc, sizeof(*nodes) * *num_devs);  in libxl_get_pci_topology()
     470  if (xc_pcitopoinfo(ctx->xch, *num_devs, devs, nodes) != 0) {  in libxl_get_pci_topology()
     481  ret[i].node = ((nodes[i] == XEN_INVALID_NODE_ID) ||  in libxl_get_pci_topology()
     482  (nodes[i] == XEN_INVALID_DEV)) ?  in libxl_get_pci_topology()
     483  LIBXL_PCITOPOLOGY_INVALID_ENTRY : nodes[i];  in libxl_get_pci_topology()

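The libxl.c hits above are the body of libxl_get_pci_topology(), which fetches the NUMA node of every PCI device via xc_pcitopoinfo() and maps unknown entries to LIBXL_PCITOPOLOGY_INVALID_ENTRY. A minimal caller sketch, assuming the public libxl.h API (libxl_ctx_alloc(), libxl_get_pci_topology(), libxl_pcitopology_list_free()); error handling is trimmed and this is not an excerpt from the tree above:

    /* Sketch: print each PCI device's NUMA node via libxl. */
    #include <stdio.h>
    #include <libxl.h>

    int main(void)
    {
        libxl_ctx *ctx = NULL;
        int num_devs = 0, i;

        if (libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0, NULL))
            return 1;

        libxl_pcitopology *pt = libxl_get_pci_topology(ctx, &num_devs);
        if (pt) {
            for (i = 0; i < num_devs; i++) {
                /* seg:bus:dev.fn, with devfn split into 5-bit dev / 3-bit fn */
                if (pt[i].node == LIBXL_PCITOPOLOGY_INVALID_ENTRY)
                    printf("%04x:%02x:%02x.%x: node unknown\n", pt[i].seg,
                           pt[i].bus, pt[i].devfn >> 3, pt[i].devfn & 7);
                else
                    printf("%04x:%02x:%02x.%x: node %u\n", pt[i].seg,
                           pt[i].bus, pt[i].devfn >> 3, pt[i].devfn & 7,
                           pt[i].node);
            }
            libxl_pcitopology_list_free(pt, num_devs);
        }
        libxl_ctx_free(ctx);
        return pt ? 0 : 1;
    }
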
/xen-4.10.0-shim-comet/xen/arch/x86/

numa.c
      76  spdx = paddr_to_pdx(nodes[i].start);  in populate_memnodemap()
      77  epdx = paddr_to_pdx(nodes[i].end - 1) + 1;  in populate_memnodemap()
     127  spdx = paddr_to_pdx(nodes[i].start);  in extract_lsb_from_nodes()
     128  epdx = paddr_to_pdx(nodes[i].end - 1) + 1;  in extract_lsb_from_nodes()
     149  shift = extract_lsb_from_nodes(nodes, numnodes);  in compute_hash_shift()
     208  struct node nodes[MAX_NUMNODES];  in numa_emulation() (local)
     222  memset(&nodes,0,sizeof(nodes));  in numa_emulation()
     228  nodes[i].end = nodes[i].start + sz;  in numa_emulation()
     231  nodes[i].start, nodes[i].end,  in numa_emulation()
     232  (nodes[i].end - nodes[i].start) >> 20);  in numa_emulation()
    [all …]

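The numa_emulation() hits show a fake NUMA layout being built by carving memory into equal-sized nodes, logging each range in MB (the >> 20). A standalone sketch of that splitting step; Xen's pdx handling and alignment constraints are deliberately omitted, and struct node here is a stand-in for the hypervisor's {start, end} range type:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct node { uint64_t start, end; };

    /* Split [0, mem_size) into numnodes equal emulated nodes, mirroring the
     * loop excerpted above; the last node absorbs any division remainder. */
    static void emulate_nodes(struct node *nodes, int numnodes, uint64_t mem_size)
    {
        uint64_t sz = mem_size / numnodes;   /* bytes per emulated node */
        int i;

        memset(nodes, 0, sizeof(*nodes) * numnodes);
        for (i = 0; i < numnodes; i++) {
            nodes[i].start = i ? nodes[i - 1].end : 0;
            nodes[i].end = (i == numnodes - 1) ? mem_size
                                               : nodes[i].start + sz;
            printf("Faking node %d at %#" PRIx64 "-%#" PRIx64 " (%" PRIu64 "MB)\n",
                   i, nodes[i].start, nodes[i].end,
                   (nodes[i].end - nodes[i].start) >> 20);
        }
    }
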
srat.c
      28  static struct node nodes[MAX_NUMNODES] __initdata;  (variable)
     139  struct node *nd = &nodes[i];  in cutoff_node()
     329  struct node *nd = &nodes[node];  in acpi_numa_memory_affinity_init()
     376  if (start < nodes[j].end  in nodes_cover_memory()
     377  && end > nodes[j].start) {  in nodes_cover_memory()
     378  if (start >= nodes[j].start) {  in nodes_cover_memory()
     379  start = nodes[j].end;  in nodes_cover_memory()
     382  if (end <= nodes[j].end) {  in nodes_cover_memory()
     383  end = nodes[j].start;  in nodes_cover_memory()
     484  u64 size = nodes[i].end - nodes[i].start;  in acpi_scan_nodes()
    [all …]

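The nodes_cover_memory() fragments implement an interval check: a RAM range is trimmed against every SRAT node until either nothing is left (covered) or no node overlaps what remains (a hole). A self-contained sketch of that trimming logic, with hypothetical types standing in for Xen's:

    #include <stdbool.h>
    #include <stdint.h>

    struct node { uint64_t start, end; };

    /* Does the node list cover the RAM range [start, end)? Repeatedly
     * shrink the window by any node overlapping its front or back, as in
     * the nodes_cover_memory() lines excerpted above. */
    static bool range_covered(const struct node *nodes, int numnodes,
                              uint64_t start, uint64_t end)
    {
        bool progress;

        do {
            progress = false;
            for (int j = 0; j < numnodes && start < end; j++) {
                if (start < nodes[j].end && end > nodes[j].start) {
                    if (start >= nodes[j].start) {   /* overlap at the front */
                        start = nodes[j].end;
                        progress = true;
                    }
                    if (end <= nodes[j].end) {       /* overlap at the back */
                        end = nodes[j].start;
                        progress = true;
                    }
                }
            }
        } while (progress && start < end);

        return start >= end;   /* window fully consumed => covered */
    }
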
/xen-4.10.0-shim-comet/docs/man/

xlcpupool.cfg.pod.5
      86  =item B<nodes="NODES">
      88  Specifies the cpus of the NUMA-nodes given in C<NODES> (an integer or
      90  specified nodes are allocated in the new cpupool.
     107  "nodes:" modifier can be used. E.g., "0,node:1,nodes:2-3,^10-13" means
     108  that pcpus 0, plus all the cpus of NUMA nodes 1,2,3 with the exception
     113  If neither B<nodes> nor B<cpus> are specified only the first free cpu

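As a worked example of the syntax those manpage lines document, a hypothetical cpupool configuration file (pool name, scheduler, and node layout invented for illustration):

    # numa1-pool.cfg -- sketch; assumes a host with at least 4 NUMA nodes
    name = "numa1-pool"
    sched = "credit2"
    # pcpu 0, plus every cpu of NUMA nodes 2 and 3, minus pcpus 10-13
    cpus = "0,nodes:2-3,^10-13"

Such a file would be instantiated with xl cpupool-create numa1-pool.cfg.
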
xl-numa-placement.pod.7
      33  I<node-affinity>. The node-affinity of a domain is the set of NUMA nodes
      74  itself always tries to run the domain's vCPUs on one of the nodes in
     164  to the nodes to which the pCPUs in the soft affinity mask belong;
     170  will be equal to the nodes to which the pCPUs present both in hard and
     193  The first thing to do is find the nodes or the sets of nodes (from now
     205  candidates involving fewer nodes are considered better. In case
     206  two (or more) candidates span the same number of nodes,
     225  Giving preference to candidates with fewer nodes ensures better
     227  different nodes. Favoring candidates with fewer vCPUs already runnable
     291  it won't scale well to systems with arbitrary number of nodes.
    [all …]

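The excerpted lines describe libxl's placement heuristic: among candidate node sets, prefer the one spanning fewer nodes, then break ties in favor of lighter load (fewer vCPUs already runnable there). A hedged sketch of such an ordering; the struct and field names are invented, not libxl's actual placement types:

    /* Hypothetical candidate summary for the ordering the manpage describes. */
    struct candidate {
        int nr_nodes;   /* how many NUMA nodes the candidate spans */
        int nr_vcpus;   /* vCPUs already runnable on those nodes */
    };

    /* qsort()-style comparator: "better" candidates sort first. */
    static int candidate_cmp(const void *a, const void *b)
    {
        const struct candidate *ca = a, *cb = b;

        if (ca->nr_nodes != cb->nr_nodes)
            return ca->nr_nodes - cb->nr_nodes;   /* fewer nodes wins */
        return ca->nr_vcpus - cb->nr_vcpus;       /* then lighter load */
    }

A qsort(cands, n, sizeof(*cands), candidate_cmp) over the candidate array would then leave the preferred placement at index 0.
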
/xen-4.10.0-shim-comet/tools/ocaml/xenstored/

trie.ml
      55  let mem_node nodes key =
      56  List.exists (fun n -> n.Node.key = key) nodes
      58  let find_node nodes key =
      59  List.find (fun n -> n.Node.key = key) nodes
      61  let replace_node nodes key node =
      67  aux nodes
      69  let remove_node nodes key =
      75  aux nodes

trie.mli
      40  every nodes of [t] containing no values and having no chil. *)
      44  As nodes of the trie [t] do not necessary contains a value, the second argument of
      48  (** [iter_path f t p] iterates [f] over nodes associated with the path [p] in the trie [t].
      52  (** [fold f t x] fold [f] over every nodes of [t], with [x] as initial value. *)

oxenstored.conf.in
      33  # involve a set of nodes that is writable by at most one other domain,
      39  # A transaction which involves a set of nodes which can be modified by

symbol.mli
      19  (** Xenstore nodes names are often the same, ie. "local", "domain", "device", ... so it is worth to

/xen-4.10.0-shim-comet/stubdom/grub.patches/

61btrfs.diff
    1647  + btrfs_item_key_to_cpu(&path->nodes[0],
    2408  + leaf = &path->nodes[0];
    2815  + buf = &path->nodes[level];
    2932  + fi = btrfs_item_ptr(&path->nodes[0],
    2945  + nodes[0],
    2956  + path->nodes[0].dev.part,
    2957  + path->nodes[0].dev.length,
    2958  + path->nodes[0].dev_bytenr >>
    2968  + path->nodes[0].data + from,
    3155  + di = btrfs_item_ptr(&path->nodes[0],
    [all …]

/xen-4.10.0-shim-comet/tools/hotplug/Linux/systemd/

xen-init-dom0.service.in
       2  Description=xen-init-dom0, initialise Dom0 configuration (xenstore nodes, JSON configuration stub)

/xen-4.10.0-shim-comet/tools/libxc/

xc_misc.c
     292  uint32_t *nodes)  in xc_pcitopoinfo() (argument)
     299  DECLARE_HYPERCALL_BOUNCE(nodes, num_devs * sizeof(*nodes),  in xc_pcitopoinfo()
     304  if ( (ret = xc_hypercall_bounce_pre(xch, nodes)) )  in xc_pcitopoinfo()
     314  set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.nodes, nodes,  in xc_pcitopoinfo()
     325  xc_hypercall_bounce_post(xch, nodes);  in xc_pcitopoinfo()

/xen-4.10.0-shim-comet/tools/xenstore/

TODO
       6  - Dynamic/supply nodes

/xen-4.10.0-shim-comet/xen/arch/x86/oprofile/

op_model_athlon.c
     454  int nodes;  in init_ibs_nmi() (local)
     459  nodes = 0;  in init_ibs_nmi()
     482  nodes++;  in init_ibs_nmi()
     488  if (!nodes) {  in init_ibs_nmi()

/xen-4.10.0-shim-comet/docs/misc/

block-scripts.txt
      45  When the script is run, the following nodes shall already have been
      58  When the script is run, the following nodes shall already have been
     109  other nodes. The reason we haven't done this yet is that the main

qemu-backends.txt
      15  user root) the backend nodes must be written before qemu is dropping

efi.markdown
      16  relevant device tree nodes.

9pfs.markdown
      65  information. The toolstack creates front and back nodes with state
      95  Backend configuration nodes, written by the toolstack, read by the

/xen-4.10.0-shim-comet/xen/include/asm-x86/

numa.h
      24  extern int compute_hash_shift(struct node *nodes, int numnodes,

/xen-4.10.0-shim-comet/tools/blktap2/drivers/

block-cache.c
      99  uint32_t nodes;  (member)
     180  return tree->size + tree->nodes * sizeof(radix_tree_node_t);  in radix_tree_size()
     200  tree->nodes++;  in radix_tree_allocate_node()
     218  tree->nodes--;  in radix_tree_free_node()

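block-cache.c keeps a running node count so radix_tree_size() can report the tree's memory footprint in O(1), without walking it. A minimal sketch of that bookkeeping pattern; the types and helpers here are stand-ins, not the blktap2 code:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct radix_tree_node {
        struct radix_tree_node *slots[512];   /* stand-in node shape */
    } radix_tree_node_t;

    struct radix_tree {
        uint64_t size;    /* bytes of cached data hanging off the tree */
        uint32_t nodes;   /* interior nodes currently allocated */
    };

    static radix_tree_node_t *node_alloc(struct radix_tree *tree)
    {
        radix_tree_node_t *n = calloc(1, sizeof(*n));
        if (n)
            tree->nodes++;        /* count it the moment it exists */
        return n;
    }

    static void node_free(struct radix_tree *tree, radix_tree_node_t *n)
    {
        if (n) {
            tree->nodes--;
            free(n);
        }
    }

    static size_t tree_footprint(const struct radix_tree *tree)
    {
        /* data plus per-node overhead, as radix_tree_size() does above */
        return tree->size + tree->nodes * sizeof(radix_tree_node_t);
    }
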
/xen-4.10.0-shim-comet/tools/xl/

xl_cpupool.c
      47  XLU_ConfigList *nodes;  in main_cpupoolcreate() (local)
     157  if (!xlu_cfg_get_list(config, "nodes", &nodes, 0, 0)) {  in main_cpupoolcreate()
     166  while ((buf = xlu_cfg_get_listitem(nodes, n_nodes)) != NULL) {  in main_cpupoolcreate()

/xen-4.10.0-shim-comet/tools/xenstat/xentop/

TODO
      25  from any node of all other nodes in a cluster)

/xen-4.10.0-shim-comet/xen/common/

sysctl.c
     413  guest_handle_is_null(ti->nodes) )  in do_sysctl()
     441  if ( copy_to_guest_offset(ti->nodes, i, &node, 1) )  in do_sysctl()

/xen-4.10.0-shim-comet/xen/include/public/

sysctl.h
     696  XEN_GUEST_HANDLE_64(uint32) nodes;