// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef KERNEL_LIB_TOPOLOGY_SYSTEM_TOPOLOGY_H_
#define KERNEL_LIB_TOPOLOGY_SYSTEM_TOPOLOGY_H_

#include <fbl/unique_ptr.h>
#include <fbl/vector.h>
#include <zircon/boot/image.h>
#include <zircon/types.h>
/*
 * Captures the physical layout of the core system (processors, caches, etc.).
 * The data is laid out as a tree, with processor nodes at the bottom and other types above
 * them. The expected usage is to start from a processor node and walk up/down to discover the
 * relationships you are interested in (see the usage sketch following the Node definition below).
 */

namespace system_topology {
// A single node in the topology graph. The union and types here mirror the flat structure,
// zbi_topology_node_t.
struct Node {
    uint8_t entity_type;
    union {
        zbi_topology_processor_t processor;
        zbi_topology_cluster_t cluster;
        zbi_topology_numa_region_t numa_region;
    } entity;
    Node* parent;
    fbl::Vector<Node*> children;
};
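
// A minimal usage sketch (illustrative only, not part of this interface): given a pointer to
// a processor node, walk up the tree to find the cluster that contains it. The entity-type
// constants come from <zircon/boot/image.h>.
//
//   Node* cluster_node = nullptr;
//   for (Node* node = processor->parent; node != nullptr; node = node->parent) {
//       if (node->entity_type == ZBI_TOPOLOGY_ENTITY_CLUSTER) {
//           cluster_node = node;
//           break;
//       }
//   }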

// We define a typedef here as we may want to change this type as the design evolves. For example,
// if we add run-time updateability we may want to hold a lock.
typedef const fbl::Vector<Node*>& IterableProcessors;

// A view of the system topology that is defined in early boot and remains static for the run of
// the system.
class Graph {
public:
    // Takes the flat topology array, validates it, and sets it as the current topology. Returns
    // an error if the topology is invalid.
    //
    // This should only be called during early boot (platform_init); after that this data is
    // considered static, so no locks are used. If we ever need to set this later in operation,
    // we MUST redesign this process to consider concurrent readers.
    //
    // Returns ZX_ERR_ALREADY_EXISTS if the state is already set, or ZX_ERR_INVALID_ARGS if the
    // provided graph fails validation.
    zx_status_t Update(zbi_topology_node_t* nodes, size_t count);
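
    // A hypothetical construction sketch (the node layout here is an assumption for
    // illustration): a single cluster containing one processor, handed to Update() during
    // early boot. ZBI_TOPOLOGY_NO_PARENT marks the root of the tree.
    //
    //   zbi_topology_node_t nodes[2] = {};
    //   nodes[0].entity_type = ZBI_TOPOLOGY_ENTITY_CLUSTER;
    //   nodes[0].parent_index = ZBI_TOPOLOGY_NO_PARENT;
    //   nodes[1].entity_type = ZBI_TOPOLOGY_ENTITY_PROCESSOR;
    //   nodes[1].parent_index = 0;  // Child of the cluster at index 0.
    //   zx_status_t status =
    //       system_topology::GetMutableSystemTopology().Update(nodes, 2);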

    // Provides an iterable container of pointers to all processor nodes.
    IterableProcessors processors() {
        return processors_;
    }

    // Finds the processor node that is assigned the given logical id and sets |processor| to
    // point to it. Returns ZX_ERR_NOT_FOUND if there is no such processor.
    zx_status_t ProcessorByLogicalId(uint16_t id, Node** processor) {
        if (id >= processors_by_logical_id_.size()) {
            return ZX_ERR_NOT_FOUND;
        }

        *processor = processors_by_logical_id_[id];
        return ZX_OK;
    }
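
    // A brief usage sketch (the caller code is hypothetical): look up logical id 0 and
    // step to its parent, if any.
    //
    //   Node* processor = nullptr;
    //   if (graph.ProcessorByLogicalId(0, &processor) == ZX_OK) {
    //       Node* parent = processor->parent;  // May be nullptr at the root.
    //   }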

private:
    // Validates that in the provided flat topology:
    //   - all processors are leaf nodes, and all leaf nodes are processors.
    //   - there are no cycles.
    //   - it is stored in a "depth first" ordering, with parents adjacent to their children.
    bool Validate(zbi_topology_node_t* nodes, int count) const;

    fbl::unique_ptr<Node[]> nodes_;
    fbl::Vector<Node*> processors_;

    // This is in essence a map, with the logical ID being the index into the vector. It will
    // contain duplicate entries for SMT processors, so we need it in addition to processors_.
    fbl::Vector<Node*> processors_by_logical_id_;
};

// Returns the global instance of the topology Graph. It is updated in early boot to contain the
// source-of-truth view of the system.
//
// This should be called once before the platform becomes multithreaded. We don't use a raw global
// because we couldn't ensure that it is initialized before it is used in the initialization of
// other global objects.
inline Graph& GetMutableSystemTopology() {
    static Graph graph;
    return graph;
}

// The version of the above that most code should use; only the platform init code needs the
// mutable version.
inline const Graph& GetSystemTopology() {
    return GetMutableSystemTopology();
}
99
100 } // namespace system_topology
101
102 #endif //KERNEL_LIB_TOPOLOGY_SYSTEM_TOPOLOGY_H_
103