/*
 * (c) 2009 Alexander Warg <warg@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 */
#include "sched_proxy.h"
#include "globals.h"
#include "debug.h"

#include <algorithm>
#include <l4/re/env>
#include <l4/sys/scheduler>

//#include <cstdio>

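/*
 * Expand a CPU set to the finer granularity 'gran'.
 *
 * A granularity of g means one map bit covers 2^g CPUs. Each source
 * bit is replicated over the 2^(og - gran) finer bits it covers, and
 * the offset is aligned down to the source granularity. For example,
 * with og = 2 and gran = 0, bit i of the result mirrors bit (i >> 2)
 * of the source. This assumes gran <= the source granularity; the
 * shift below would be negative otherwise.
 */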
static
l4_sched_cpu_set_t
blow_up(l4_sched_cpu_set_t const &src, unsigned char gran)
{
  l4_sched_cpu_set_t n;
  gran &= sizeof(l4_umword_t) * 8 - 1;
  unsigned char og = src.granularity() & (sizeof(l4_umword_t) * 8 - 1);
  n.set(gran, src.offset() & (~0UL << og));
  n.map = 0;
  for (unsigned i = 0; i < sizeof(l4_umword_t) * 8; ++i)
    if (src.map & (1UL << (i >> (og - gran))))
      n.map |= 1UL << i;

  return n;
}

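/*
 * Intersection of two CPU sets.
 *
 * The coarser of the two sets is first expanded to the finer
 * granularity, then the map of the set with the lower offset is
 * shifted so both windows align before the maps are AND-ed. If the
 * offsets are at least one machine word apart, the windows cannot
 * overlap and the empty set is returned.
 */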
static
l4_sched_cpu_set_t operator & (l4_sched_cpu_set_t const &a, l4_sched_cpu_set_t const &b)
{
  l4_sched_cpu_set_t _a, _b;
  unsigned char const ga = a.granularity() & (sizeof(l4_umword_t) * 8 - 1);
  unsigned char const gb = b.granularity() & (sizeof(l4_umword_t) * 8 - 1);
  if (ga < gb)
    {
      _b = blow_up(b, ga);
      _a = a;
    }
  else if (ga == gb)
    {
      _a = a;
      _b = b;
    }
  else
    {
      _a = blow_up(a, gb);
      _b = b;
    }

  long ofs_dif = _a.offset() - _b.offset();
  long unsigned abs_ofs_dif;
  if (ofs_dif < 0)
    abs_ofs_dif = -ofs_dif;
  else
    abs_ofs_dif = ofs_dif;

  if (abs_ofs_dif >= sizeof(l4_umword_t) * 8)
    return l4_sched_cpu_set(0, 0, 0);

  if (ofs_dif < 0)
    {
      _b.map &= (_a.map >> abs_ofs_dif);
      return _b;
    }
  else
    {
      _a.map &= (_b.map >> abs_ofs_dif);
      return _a;
    }
}

Sched_proxy::List Sched_proxy::_list;

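/*
 * A new proxy starts with an empty CPU set and an empty priority band
 * (_prio_offset = _prio_limit = 0). It queries the real scheduler once
 * and registers itself on the global list so CPU hotplug events reach
 * it through rescan_cpus().
 */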
Sched_proxy::Sched_proxy() :
  Icu(1, &_scheduler_irq),
  _real_cpus(l4_sched_cpu_set(0, 0, 0)), _cpu_mask(_real_cpus),
  _max_cpus(0),
  _prio_offset(0), _prio_limit(0)
{
  rescan_cpus();
  _list.push_front(this);
}

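/*
 * Refresh the set of online CPUs from the kernel scheduler and
 * recompute the effective set as (real CPUs & restriction mask).
 * On an error from the scheduler the previous state is kept.
 */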
void
Sched_proxy::rescan_cpus()
{
  l4_sched_cpu_set_t c;
  l4_umword_t max = 0;
  c.map = 0;
  c.gran_offset = 0;

  int e = l4_error(L4Re::Env::env()->scheduler()->info(&max, &c));
  if (e < 0)
    return;

  _max_cpus = std::min<unsigned>(sizeof(l4_umword_t) * 8, max);
  _real_cpus = c;

  _cpus = _real_cpus & _cpu_mask;
}

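/*
 * Proxy implementation of L4::Scheduler::info(): report the CPUs
 * visible through this proxy at the offset and granularity the client
 * asked for. A result bit is set if any of the 2^g CPUs it covers is
 * in the effective set.
 */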
int
Sched_proxy::info(l4_umword_t *cpu_max, l4_sched_cpu_set_t *cpus)
{
  *cpu_max = _max_cpus;
  unsigned char g = cpus->granularity() & (sizeof(l4_umword_t) * 8 - 1);
  l4_umword_t offs = cpus->offset() & (~0UL << g);
  if (offs >= _max_cpus)
    return -L4_ERANGE;

  cpus->map = 0;
  unsigned b = 0;
  for (unsigned i = offs; i < _max_cpus && b < sizeof(l4_umword_t) * 8;)
    {
      if (_cpus.map & (1UL << i))
        cpus->map |= 1UL << b;

      ++i;

      if (!(i & ~(~0UL << g)))
        ++b;
    }

  return L4_EOK;
}


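/*
 * Forward a run_thread request to the real scheduler after applying
 * this proxy's policy: the priority is shifted by _prio_offset and
 * clamped to _prio_limit, and the requested affinity is intersected
 * with the CPUs this proxy may use.
 */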
int
Sched_proxy::run_thread(L4::Cap<L4::Thread> thread, l4_sched_param_t const &sp)
{
  l4_sched_param_t s = sp;
  s.prio = std::min(sp.prio + _prio_offset, (l4_umword_t)_prio_limit);
  s.affinity = sp.affinity & _cpus;
  if (0)
    {
      printf("loader[%p] run_thread: o=%u scheduler affinity = %lx "
             "sp.m=%lx sp.o=%u sp.g=%u\n",
             this, _cpus.offset(), _cpus.map, sp.affinity.map,
             sp.affinity.offset(), sp.affinity.granularity());
      printf("loader[%p] "
             " s.m=%lx s.o=%u s.g=%u\n",
             this, s.affinity.map, s.affinity.offset(),
             s.affinity.granularity());
    }
  return l4_error(L4Re::Env::env()->scheduler()->run_thread(thread, s));
}

int
Sched_proxy::idle_time(l4_sched_cpu_set_t const &, l4_kernel_clock_t &)
{ return -L4_ENOSYS; }

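/*
 * Thread capabilities arrive as mapped items in the IPC message.
 * Return the fixed receive slot (Rcv_cap) where a transferred
 * capability lands, or an invalid cap if none was received.
 */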
L4::Cap<L4::Thread>
Sched_proxy::received_thread(L4::Ipc::Snd_fpage const &fp)
{
  if (!fp.cap_received())
    return L4::Cap<L4::Thread>::Invalid;

  return L4::Cap<L4::Thread>(Rcv_cap << L4_CAP_SHIFT);
}

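/*
 * Limit this proxy to the CPUs set in the plain bitmap 'cpus'
 * (offset 0, granularity 0) and recompute the effective set.
 */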
void
Sched_proxy::restrict_cpus(l4_umword_t cpus)
{
  _cpu_mask = l4_sched_cpu_set(0, 0, cpus);
  _cpus = _real_cpus & _cpu_mask;
}


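/*
 * IRQ endpoint for CPU hotplug notifications: the constructor creates
 * an IRQ object through the base factory, binds it to the base thread
 * with 'this' as the label, and registers it with the kernel scheduler.
 * Each trigger rescans the CPU sets of all Sched_proxy instances and
 * forwards a hotplug event to their clients.
 */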
class Cpu_hotplug_server :
  public L4::Irqep_t<Cpu_hotplug_server, Moe::Server_object>
{
public:
  void handle_irq()
  {
    for (auto i : Sched_proxy::_list)
      {
        i->rescan_cpus();
        i->hotplug_event();
      }
  }

  Cpu_hotplug_server()
  {
    L4::Cap<L4::Irq> irq = object_pool.cap_alloc()->alloc<L4::Irq>();
    if (!irq)
      {
        Err(Err::Fatal).printf("Could not allocate capability for CPU hotplug\n");
        return;
      }

    if (l4_error(L4::Cap<L4::Factory>(L4_BASE_FACTORY_CAP)->create(irq)) < 0)
      {
        Err(Err::Fatal).printf("Could not allocate IRQ for CPU hotplug\n");
        return;
      }

    if (l4_error(irq->bind_thread(L4::Cap<L4::Thread>(L4_BASE_THREAD_CAP), l4_umword_t(this))) < 0)
      {
        Err(Err::Fatal).printf("Could not attach to CPU hotplug IRQ\n");
        return;
      }

    if (l4_error(L4Re::Env::env()->scheduler()->bind(0, irq)) < 0)
      {
        Err(Err::Fatal).printf("Could not bind CPU hotplug IRQ to scheduler\n");
        return;
      }
  }
};

static Cpu_hotplug_server _cpu_hotplug_server;