/*
 * (c) 2011 Adam Lackorzynski <adam@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 *
 * As a special exception, you may use this file as part of a free software
 * library without restriction. Specifically, if other files instantiate
 * templates or use macros or inline functions from this file, or you compile
 * this file and link it with other files to produce an executable, this
 * file does not by itself cause the resulting executable to be covered by
 * the GNU General Public License. This exception does not however
 * invalidate any other reasons why the executable file might be covered by
 * the GNU General Public License.
 */
/*
 * Note: do _NOT_ use this lock unless you have been advised to do so.
 */
/*
 * This lock implementation is quite simplistic. In particular, the list of
 * blockers on a lock only grows, i.e. former blockers are never removed.
 * It also uses one kernel-provided semaphore per lock+thread combination,
 * which is too much for general use.
 */
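/*
 * Typical usage sketch (the callbacks 'my_malloc', 'my_free',
 * 'my_cap_alloc' and 'my_cap_free' are hypothetical, application-provided
 * memory and capability allocators):
 *
 *   l4ullulock_t *lk;
 *   if (l4ullulock_init(&lk, my_malloc, my_free, my_cap_alloc, my_cap_free,
 *                       l4re_env()->factory) < 0)
 *     return -1; // out of memory
 *
 *   l4ullulock_lock(lk, l4_utcb());
 *   // ... critical section ...
 *   l4ullulock_unlock(lk, l4_utcb());
 *
 *   l4ullulock_deinit(lk);
 */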

#include <l4/util/llulc.h>
#include <l4/sys/semaphore>
#include <l4/sys/factory>
#include <l4/util/atomic.h>

struct l4ullulock_struct_t {
  unsigned long lock;
  void *blocklistroot;
  l4_umword_t block_cnt;
  void *(*mem_alloc)(size_t);
  void (*mem_free)(void *);
  l4_cap_idx_t (*cap_alloc)(void);
  void (*cap_free)(l4_cap_idx_t c);
  L4::Cap<L4::Factory> factory;
};

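/* Atomically add 'delta' to '*x' using a compare-and-swap retry loop;
 * returns the value of '*x' before the addition. */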
static l4_umword_t add_atomic(l4_umword_t *x, int delta)
{
  l4_umword_t oldval, newval;
  do
    {
      oldval = *x;
      newval = oldval + delta;
    }
  while (!l4util_cmpxchg(x, oldval, newval));

  return oldval;
}

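/*
 * Per-thread blocker node. All nodes of a lock form a circular, singly
 * linked list anchored at a dummy root node; as noted above, this list
 * only ever grows. Each node carries a kernel semaphore on which its
 * thread blocks ('block()') and via which it is woken up ('wakeup()').
 */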
class Lock
{
public:
  typedef L4::Cap<L4::Semaphore> Lockcap;
  typedef l4ullulock_t Allocator;

  explicit Lock(l4_utcb_t *u, Allocator *al)
  : _utcb(u), _triggers(0), _next(0), _allocator(al)
  {}

  void init_root() { _next = this; }

  void lock(Lockcap c) { _lock = c; }

  void block() throw()
  { _lock->down(); add_atomic(&_triggers, -1); }

  void wakeup() throw()
  {
    if (_triggers < 2)
      {
        add_atomic(&_triggers, 1);
        _lock->trigger();
      }
  }

  static Lock *get_dummy(l4ullulock_t *t)
  { return reinterpret_cast<Lock *>(t->blocklistroot); }

  void enqueue(l4ullulock_t *t)
  {
    Lock *r = get_dummy(t);
    l4_umword_t oldval, newval;

    do
      {
        _next = r->_next;
        oldval = (l4_umword_t)r->_next;
        newval = (l4_umword_t)this;
      }
    while (!l4util_cmpxchg((volatile l4_umword_t *)&r->_next, oldval, newval));
  }

  void *operator new(size_t, void *p)
  { return p; }

  void operator delete(void *p)
  { reinterpret_cast<Lock *>(p)->_allocator->mem_free(p); }

  static void shift(l4ullulock_t *t)
  { t->blocklistroot = (void *)get_dummy(t)->_next; }

  static void free_list(l4ullulock_t *t);

  static Lock *find(l4ullulock_t *t, l4_utcb_t *u);
  static void wakeup_others(l4ullulock_t *t, l4_utcb_t *u);

private:
  l4_utcb_t *_utcb;
  Lockcap _lock;
  l4_umword_t _triggers;
  Lock *_next;
  Allocator *_allocator;
};

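/* Find the blocker node belonging to the thread with UTCB 'u' by walking
 * the circular list once; returns 0 if that thread has no node yet. */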
Lock *Lock::find(l4ullulock_t *t, l4_utcb_t *u)
{
  Lock *r = get_dummy(t)->_next;
  Lock *i = r;

  do
    {
      if (u == i->_utcb)
        return i;
      i = i->_next;
    }
  while (i != r);

  return 0;
}

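/* Wake all blocked threads except the caller identified by UTCB 'u'. The
 * dummy root node has a 0 UTCB and is therefore skipped. */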
void Lock::wakeup_others(l4ullulock_t *t, l4_utcb_t *u)
{
  Lock *r = get_dummy(t)->_next;
  Lock *i = r;

  do
    {
      if (i->_utcb && u != i->_utcb)
        i->wakeup();
      i = i->_next;
    }
  while (i != r);
}

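/* Free all nodes of the blocker list, including the (current) root. */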
void Lock::free_list(l4ullulock_t *t)
{
  Lock *r = get_dummy(t);
  Lock *i = r->_next;

  // delete all nodes in the circle except the root, then the root itself,
  // so that every node is freed exactly once
  while (i != r)
    {
      Lock *d = i;
      i = i->_next;
      delete d;
    }

  delete r;
}

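/* Allocate and initialize a blocker node for the calling thread: grab
 * memory and a capability slot and create a new kernel semaphore through
 * the lock's factory. Returns 0 on failure. */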
static Lock *create_new_thread_lock(l4ullulock_t *t, l4_utcb_t *u)
{
  int err;
  L4::Cap<L4::Factory> f;
  void *p = t->mem_alloc(sizeof(Lock));
  if (!p)
    return 0;

  Lock *x = new (p) Lock(u, t);

  L4::Cap<L4::Semaphore> c = L4::Cap<L4::Semaphore>(t->cap_alloc());
  if (!c.is_valid())
    goto fail1;

  f = L4::Cap<L4::Factory>(t->factory);

  err = l4_error(f->create(c, L4_PROTO_SEMAPHORE));
  if (err < 0)
    goto fail2;

  x->lock(c);
  return x;

fail2:
  t->cap_free(c.cap());
fail1:
  delete x;
  return 0;
}

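/* Create a new lock. All memory and capability management is delegated to
 * the given callbacks; 'factory' is used to create the per-thread
 * semaphores. */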
int l4ullulock_init(l4ullulock_t **t,
                    void *(*mem_alloc)(size_t x),
                    void (*mem_free)(void *p),
                    l4_cap_idx_t (*cap_alloc)(void),
                    void (*cap_free)(l4_cap_idx_t c),
                    l4_cap_idx_t factory)
{
  l4ullulock_t *_t = (l4ullulock_t *)mem_alloc(sizeof(*_t));
  if (!_t)
    return -L4_ENOMEM;

  _t->lock = 0;
  _t->block_cnt = 0;
  _t->mem_alloc = mem_alloc;
  _t->mem_free = mem_free;
  _t->cap_alloc = cap_alloc;
  _t->cap_free = cap_free;
  _t->factory = L4::Cap<L4::Factory>(factory);

  void *p = mem_alloc(sizeof(Lock));
  if (!p)
    {
      mem_free(_t);
      return -L4_ENOMEM;
    }

  // the dummy root node; its 0 UTCB marks it as belonging to no thread
  Lock *l = new (p) Lock(0, _t);
  l->init_root();
  _t->blocklistroot = l;
  *t = _t;
  return 0;
}

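/* Destroy a lock previously created with l4ullulock_init(). Must not be
 * called while any thread still uses the lock. Note that the per-thread
 * semaphore capabilities are not released here; only memory is freed. */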
int l4ullulock_deinit(l4ullulock_t *t)
{
  Lock::free_list(t);
  t->mem_free(t);
  return 0;
}

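/* Acquire the lock. 't->lock' counts acquisition attempts: the thread that
 * takes it from 0 to 1 owns the lock; all others back off and block on
 * their per-thread semaphore until the owner wakes them. */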
int l4ullulock_lock(l4ullulock_t *t, l4_utcb_t *u)
{
  while (add_atomic(&t->lock, 1) > 0)
    {
      // we need to block, someone has the lock already
      Lock *tl = Lock::find(t, u);
      if (!tl)
        {
          // first contention of this thread: create its blocker node
          tl = create_new_thread_lock(t, u);
          if (!tl)
            {
              add_atomic(&t->lock, -1);
              return -L4_ENOMEM;
            }

          tl->enqueue(t);

          // retry the acquisition instead of blocking right away
          add_atomic(&t->lock, -1);
          continue;
        }

      add_atomic(&t->lock, -1);
      asm volatile("" : : : "memory"); // compiler barrier
      add_atomic(&t->block_cnt, 1);
      asm volatile("" : : : "memory");

      tl->block();

      asm volatile("" : : : "memory");
      add_atomic(&t->block_cnt, -1);
    }

  return 0;
}

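/* Release the lock. If threads are blocked, rotate the list root and wake
 * all threads except the caller so they can retry the acquisition. */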
int l4ullulock_unlock(l4ullulock_t *t, l4_utcb_t *u)
{
  add_atomic(&t->lock, -1);
  asm volatile("" : : : "memory"); // compiler barrier
  if (t->block_cnt > 0)
    {
      Lock::shift(t);
      Lock::wakeup_others(t, u);
    }

  return 0;
}