/* SPDX-License-Identifier: BSD-3-Clause */
/*	$NetBSD: queue.h,v 1.49.6.1 2008/11/20 03:22:38 snj Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */

#ifndef	_SYS_QUEUE_H_
#define	_SYS_QUEUE_H_

/*#include <sys/null.h> */

/*
 * This file defines six types of data structures: singly-linked lists,
 * lists, simple queues, singly-linked tail queues, tail queues, and
 * circular queues.
 *
 * A singly-linked list is headed by a single forward pointer.  The
 * elements are singly linked for minimum space and pointer manipulation
 * overhead at the expense of O(n) removal for arbitrary elements.  New
 * elements can be added to the list after an existing element or at the
 * head of the list.  Elements being removed from the head of the list
 * should use the explicit macro for this purpose for optimum
 * efficiency.  A singly-linked list may only be traversed in the forward
 * direction.  Singly-linked lists are ideal for applications with large
 * datasets and few or no removals, or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header).  The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before
 * or after an existing element or at the head of the list.  A list
 * may only be traversed in the forward direction.
 *
 * A simple queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are singly
 * linked to save space, so elements can only be removed from the
 * head of the list.  New elements can be added to the list after
 * an existing element, at the head of the list, or at the end of the
 * list.  A simple queue may only be traversed in the forward direction.
 *
 * A singly-linked tail queue is likewise headed by a pair of pointers,
 * one to the head of the list and the other to the tail of the list.
 * The elements are singly linked to save space, so removal of an
 * arbitrary element costs O(n).  New elements can be added after an
 * existing element, at the head of the list, or at the end of the list.
 * A singly-linked tail queue may only be traversed in the forward
 * direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list.  A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has a more
 * complex end of list detection.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 */

/*
 * List definitions.
 */
#define LIST_HEAD(name, type) \
struct name { \
        struct type *lh_first;  /* first element */ \
}

#define LIST_HEAD_INITIALIZER(head) \
        { NULL }

#define LIST_ENTRY(type) \
struct { \
        struct type *le_next;   /* next element */ \
        struct type **le_prev;  /* address of previous next element */ \
}

/*
 * List functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
        if ((head)->lh_first && \
            (head)->lh_first->field.le_prev != &(head)->lh_first) \
                panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_LIST_OP(elm, field) \
        if ((elm)->field.le_next && \
            (elm)->field.le_next->field.le_prev != \
            &(elm)->field.le_next) \
                panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
        if (*(elm)->field.le_prev != (elm)) \
                panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
        (elm)->field.le_next = (void *)1L; \
        (elm)->field.le_prev = (void *)1L;
#else
#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define QUEUEDEBUG_LIST_OP(elm, field)
#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define LIST_INIT(head) do { \
        (head)->lh_first = NULL; \
} while (/* CONSTCOND */0)

#define LIST_INSERT_AFTER(listelm, elm, field) do { \
        QUEUEDEBUG_LIST_OP((listelm), field) \
        if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
                (listelm)->field.le_next->field.le_prev = \
                    &(elm)->field.le_next; \
        (listelm)->field.le_next = (elm); \
        (elm)->field.le_prev = &(listelm)->field.le_next; \
} while (/* CONSTCOND */0)

#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
        QUEUEDEBUG_LIST_OP((listelm), field) \
        (elm)->field.le_prev = (listelm)->field.le_prev; \
        (elm)->field.le_next = (listelm); \
        *(listelm)->field.le_prev = (elm); \
        (listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/* CONSTCOND */0)

#define LIST_INSERT_HEAD(head, elm, field) do { \
        QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
        if (((elm)->field.le_next = (head)->lh_first) != NULL) \
                (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
        (head)->lh_first = (elm); \
        (elm)->field.le_prev = &(head)->lh_first; \
} while (/* CONSTCOND */0)

#define LIST_REMOVE(elm, field) do { \
        QUEUEDEBUG_LIST_OP((elm), field) \
        if ((elm)->field.le_next != NULL) \
                (elm)->field.le_next->field.le_prev = \
                    (elm)->field.le_prev; \
        *(elm)->field.le_prev = (elm)->field.le_next; \
        QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
} while (/* CONSTCOND */0)

#define LIST_FOREACH(var, head, field) \
        for ((var) = ((head)->lh_first); \
            (var); \
            (var) = ((var)->field.le_next))

/*
 * List access methods.
 */
#define LIST_EMPTY(head)                ((head)->lh_first == NULL)
#define LIST_FIRST(head)                ((head)->lh_first)
#define LIST_NEXT(elm, field)           ((elm)->field.le_next)

#define LIST_FOREACH_SAFE(var, head, field, tvar) \
        for ((var) = LIST_FIRST((head)); \
            (var) && ((tvar) = LIST_NEXT((var), field), 1); \
            (var) = (tvar))
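/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical doubly-linked list of "struct foo" records built with the
 * LIST_* macros above.  The names foo, foolist, value and entries are
 * placeholders invented for this example only; <stdlib.h> and <stdio.h>
 * are assumed for malloc()/printf().
 *
 *      struct foo {
 *              int value;
 *              LIST_ENTRY(foo) entries;        // embedded linkage
 *      };
 *      LIST_HEAD(foolist, foo) head = LIST_HEAD_INITIALIZER(head);
 *
 *      struct foo *f = malloc(sizeof(*f));
 *      f->value = 1;
 *      LIST_INSERT_HEAD(&head, f, entries);    // push at the front
 *
 *      struct foo *p;
 *      LIST_FOREACH(p, &head, entries)         // forward traversal only
 *              printf("%d\n", p->value);
 *
 *      LIST_REMOVE(f, entries);                // O(1), no list traversal
 *      free(f);
 */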
/*
 * Singly-linked List definitions.
 */
#define SLIST_HEAD(name, type) \
struct name { \
        struct type *slh_first; /* first element */ \
}

#define SLIST_HEAD_INITIALIZER(head) \
        { NULL }

#define SLIST_ENTRY(type) \
struct { \
        struct type *sle_next;  /* next element */ \
}

/*
 * Singly-linked List functions.
 */
#define SLIST_INIT(head) do { \
        (head)->slh_first = NULL; \
} while (/* CONSTCOND */0)

#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
        (elm)->field.sle_next = (slistelm)->field.sle_next; \
        (slistelm)->field.sle_next = (elm); \
} while (/* CONSTCOND */0)

#define SLIST_INSERT_HEAD(head, elm, field) do { \
        (elm)->field.sle_next = (head)->slh_first; \
        (head)->slh_first = (elm); \
} while (/* CONSTCOND */0)

#define SLIST_REMOVE_HEAD(head, field) do { \
        (head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/* CONSTCOND */0)

#define SLIST_REMOVE(head, elm, type, field) do { \
        if ((head)->slh_first == (elm)) { \
                SLIST_REMOVE_HEAD((head), field); \
        } \
        else { \
                struct type *curelm = (head)->slh_first; \
                while (curelm->field.sle_next != (elm)) \
                        curelm = curelm->field.sle_next; \
                curelm->field.sle_next = \
                    curelm->field.sle_next->field.sle_next; \
        } \
} while (/* CONSTCOND */0)

#define SLIST_REMOVE_AFTER(slistelm, field) do { \
        (slistelm)->field.sle_next = \
            SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
} while (/* CONSTCOND */0)

#define SLIST_FOREACH(var, head, field) \
        for ((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
        for ((var) = SLIST_FIRST((head)); \
            (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
            (var) = (tvar))

/*
 * Singly-linked List access methods.
 */
#define SLIST_EMPTY(head)       ((head)->slh_first == NULL)
#define SLIST_FIRST(head)       ((head)->slh_first)
#define SLIST_NEXT(elm, field)  ((elm)->field.sle_next)
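/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical singly-linked list used as a LIFO stack.  The names bar,
 * barhead and link are placeholders invented for this example only;
 * <stdlib.h> is assumed for malloc()/free().
 *
 *      struct bar {
 *              int value;
 *              SLIST_ENTRY(bar) link;
 *      };
 *      SLIST_HEAD(barhead, bar) head = SLIST_HEAD_INITIALIZER(head);
 *
 *      struct bar *b = malloc(sizeof(*b));
 *      b->value = 1;
 *      SLIST_INSERT_HEAD(&head, b, link);      // push
 *
 *      while (!SLIST_EMPTY(&head)) {
 *              struct bar *top = SLIST_FIRST(&head);
 *              SLIST_REMOVE_HEAD(&head, link); // pop: O(1)
 *              free(top);
 *              // removing an arbitrary element instead would be
 *              // SLIST_REMOVE(&head, elm, bar, link), which is O(n)
 *      }
 */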
/*
 * Singly-linked Tail queue declarations.
 */
#define STAILQ_HEAD(name, type) \
struct name { \
        struct type *stqh_first;        /* first element */ \
        struct type **stqh_last;        /* addr of last next element */ \
}

#define STAILQ_HEAD_INITIALIZER(head) \
        { NULL, &(head).stqh_first }

#define STAILQ_ENTRY(type) \
struct { \
        struct type *stqe_next; /* next element */ \
}

/*
 * Singly-linked Tail queue functions.
 */
#define STAILQ_INIT(head) do { \
        (head)->stqh_first = NULL; \
        (head)->stqh_last = &(head)->stqh_first; \
} while (/* CONSTCOND */0)

#define STAILQ_INSERT_HEAD(head, elm, field) do { \
        if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
                (head)->stqh_last = &(elm)->field.stqe_next; \
        (head)->stqh_first = (elm); \
} while (/* CONSTCOND */0)

#define STAILQ_INSERT_TAIL(head, elm, field) do { \
        (elm)->field.stqe_next = NULL; \
        *(head)->stqh_last = (elm); \
        (head)->stqh_last = &(elm)->field.stqe_next; \
} while (/* CONSTCOND */0)

#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
        if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
                (head)->stqh_last = &(elm)->field.stqe_next; \
        (listelm)->field.stqe_next = (elm); \
} while (/* CONSTCOND */0)

#define STAILQ_REMOVE_HEAD(head, field) do { \
        if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
                (head)->stqh_last = &(head)->stqh_first; \
} while (/* CONSTCOND */0)

#define STAILQ_REMOVE(head, elm, type, field) do { \
        if ((head)->stqh_first == (elm)) { \
                STAILQ_REMOVE_HEAD((head), field); \
        } else { \
                struct type *curelm = (head)->stqh_first; \
                while (curelm->field.stqe_next != (elm)) \
                        curelm = curelm->field.stqe_next; \
                if ((curelm->field.stqe_next = \
                        curelm->field.stqe_next->field.stqe_next) == NULL) \
                            (head)->stqh_last = &(curelm)->field.stqe_next; \
        } \
} while (/* CONSTCOND */0)

#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
        if ((STAILQ_NEXT(elm, field) = \
             STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
                (head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)

#define STAILQ_FOREACH(var, head, field) \
        for ((var) = ((head)->stqh_first); \
            (var); \
            (var) = ((var)->field.stqe_next))

#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
        for ((var) = STAILQ_FIRST((head)); \
            (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
            (var) = (tvar))

#define STAILQ_CONCAT(head1, head2) do { \
        if (!STAILQ_EMPTY((head2))) { \
                *(head1)->stqh_last = (head2)->stqh_first; \
                (head1)->stqh_last = (head2)->stqh_last; \
                STAILQ_INIT((head2)); \
        } \
} while (/* CONSTCOND */0)

/*
 * Singly-linked Tail queue access methods.
 */
#define STAILQ_EMPTY(head)      ((head)->stqh_first == NULL)
#define STAILQ_FIRST(head)      ((head)->stqh_first)
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
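/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical FIFO work queue built with the STAILQ_* macros above.
 * The names job, jobq, run, some_fn and link are placeholders invented
 * for this example only; <stdlib.h> is assumed for malloc()/free().
 *
 *      struct job {
 *              void (*run)(void);
 *              STAILQ_ENTRY(job) link;
 *      };
 *      STAILQ_HEAD(jobq, job) q = STAILQ_HEAD_INITIALIZER(q);
 *
 *      struct job *j = malloc(sizeof(*j));
 *      j->run = some_fn;                       // hypothetical callback
 *      STAILQ_INSERT_TAIL(&q, j, link);        // enqueue: O(1) via stqh_last
 *
 *      while (!STAILQ_EMPTY(&q)) {
 *              struct job *next = STAILQ_FIRST(&q);
 *              STAILQ_REMOVE_HEAD(&q, link);   // dequeue from the front
 *              next->run();
 *              free(next);
 *      }
 */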
/*
 * Simple queue definitions.
 */
#define SIMPLEQ_HEAD(name, type) \
struct name { \
        struct type *sqh_first; /* first element */ \
        struct type **sqh_last; /* addr of last next element */ \
}

#define SIMPLEQ_HEAD_INITIALIZER(head) \
        { NULL, &(head).sqh_first }

#define SIMPLEQ_ENTRY(type) \
struct { \
        struct type *sqe_next;  /* next element */ \
}

/*
 * Simple queue functions.
 */
#define SIMPLEQ_INIT(head) do { \
        (head)->sqh_first = NULL; \
        (head)->sqh_last = &(head)->sqh_first; \
} while (/* CONSTCOND */0)

#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
        if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
                (head)->sqh_last = &(elm)->field.sqe_next; \
        (head)->sqh_first = (elm); \
} while (/* CONSTCOND */0)

#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
        (elm)->field.sqe_next = NULL; \
        *(head)->sqh_last = (elm); \
        (head)->sqh_last = &(elm)->field.sqe_next; \
} while (/* CONSTCOND */0)

#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
        if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
                (head)->sqh_last = &(elm)->field.sqe_next; \
        (listelm)->field.sqe_next = (elm); \
} while (/* CONSTCOND */0)

#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
        if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
                (head)->sqh_last = &(head)->sqh_first; \
} while (/* CONSTCOND */0)

#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
        if ((head)->sqh_first == (elm)) { \
                SIMPLEQ_REMOVE_HEAD((head), field); \
        } else { \
                struct type *curelm = (head)->sqh_first; \
                while (curelm->field.sqe_next != (elm)) \
                        curelm = curelm->field.sqe_next; \
                if ((curelm->field.sqe_next = \
                        curelm->field.sqe_next->field.sqe_next) == NULL) \
                            (head)->sqh_last = &(curelm)->field.sqe_next; \
        } \
} while (/* CONSTCOND */0)

#define SIMPLEQ_FOREACH(var, head, field) \
        for ((var) = ((head)->sqh_first); \
            (var); \
            (var) = ((var)->field.sqe_next))

/*
 * Simple queue access methods.
 */
#define SIMPLEQ_EMPTY(head)             ((head)->sqh_first == NULL)
#define SIMPLEQ_FIRST(head)             ((head)->sqh_first)
#define SIMPLEQ_NEXT(elm, field)        ((elm)->field.sqe_next)
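/*
 * Example (illustrative sketch, not part of the original header): the
 * SIMPLEQ_* macros mirror the STAILQ_* macros above, differing only in
 * member and macro names.  The names msg, msgq, text and link are
 * placeholders invented for this example only; <stdlib.h> and <stdio.h>
 * are assumed.
 *
 *      struct msg {
 *              char text[64];
 *              SIMPLEQ_ENTRY(msg) link;
 *      };
 *      SIMPLEQ_HEAD(msgq, msg) q = SIMPLEQ_HEAD_INITIALIZER(q);
 *
 *      struct msg *m = calloc(1, sizeof(*m));
 *      SIMPLEQ_INSERT_TAIL(&q, m, link);       // append at the tail
 *      SIMPLEQ_FOREACH(m, &q, link)            // forward traversal only
 *              puts(m->text);
 *      SIMPLEQ_REMOVE_HEAD(&q, link);          // only head removal is O(1)
 */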
/*
 * Tail queue definitions.
 */
#define _TAILQ_HEAD(name, type, qual) \
struct name { \
        qual type *tqh_first;           /* first element */ \
        qual type *qual *tqh_last;      /* addr of last next element */ \
}
#define TAILQ_HEAD(name, type)  _TAILQ_HEAD(name, struct type,)

#define TAILQ_HEAD_INITIALIZER(head) \
        { NULL, &(head).tqh_first }

#define _TAILQ_ENTRY(type, qual) \
struct { \
        qual type *tqe_next;            /* next element */ \
        qual type *qual *tqe_prev;      /* address of previous next element */\
}
#define TAILQ_ENTRY(type)       _TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
        if ((head)->tqh_first && \
            (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
                panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
        if (*(head)->tqh_last != NULL) \
                panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_OP(elm, field) \
        if ((elm)->field.tqe_next && \
            (elm)->field.tqe_next->field.tqe_prev != \
            &(elm)->field.tqe_next) \
                panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
        if (*(elm)->field.tqe_prev != (elm)) \
                panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
        if ((elm)->field.tqe_next == NULL && \
            (head)->tqh_last != &(elm)->field.tqe_next) \
                panic("TAILQ_PREREMOVE head %p elm %p %s:%d", \
                    (head), (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
        (elm)->field.tqe_next = (void *)1L; \
        (elm)->field.tqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define QUEUEDEBUG_TAILQ_OP(elm, field)
#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define TAILQ_INIT(head) do { \
        (head)->tqh_first = NULL; \
        (head)->tqh_last = &(head)->tqh_first; \
} while (/* CONSTCOND */0)

#define TAILQ_INSERT_HEAD(head, elm, field) do { \
        QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
        if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
                (head)->tqh_first->field.tqe_prev = \
                    &(elm)->field.tqe_next; \
        else \
                (head)->tqh_last = &(elm)->field.tqe_next; \
        (head)->tqh_first = (elm); \
        (elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/* CONSTCOND */0)

#define TAILQ_INSERT_TAIL(head, elm, field) do { \
        QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
        (elm)->field.tqe_next = NULL; \
        (elm)->field.tqe_prev = (head)->tqh_last; \
        *(head)->tqh_last = (elm); \
        (head)->tqh_last = &(elm)->field.tqe_next; \
} while (/* CONSTCOND */0)

#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
        QUEUEDEBUG_TAILQ_OP((listelm), field) \
        if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
                (elm)->field.tqe_next->field.tqe_prev = \
                    &(elm)->field.tqe_next; \
        else \
                (head)->tqh_last = &(elm)->field.tqe_next; \
        (listelm)->field.tqe_next = (elm); \
        (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/* CONSTCOND */0)

#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
        QUEUEDEBUG_TAILQ_OP((listelm), field) \
        (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
        (elm)->field.tqe_next = (listelm); \
        *(listelm)->field.tqe_prev = (elm); \
        (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/* CONSTCOND */0)

#define TAILQ_REMOVE(head, elm, field) do { \
        QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
        QUEUEDEBUG_TAILQ_OP((elm), field) \
        if (((elm)->field.tqe_next) != NULL) \
                (elm)->field.tqe_next->field.tqe_prev = \
                    (elm)->field.tqe_prev; \
        else \
                (head)->tqh_last = (elm)->field.tqe_prev; \
        *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
        QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
} while (/* CONSTCOND */0)
#define TAILQ_FOREACH(var, head, field) \
        for ((var) = ((head)->tqh_first); \
            (var); \
            (var) = ((var)->field.tqe_next))

#define TAILQ_FOREACH_SAFE(var, head, field, next) \
        for ((var) = ((head)->tqh_first); \
            (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1); \
            (var) = (next))

#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
        for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
            (var); \
            (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
        for ((var) = TAILQ_LAST((head), headname); \
            (var) && ((prev) = TAILQ_PREV((var), headname, field), 1);\
            (var) = (prev))

#define TAILQ_CONCAT(head1, head2, field) do { \
        if (!TAILQ_EMPTY(head2)) { \
                *(head1)->tqh_last = (head2)->tqh_first; \
                (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
                (head1)->tqh_last = (head2)->tqh_last; \
                TAILQ_INIT((head2)); \
        } \
} while (/* CONSTCOND */0)

/*
 * Tail queue access methods.
 */
#define TAILQ_EMPTY(head)               ((head)->tqh_first == NULL)
#define TAILQ_FIRST(head)               ((head)->tqh_first)
#define TAILQ_NEXT(elm, field)          ((elm)->field.tqe_next)

#define TAILQ_LAST(head, headname) \
        (*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_PREV(elm, headname, field) \
        (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
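/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical doubly-linked tail queue traversed in both directions.
 * The names node, nodeq, key and link are placeholders invented for this
 * example only; n and n2 stand for previously allocated elements, and
 * <stdio.h> is assumed for printf().
 *
 *      struct node {
 *              int key;
 *              TAILQ_ENTRY(node) link;
 *      };
 *      TAILQ_HEAD(nodeq, node) q = TAILQ_HEAD_INITIALIZER(q);
 *
 *      TAILQ_INSERT_TAIL(&q, n, link);         // O(1) append
 *      TAILQ_INSERT_HEAD(&q, n2, link);        // O(1) prepend
 *
 *      struct node *p;
 *      TAILQ_FOREACH(p, &q, link)
 *              printf("%d\n", p->key);
 *      TAILQ_FOREACH_REVERSE(p, &q, nodeq, link)       // needs the head's
 *              printf("%d\n", p->key);                 // struct tag name
 *
 *      TAILQ_REMOVE(&q, n, link);              // O(1), no traversal
 */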
/*
 * Circular queue definitions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
        if ((head)->cqh_first != (void *)(head) && \
            (head)->cqh_first->field.cqe_prev != (void *)(head)) \
                panic("CIRCLEQ head forw %p %s:%d", (head), \
                    __FILE__, __LINE__); \
        if ((head)->cqh_last != (void *)(head) && \
            (head)->cqh_last->field.cqe_next != (void *)(head)) \
                panic("CIRCLEQ head back %p %s:%d", (head), \
                    __FILE__, __LINE__);
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
        if ((elm)->field.cqe_next == (void *)(head)) { \
                if ((head)->cqh_last != (elm)) \
                        panic("CIRCLEQ elm last %p %s:%d", (elm), \
                            __FILE__, __LINE__); \
        } else { \
                if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
                        panic("CIRCLEQ elm forw %p %s:%d", (elm), \
                            __FILE__, __LINE__); \
        } \
        if ((elm)->field.cqe_prev == (void *)(head)) { \
                if ((head)->cqh_first != (elm)) \
                        panic("CIRCLEQ elm first %p %s:%d", (elm), \
                            __FILE__, __LINE__); \
        } else { \
                if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
                        panic("CIRCLEQ elm prev %p %s:%d", (elm), \
                            __FILE__, __LINE__); \
        }
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
        (elm)->field.cqe_next = (void *)1L; \
        (elm)->field.cqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
#endif

#define CIRCLEQ_HEAD(name, type) \
struct name { \
        struct type *cqh_first;         /* first element */ \
        struct type *cqh_last;          /* last element */ \
}

#define CIRCLEQ_HEAD_INITIALIZER(head) \
        { (void *)&head, (void *)&head }

#define CIRCLEQ_ENTRY(type) \
struct { \
        struct type *cqe_next;          /* next element */ \
        struct type *cqe_prev;          /* previous element */ \
}

/*
 * Circular queue functions.
 */
#define CIRCLEQ_INIT(head) do { \
        (head)->cqh_first = (void *)(head); \
        (head)->cqh_last = (void *)(head); \
} while (/* CONSTCOND */0)

#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
        QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
        QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
        (elm)->field.cqe_next = (listelm)->field.cqe_next; \
        (elm)->field.cqe_prev = (listelm); \
        if ((listelm)->field.cqe_next == (void *)(head)) \
                (head)->cqh_last = (elm); \
        else \
                (listelm)->field.cqe_next->field.cqe_prev = (elm); \
        (listelm)->field.cqe_next = (elm); \
} while (/* CONSTCOND */0)

#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
        QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
        QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
        (elm)->field.cqe_next = (listelm); \
        (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
        if ((listelm)->field.cqe_prev == (void *)(head)) \
                (head)->cqh_first = (elm); \
        else \
                (listelm)->field.cqe_prev->field.cqe_next = (elm); \
        (listelm)->field.cqe_prev = (elm); \
} while (/* CONSTCOND */0)

#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
        QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
        (elm)->field.cqe_next = (head)->cqh_first; \
        (elm)->field.cqe_prev = (void *)(head); \
        if ((head)->cqh_last == (void *)(head)) \
                (head)->cqh_last = (elm); \
        else \
                (head)->cqh_first->field.cqe_prev = (elm); \
        (head)->cqh_first = (elm); \
} while (/* CONSTCOND */0)

#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
        QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
        (elm)->field.cqe_next = (void *)(head); \
        (elm)->field.cqe_prev = (head)->cqh_last; \
        if ((head)->cqh_first == (void *)(head)) \
                (head)->cqh_first = (elm); \
        else \
                (head)->cqh_last->field.cqe_next = (elm); \
        (head)->cqh_last = (elm); \
} while (/* CONSTCOND */0)

#define CIRCLEQ_REMOVE(head, elm, field) do { \
        QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
        QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
        if ((elm)->field.cqe_next == (void *)(head)) \
                (head)->cqh_last = (elm)->field.cqe_prev; \
        else \
                (elm)->field.cqe_next->field.cqe_prev = \
                    (elm)->field.cqe_prev; \
        if ((elm)->field.cqe_prev == (void *)(head)) \
                (head)->cqh_first = (elm)->field.cqe_next; \
        else \
                (elm)->field.cqe_prev->field.cqe_next = \
                    (elm)->field.cqe_next; \
        QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
} while (/* CONSTCOND */0)

#define CIRCLEQ_FOREACH(var, head, field) \
        for ((var) = ((head)->cqh_first); \
            (var) != (const void *)(head); \
            (var) = ((var)->field.cqe_next))

#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
        for ((var) = ((head)->cqh_last); \
            (var) != (const void *)(head); \
            (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define CIRCLEQ_EMPTY(head)             ((head)->cqh_first == (void *)(head))
#define CIRCLEQ_FIRST(head)             ((head)->cqh_first)
#define CIRCLEQ_LAST(head)              ((head)->cqh_last)
#define CIRCLEQ_NEXT(elm, field)        ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field)        ((elm)->field.cqe_prev)

#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
        (((elm)->field.cqe_next == (void *)(head)) \
            ? ((head)->cqh_first) \
            : ((elm)->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
        (((elm)->field.cqe_prev == (void *)(head)) \
            ? ((head)->cqh_last) \
            : ((elm)->field.cqe_prev))
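/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical circular queue.  Traversal terminates when the iterator
 * points back at the head itself, not at NULL.  The names slot, ring,
 * id and link are placeholders invented for this example only; s stands
 * for a previously allocated element, and <stdio.h> is assumed.
 *
 *      struct slot {
 *              int id;
 *              CIRCLEQ_ENTRY(slot) link;
 *      };
 *      CIRCLEQ_HEAD(ring, slot) r;
 *      CIRCLEQ_INIT(&r);                       // head points at itself
 *
 *      CIRCLEQ_INSERT_TAIL(&r, s, link);
 *
 *      struct slot *p;
 *      CIRCLEQ_FOREACH(p, &r, link)            // stops at (void *)&r
 *              printf("%d\n", p->id);
 *
 *      // CIRCLEQ_LOOP_NEXT(&r, p, link) wraps from the last element
 *      // back to the first instead of returning the head pointer.
 */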
#endif	/* !_SYS_QUEUE_H_ */