/* SPDX-License-Identifier: BSD-3-Clause */
/*	$NetBSD: queue.h,v 1.49.6.1 2008/11/20 03:22:38 snj Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */

#ifndef	_SYS_QUEUE_H_
#define	_SYS_QUEUE_H_

/*#include <sys/null.h> */

/*
 * This file defines six types of data structures: singly-linked lists,
 * lists, simple queues, singly-linked tail queues, tail queues, and
 * circular queues.
 *
 * A singly-linked list is headed by a single forward pointer.  The
 * elements are singly linked for minimum space and pointer manipulation
 * overhead at the expense of O(n) removal for arbitrary elements.  New
 * elements can be added to the list after an existing element or at the
 * head of the list.  Elements being removed from the head of the list
 * should use the explicit macro for this purpose for optimum
 * efficiency.  A singly-linked list may only be traversed in the forward
 * direction.  Singly-linked lists are ideal for applications with large
 * datasets and few or no removals, or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header).  The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before
 * or after an existing element or at the head of the list.  A list
 * may only be traversed in the forward direction.
 *
 * A simple queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are singly
 * linked to save space, so elements can only be removed from the
 * head of the list.  New elements can be added to the list after
 * an existing element, at the head of the list, or at the end of the
 * list.  A simple queue may only be traversed in the forward direction.
 *
 * A singly-linked tail queue is likewise headed by a pair of pointers,
 * one to the head of the list and the other to the tail of the list.
 * Like a simple queue, the elements are singly linked and the list may
 * only be traversed in the forward direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list.  A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has a more
 * complex end of list detection.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 */
/*
 * List definitions.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
	if ((head)->lh_first &&						\
	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_OP(elm, field)					\
	if ((elm)->field.le_next &&					\
	    (elm)->field.le_next->field.le_prev !=			\
	    &(elm)->field.le_next)					\
		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.le_prev != (elm))				\
		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
	(elm)->field.le_next = (void *)1L;				\
	(elm)->field.le_prev = (void *)1L;
#else
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_LIST_OP(elm, field)
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define	LIST_INIT(head) do {						\
	(head)->lh_first = NULL;					\
} while (/* CONSTCOND */0)

#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
		(listelm)->field.le_next->field.le_prev =		\
		    &(elm)->field.le_next;				\
	(listelm)->field.le_next = (elm);				\
	(elm)->field.le_prev = &(listelm)->field.le_next;		\
} while (/* CONSTCOND */0)

#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	(elm)->field.le_next = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &(elm)->field.le_next;		\
} while (/* CONSTCOND */0)

#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
} while (/* CONSTCOND */0)

#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev =			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
} while (/* CONSTCOND */0)

#define	LIST_FOREACH(var, head, field)					\
	for ((var) = ((head)->lh_first);				\
	    (var);							\
	    (var) = ((var)->field.le_next))

/*
 * List access methods.
 */
#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
#define	LIST_FIRST(head)		((head)->lh_first)
#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
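
/*
 * Illustrative LIST usage sketch.  The element type "struct lnode", its
 * "value" member, and the enclosing code are hypothetical; <stdio.h> and
 * <stdlib.h> are assumed for printf() and malloc()/free().
 *
 *	struct lnode {
 *		int value;
 *		LIST_ENTRY(lnode) entries;	// embedded linkage
 *	};
 *	LIST_HEAD(lhead, lnode) head = LIST_HEAD_INITIALIZER(head);
 *
 *	struct lnode *n = malloc(sizeof(*n));
 *	n->value = 1;
 *	LIST_INSERT_HEAD(&head, n, entries);
 *
 *	struct lnode *p;
 *	LIST_FOREACH(p, &head, entries)
 *		printf("%d\n", p->value);
 *
 *	LIST_REMOVE(n, entries);	// O(1); the head is not needed
 *	free(n);
 */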
/*
 * Singly-linked List definitions.
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}

/*
 * Singly-linked List functions.
 */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
} while (/* CONSTCOND */0)

#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
} while (/* CONSTCOND */0)

#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
} while (/* CONSTCOND */0)

#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while (curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
} while (/* CONSTCOND */0)

#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
	(slistelm)->field.sle_next =					\
	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
} while (/* CONSTCOND */0)

#define	SLIST_FOREACH(var, head, field)					\
	for ((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

/*
 * Singly-linked List access methods.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
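
/*
 * Illustrative SLIST usage sketch ("struct snode" and its "value" member
 * are hypothetical).  An SLIST spends one pointer per element and one in
 * the head, which is why removing an arbitrary element (SLIST_REMOVE)
 * costs O(n); head insertion and removal make it a natural LIFO stack.
 *
 *	struct snode {
 *		int value;
 *		SLIST_ENTRY(snode) entries;
 *	};
 *	SLIST_HEAD(shead, snode) head = SLIST_HEAD_INITIALIZER(head);
 *
 *	struct snode *n = malloc(sizeof(*n));
 *	n->value = 2;
 *	SLIST_INSERT_HEAD(&head, n, entries);		// push
 *
 *	while (!SLIST_EMPTY(&head)) {			// pop until empty
 *		struct snode *first = SLIST_FIRST(&head);
 *		SLIST_REMOVE_HEAD(&head, entries);
 *		free(first);
 *	}
 */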
/*
 * Singly-linked Tail queue declarations.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue functions.
 */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
} while (/* CONSTCOND */0)

#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
} while (/* CONSTCOND */0)

#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
} while (/* CONSTCOND */0)

#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first;		\
} while (/* CONSTCOND */0)

#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->stqh_first == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->stqh_first;		\
		while (curelm->field.stqe_next != (elm))		\
			curelm = curelm->field.stqe_next;		\
		if ((curelm->field.stqe_next =				\
		    curelm->field.stqe_next->field.stqe_next) == NULL)	\
			(head)->stqh_last = &(curelm)->field.stqe_next;	\
	}								\
} while (/* CONSTCOND */0)

#define	STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	    STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
} while (0)

#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->stqh_first);				\
	    (var);							\
	    (var) = ((var)->field.stqe_next))

#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (/* CONSTCOND */0)

/*
 * Singly-linked Tail queue access methods.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
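
/*
 * Illustrative STAILQ usage sketch ("struct stnode" is hypothetical).
 * The head keeps a pointer to the last element's next field, so
 * STAILQ_INSERT_TAIL is O(1) and the structure works well as a FIFO.
 *
 *	struct stnode {
 *		int value;
 *		STAILQ_ENTRY(stnode) entries;
 *	};
 *	STAILQ_HEAD(sthead, stnode) head = STAILQ_HEAD_INITIALIZER(head);
 *
 *	struct stnode *n = malloc(sizeof(*n));
 *	n->value = 3;
 *	STAILQ_INSERT_TAIL(&head, n, entries);		// enqueue
 *
 *	struct stnode *p, *tmp;
 *	STAILQ_FOREACH_SAFE(p, &head, entries, tmp) {	// drain the queue
 *		STAILQ_REMOVE_HEAD(&head, entries);
 *		free(p);
 *	}
 */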
/*
 * Simple queue definitions.
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}

/*
 * Simple queue functions.
 */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.sqe_next = NULL;					\
	*(head)->sqh_last = (elm);					\
	(head)->sqh_last = &(elm)->field.sqe_next;			\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(listelm)->field.sqe_next = (elm);				\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
		    curelm->field.sqe_next->field.sqe_next) == NULL)	\
			(head)->sqh_last = &(curelm)->field.sqe_next;	\
	}								\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->sqh_first);				\
	    (var);							\
	    (var) = ((var)->field.sqe_next))

/*
 * Simple queue access methods.
 */
#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
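
/*
 * Illustrative SIMPLEQ usage sketch ("struct qnode" is hypothetical).
 * A simple queue has the same head-and-tail, singly-linked layout as the
 * STAILQ macros above, so the usage pattern is the same.
 *
 *	struct qnode {
 *		int value;
 *		SIMPLEQ_ENTRY(qnode) entries;
 *	};
 *	SIMPLEQ_HEAD(qhead, qnode) head = SIMPLEQ_HEAD_INITIALIZER(head);
 *
 *	struct qnode *n = malloc(sizeof(*n));
 *	n->value = 4;
 *	SIMPLEQ_INSERT_TAIL(&head, n, entries);		// enqueue
 *
 *	while (!SIMPLEQ_EMPTY(&head)) {			// dequeue in order
 *		struct qnode *first = SIMPLEQ_FIRST(&head);
 *		SIMPLEQ_REMOVE_HEAD(&head, entries);
 *		free(first);
 *	}
 */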
/*
 * Tail queue definitions.
 */
#define	_TAILQ_HEAD(name, type, qual)					\
struct name {								\
	qual type *tqh_first;		/* first element */		\
	qual type *qual *tqh_last;	/* addr of last next element */	\
}
#define	TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)

#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define	_TAILQ_ENTRY(type, qual)					\
struct {								\
	qual type *tqe_next;		/* next element */		\
	qual type *qual *tqe_prev;	/* address of previous next element */\
}
#define	TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
	if ((head)->tqh_first &&					\
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
	if (*(head)->tqh_last != NULL)					\
		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
	if ((elm)->field.tqe_next &&					\
	    (elm)->field.tqe_next->field.tqe_prev !=			\
	    &(elm)->field.tqe_next)					\
		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
	if ((elm)->field.tqe_next == NULL &&				\
	    (head)->tqh_last != &(elm)->field.tqe_next)			\
		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
		    (head), (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
	(elm)->field.tqe_next = (void *)1L;				\
	(elm)->field.tqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define	QUEUEDEBUG_TAILQ_OP(elm, field)
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define	TAILQ_INIT(head) do {						\
	(head)->tqh_first = NULL;					\
	(head)->tqh_last = &(head)->tqh_first;				\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
		(head)->tqh_first->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(head)->tqh_first = (elm);					\
	(elm)->field.tqe_prev = &(head)->tqh_first;			\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
	(elm)->field.tqe_next = NULL;					\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &(elm)->field.tqe_next;			\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(listelm)->field.tqe_next = (elm);				\
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (/* CONSTCOND */0)

#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev =			\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
} while (/* CONSTCOND */0)
#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = ((head)->tqh_first);				\
	    (var);							\
	    (var) = ((var)->field.tqe_next))

#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
	for ((var) = ((head)->tqh_first);				\
	    (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1);	\
	    (var) = (next))

#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
	    (var);							\
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((prev) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (prev))

#define	TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;\
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
	}								\
} while (/* CONSTCOND */0)

/*
 * Tail queue access methods.
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

#define	TAILQ_LAST(head, headname)					\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
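
/*
 * Illustrative TAILQ usage sketch ("struct tnode" and the head tag
 * "thead" are hypothetical).  The headname argument of TAILQ_LAST,
 * TAILQ_PREV and the *_REVERSE iterators is the struct tag passed to
 * TAILQ_HEAD(); it is needed to reach the tail pointer when walking
 * backwards.
 *
 *	struct tnode {
 *		int value;
 *		TAILQ_ENTRY(tnode) entries;
 *	};
 *	TAILQ_HEAD(thead, tnode) head = TAILQ_HEAD_INITIALIZER(head);
 *
 *	struct tnode *n = malloc(sizeof(*n));
 *	n->value = 5;
 *	TAILQ_INSERT_TAIL(&head, n, entries);
 *
 *	struct tnode *p, *tmp;
 *	TAILQ_FOREACH_REVERSE(p, &head, thead, entries)	// back to front
 *		printf("%d\n", p->value);
 *
 *	TAILQ_FOREACH_SAFE(p, &head, entries, tmp) {	// safe removal
 *		TAILQ_REMOVE(&head, p, entries);
 *		free(p);
 *	}
 */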
/*
 * Circular queue definitions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
	if ((head)->cqh_first != (void *)(head) &&			\
	    (head)->cqh_first->field.cqe_prev != (void *)(head))	\
		panic("CIRCLEQ head forw %p %s:%d", (head),		\
		    __FILE__, __LINE__);				\
	if ((head)->cqh_last != (void *)(head) &&			\
	    (head)->cqh_last->field.cqe_next != (void *)(head))	\
		panic("CIRCLEQ head back %p %s:%d", (head),		\
		    __FILE__, __LINE__);
#define	QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
	if ((elm)->field.cqe_next == (void *)(head)) {			\
		if ((head)->cqh_last != (elm))				\
			panic("CIRCLEQ elm last %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
			panic("CIRCLEQ elm forw %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	}								\
	if ((elm)->field.cqe_prev == (void *)(head)) {			\
		if ((head)->cqh_first != (elm))				\
			panic("CIRCLEQ elm first %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
			panic("CIRCLEQ elm prev %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	}
#define	QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
	(elm)->field.cqe_next = (void *)1L;				\
	(elm)->field.cqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
#define	QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
#define	QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
#endif

#define	CIRCLEQ_HEAD(name, type)					\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}

#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
	{ (void *)&head, (void *)&head }

#define	CIRCLEQ_ENTRY(type)						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}

/*
 * Circular queue functions.
 */
#define	CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm);				\
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
	if ((listelm)->field.cqe_prev == (void *)(head))		\
		(head)->cqh_first = (elm);				\
	else								\
		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
	(listelm)->field.cqe_prev = (elm);				\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (head)->cqh_first;			\
	(elm)->field.cqe_prev = (void *)(head);				\
	if ((head)->cqh_last == (void *)(head))				\
		(head)->cqh_last = (elm);				\
	else								\
		(head)->cqh_first->field.cqe_prev = (elm);		\
	(head)->cqh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (void *)(head);				\
	(elm)->field.cqe_prev = (head)->cqh_last;			\
	if ((head)->cqh_first == (void *)(head))			\
		(head)->cqh_first = (elm);				\
	else								\
		(head)->cqh_last->field.cqe_next = (elm);		\
	(head)->cqh_last = (elm);					\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->cqh_first);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_next))

#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
	for ((var) = ((head)->cqh_last);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)

#define	CIRCLEQ_LOOP_NEXT(head, elm, field)				\
	(((elm)->field.cqe_next == (void *)(head))			\
	    ? ((head)->cqh_first)					\
	    : ((elm)->field.cqe_next))
#define	CIRCLEQ_LOOP_PREV(head, elm, field)				\
	(((elm)->field.cqe_prev == (void *)(head))			\
	    ? ((head)->cqh_last)					\
	    : ((elm)->field.cqe_prev))
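
/*
 * Illustrative CIRCLEQ usage sketch ("struct cnode" is hypothetical).
 * A circular queue terminates when a link points back at the head, not
 * at NULL; CIRCLEQ_FOREACH therefore compares against the head, and
 * CIRCLEQ_LOOP_NEXT/CIRCLEQ_LOOP_PREV wrap around for round-robin walks.
 *
 *	struct cnode {
 *		int value;
 *		CIRCLEQ_ENTRY(cnode) entries;
 *	};
 *	CIRCLEQ_HEAD(chead, cnode) head;
 *	CIRCLEQ_INIT(&head);	// CIRCLEQ_HEAD_INITIALIZER(head) also works
 *
 *	struct cnode *n = malloc(sizeof(*n));
 *	n->value = 6;
 *	CIRCLEQ_INSERT_TAIL(&head, n, entries);
 *
 *	struct cnode *p;
 *	CIRCLEQ_FOREACH(p, &head, entries)
 *		printf("%d\n", p->value);
 *
 *	CIRCLEQ_REMOVE(&head, n, entries);
 *	free(n);
 */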

#endif	/* !_SYS_QUEUE_H_ */