/* SPDX-License-Identifier: BSD-3-Clause */
/*	$NetBSD: queue.h,v 1.49.6.1 2008/11/20 03:22:38 snj Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */

#ifndef	_SYS_QUEUE_H_
#define	_SYS_QUEUE_H_

/* #include <sys/null.h> */

/*
 * This file defines five types of data structures: singly-linked lists,
 * lists, simple queues, tail queues, and circular queues.
 *
 * A singly-linked list is headed by a single forward pointer.  The
 * elements are singly linked for minimum space and pointer manipulation
 * overhead at the expense of O(n) removal for arbitrary elements.  New
 * elements can be added to the list after an existing element or at the
 * head of the list.  Elements being removed from the head of the list
 * should use the explicit macro for this purpose for optimum
 * efficiency.  A singly-linked list may only be traversed in the forward
 * direction.  Singly-linked lists are ideal for applications with large
 * datasets and few or no removals, or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header).  The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before
 * or after an existing element or at the head of the list.  A list
 * may only be traversed in the forward direction.
 *
 * A simple queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are singly
 * linked to save space, so elements can only be removed from the
 * head of the list.  New elements can be added to the list after
 * an existing element, at the head of the list, or at the end of the
 * list.  A simple queue may only be traversed in the forward direction.
 * The singly-linked tail queue (STAILQ) macros provided below are
 * structurally identical to the simple queue macros and differ only in
 * naming.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list.  A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has a more
 * complex end of list detection.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 */
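
/*
 * Illustrative usage sketch (see queue(3) for complete examples): every
 * family below follows the same pattern.  A head structure is declared
 * with *_HEAD(), every user structure embeds an *_ENTRY() as its linkage
 * field, and the macros are passed the head, the element, and the name of
 * that field.  The names used here (struct example_node, value, link,
 * example_head, nodes) are hypothetical.
 *
 *	struct example_node {
 *		int value;
 *		TAILQ_ENTRY(example_node) link;
 *	};
 *	TAILQ_HEAD(example_head, example_node) nodes =
 *	    TAILQ_HEAD_INITIALIZER(nodes);
 */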

/*
 * List definitions.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
	if ((head)->lh_first &&						\
	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_OP(elm, field)					\
	if ((elm)->field.le_next &&					\
	    (elm)->field.le_next->field.le_prev !=			\
	    &(elm)->field.le_next)					\
		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.le_prev != (elm))				\
		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
	(elm)->field.le_next = (void *)1L;				\
	(elm)->field.le_prev = (void *)1L;
#else
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_LIST_OP(elm, field)
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define	LIST_INIT(head) do {						\
	(head)->lh_first = NULL;					\
} while (/* CONSTCOND */0)

#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
		(listelm)->field.le_next->field.le_prev =		\
		    &(elm)->field.le_next;				\
	(listelm)->field.le_next = (elm);				\
	(elm)->field.le_prev = &(listelm)->field.le_next;		\
} while (/* CONSTCOND */0)

#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	(elm)->field.le_next = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &(elm)->field.le_next;		\
} while (/* CONSTCOND */0)

#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
} while (/* CONSTCOND */0)

#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev = 			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
} while (/* CONSTCOND */0)

#define	LIST_FOREACH(var, head, field)					\
	for ((var) = ((head)->lh_first);				\
	    (var);							\
	    (var) = ((var)->field.le_next))

/*
 * List access methods.
 */
#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
#define	LIST_FIRST(head)		((head)->lh_first)
#define	LIST_NEXT(elm, field)		((elm)->field.le_next)

#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))
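
/*
 * Illustrative LIST usage sketch; the structure, variable, and function
 * names (struct entry, head, n1, n2, np, process) are hypothetical:
 *
 *	LIST_HEAD(listhead, entry) head = LIST_HEAD_INITIALIZER(head);
 *	struct entry {
 *		int value;
 *		LIST_ENTRY(entry) entries;
 *	} *n1, *n2, *np;
 *
 *	LIST_INIT(&head);
 *	LIST_INSERT_HEAD(&head, n1, entries);
 *	LIST_INSERT_AFTER(n1, n2, entries);
 *	LIST_FOREACH(np, &head, entries)
 *		process(np->value);
 *	LIST_REMOVE(n2, entries);
 */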

/*
 * Singly-linked List definitions.
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}

/*
 * Singly-linked List functions.
 */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
} while (/* CONSTCOND */0)

#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
} while (/* CONSTCOND */0)

#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
} while (/* CONSTCOND */0)

#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while (curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
} while (/* CONSTCOND */0)

#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
	(slistelm)->field.sle_next =					\
	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
} while (/* CONSTCOND */0)

#define	SLIST_FOREACH(var, head, field)					\
	for ((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

/*
 * Singly-linked List access methods.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
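
/*
 * Illustrative SLIST usage sketch, treating the list as a LIFO stack;
 * the names (struct item, stackhead, stack, it) are hypothetical:
 *
 *	SLIST_HEAD(stackhead, item) stack = SLIST_HEAD_INITIALIZER(stack);
 *	struct item {
 *		int value;
 *		SLIST_ENTRY(item) next;
 *	} *it;
 *
 *	SLIST_INSERT_HEAD(&stack, it, next);	push
 *	it = SLIST_FIRST(&stack);		peek
 *	SLIST_REMOVE_HEAD(&stack, next);	pop
 */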

/*
 * Singly-linked Tail queue declarations.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue functions.
 */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
} while (/* CONSTCOND */0)

#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
} while (/* CONSTCOND */0)

#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
} while (/* CONSTCOND */0)

#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first;		\
} while (/* CONSTCOND */0)

#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->stqh_first == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->stqh_first;		\
		while (curelm->field.stqe_next != (elm))		\
			curelm = curelm->field.stqe_next;		\
		if ((curelm->field.stqe_next =				\
			curelm->field.stqe_next->field.stqe_next) == NULL) \
			    (head)->stqh_last = &(curelm)->field.stqe_next; \
	}								\
} while (/* CONSTCOND */0)

#define	STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	     STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
} while (0)

#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->stqh_first);				\
	    (var);							\
	    (var) = ((var)->field.stqe_next))

#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (/* CONSTCOND */0)

/*
 * Singly-linked Tail queue access methods.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
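
/*
 * Illustrative STAILQ usage sketch, using the tail pointer to build a FIFO
 * queue; the names (struct job, jobhead, pending, jp) are hypothetical:
 *
 *	STAILQ_HEAD(jobhead, job) pending = STAILQ_HEAD_INITIALIZER(pending);
 *	struct job {
 *		int id;
 *		STAILQ_ENTRY(job) entries;
 *	} *jp;
 *
 *	STAILQ_INSERT_TAIL(&pending, jp, entries);	enqueue at the tail
 *	jp = STAILQ_FIRST(&pending);			oldest element
 *	STAILQ_REMOVE_HEAD(&pending, entries);		dequeue from the head
 */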

/*
 * Simple queue definitions.
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}

/*
 * Simple queue functions.
 */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.sqe_next = NULL;					\
	*(head)->sqh_last = (elm);					\
	(head)->sqh_last = &(elm)->field.sqe_next;			\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(listelm)->field.sqe_next = (elm);				\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			    (head)->sqh_last = &(curelm)->field.sqe_next; \
	}								\
} while (/* CONSTCOND */0)

#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->sqh_first);				\
	    (var);							\
	    (var) = ((var)->field.sqe_next))

/*
 * Simple queue access methods.
 */
#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
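
/*
 * Illustrative SIMPLEQ usage sketch (the operations mirror the STAILQ
 * macros above); the names (struct msg, msghead, inbox, mp, handle) are
 * hypothetical:
 *
 *	SIMPLEQ_HEAD(msghead, msg) inbox = SIMPLEQ_HEAD_INITIALIZER(inbox);
 *	struct msg {
 *		int len;
 *		SIMPLEQ_ENTRY(msg) entries;
 *	} *mp;
 *
 *	SIMPLEQ_INSERT_TAIL(&inbox, mp, entries);
 *	SIMPLEQ_FOREACH(mp, &inbox, entries)
 *		handle(mp);
 *	while (!SIMPLEQ_EMPTY(&inbox))
 *		SIMPLEQ_REMOVE_HEAD(&inbox, entries);
 */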

/*
 * Tail queue definitions.
 */
#define	_TAILQ_HEAD(name, type, qual)					\
struct name {								\
	qual type *tqh_first;		/* first element */		\
	qual type *qual *tqh_last;	/* addr of last next element */	\
}
#define	TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)

#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define	_TAILQ_ENTRY(type, qual)					\
struct {								\
	qual type *tqe_next;		/* next element */		\
	qual type *qual *tqe_prev;	/* address of previous next element */\
}
#define	TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
	if ((head)->tqh_first &&					\
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
	if (*(head)->tqh_last != NULL)					\
		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
	if ((elm)->field.tqe_next &&					\
	    (elm)->field.tqe_next->field.tqe_prev !=			\
	    &(elm)->field.tqe_next)					\
		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
	if ((elm)->field.tqe_next == NULL &&				\
	    (head)->tqh_last != &(elm)->field.tqe_next)			\
		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
		      (head), (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
	(elm)->field.tqe_next = (void *)1L;				\
	(elm)->field.tqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define	QUEUEDEBUG_TAILQ_OP(elm, field)
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define	TAILQ_INIT(head) do {						\
	(head)->tqh_first = NULL;					\
	(head)->tqh_last = &(head)->tqh_first;				\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
		(head)->tqh_first->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(head)->tqh_first = (elm);					\
	(elm)->field.tqe_prev = &(head)->tqh_first;			\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
	(elm)->field.tqe_next = NULL;					\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &(elm)->field.tqe_next;			\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(listelm)->field.tqe_next = (elm);				\
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
} while (/* CONSTCOND */0)

#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (/* CONSTCOND */0)

#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
} while (/* CONSTCOND */0)

#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = ((head)->tqh_first);				\
	    (var);							\
	    (var) = ((var)->field.tqe_next))

#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
	for ((var) = ((head)->tqh_first);				\
	    (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1);	\
	    (var) = (next))

#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
	    (var);							\
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((prev) = TAILQ_PREV((var), headname, field), 1);\
	    (var) = (prev))

#define	TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
	}								\
} while (/* CONSTCOND */0)

/*
 * Tail queue access methods.
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

#define	TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
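
/*
 * Illustrative TAILQ usage sketch; unlike the singly-linked queues above,
 * a tail queue can be traversed in either direction and any element can
 * be removed directly.  The names (struct entry, tailhead, head, n1, np,
 * process) are hypothetical:
 *
 *	TAILQ_HEAD(tailhead, entry) head = TAILQ_HEAD_INITIALIZER(head);
 *	struct entry {
 *		int value;
 *		TAILQ_ENTRY(entry) entries;
 *	} *n1, *np;
 *
 *	TAILQ_INSERT_TAIL(&head, n1, entries);
 *	TAILQ_FOREACH_REVERSE(np, &head, tailhead, entries)
 *		process(np->value);
 *	TAILQ_REMOVE(&head, n1, entries);
 */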

/*
 * Circular queue definitions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
	if ((head)->cqh_first != (void *)(head) &&			\
	    (head)->cqh_first->field.cqe_prev != (void *)(head))	\
		panic("CIRCLEQ head forw %p %s:%d", (head),		\
		      __FILE__, __LINE__);				\
	if ((head)->cqh_last != (void *)(head) &&			\
	    (head)->cqh_last->field.cqe_next != (void *)(head))	\
		panic("CIRCLEQ head back %p %s:%d", (head),		\
		      __FILE__, __LINE__);
#define	QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
	if ((elm)->field.cqe_next == (void *)(head)) {			\
		if ((head)->cqh_last != (elm))				\
			panic("CIRCLEQ elm last %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
			panic("CIRCLEQ elm forw %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	}								\
	if ((elm)->field.cqe_prev == (void *)(head)) {			\
		if ((head)->cqh_first != (elm))				\
			panic("CIRCLEQ elm first %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
			panic("CIRCLEQ elm prev %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	}
#define	QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
	(elm)->field.cqe_next = (void *)1L;				\
	(elm)->field.cqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
#define	QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
#define	QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
#endif

#define	CIRCLEQ_HEAD(name, type)					\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}

#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
	{ (void *)&head, (void *)&head }

#define	CIRCLEQ_ENTRY(type)						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}

/*
 * Circular queue functions.
 */
#define	CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm);				\
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
	if ((listelm)->field.cqe_prev == (void *)(head))		\
		(head)->cqh_first = (elm);				\
	else								\
		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
	(listelm)->field.cqe_prev = (elm);				\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (head)->cqh_first;			\
	(elm)->field.cqe_prev = (void *)(head);				\
	if ((head)->cqh_last == (void *)(head))				\
		(head)->cqh_last = (elm);				\
	else								\
		(head)->cqh_first->field.cqe_prev = (elm);		\
	(head)->cqh_first = (elm);					\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (void *)(head);				\
	(elm)->field.cqe_prev = (head)->cqh_last;			\
	if ((head)->cqh_first == (void *)(head))			\
		(head)->cqh_first = (elm);				\
	else								\
		(head)->cqh_last->field.cqe_next = (elm);		\
	(head)->cqh_last = (elm);					\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
} while (/* CONSTCOND */0)

#define	CIRCLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->cqh_first);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_next))

#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
	for ((var) = ((head)->cqh_last);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)

#define	CIRCLEQ_LOOP_NEXT(head, elm, field)				\
	(((elm)->field.cqe_next == (void *)(head))			\
	    ? ((head)->cqh_first)					\
	    : (elm->field.cqe_next))
#define	CIRCLEQ_LOOP_PREV(head, elm, field)				\
	(((elm)->field.cqe_prev == (void *)(head))			\
	    ? ((head)->cqh_last)					\
	    : (elm->field.cqe_prev))
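
/*
 * Illustrative CIRCLEQ usage sketch; traversal stops when the head itself
 * is reached, while CIRCLEQ_LOOP_NEXT()/CIRCLEQ_LOOP_PREV() wrap around
 * it.  The names (struct entry, circlehead, head, np, process) are
 * hypothetical:
 *
 *	CIRCLEQ_HEAD(circlehead, entry) head = CIRCLEQ_HEAD_INITIALIZER(head);
 *	struct entry {
 *		int value;
 *		CIRCLEQ_ENTRY(entry) entries;
 *	} *np;
 *
 *	CIRCLEQ_FOREACH(np, &head, entries)
 *		process(np->value);
 *	np = CIRCLEQ_LOOP_NEXT(&head, CIRCLEQ_LAST(&head), entries);
 *		now np == CIRCLEQ_FIRST(&head), assuming a non-empty queue
 */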

#endif	/* !_SYS_QUEUE_H_ */