/* $NetBSD: queue.h,v 1.38 2004/04/18 14:12:05 lukem Exp $ */
/* $Id$ */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */
#ifndef _HEIM_QUEUE_H_
#define _HEIM_QUEUE_H_

/*
 * Singly-linked List definitions.
 */
#define HEIM_SLIST_HEAD(name, type) \
struct name { \
	struct type *slh_first; /* first element */ \
}

#define HEIM_SLIST_HEAD_INITIALIZER(head) \
	{ NULL }

#define HEIM_SLIST_ENTRY(type) \
struct { \
	struct type *sle_next; /* next element */ \
}
/*
 * Singly-linked List functions.
 */
#define HEIM_SLIST_INIT(head) do { \
	(head)->slh_first = NULL; \
} while (/*CONSTCOND*/0)

#define HEIM_SLIST_INSERT_AFTER(slistelm, elm, field) do { \
	(elm)->field.sle_next = (slistelm)->field.sle_next; \
	(slistelm)->field.sle_next = (elm); \
} while (/*CONSTCOND*/0)

#define HEIM_SLIST_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.sle_next = (head)->slh_first; \
	(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)

#define HEIM_SLIST_REMOVE_HEAD(head, field) do { \
	(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)

#define HEIM_SLIST_REMOVE(head, elm, type, field) do { \
	if ((head)->slh_first == (elm)) { \
		HEIM_SLIST_REMOVE_HEAD((head), field); \
	} \
	else { \
		struct type *curelm = (head)->slh_first; \
		while(curelm->field.sle_next != (elm)) \
			curelm = curelm->field.sle_next; \
		curelm->field.sle_next = \
		    curelm->field.sle_next->field.sle_next; \
	} \
} while (/*CONSTCOND*/0)

#define HEIM_SLIST_FOREACH(var, head, field) \
	for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

/*
 * Singly-linked List access methods.
 */
#define HEIM_SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define HEIM_SLIST_FIRST(head) ((head)->slh_first)
#define HEIM_SLIST_NEXT(elm, field) ((elm)->field.sle_next)
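
/*
 * Usage sketch for the singly-linked list macros above.  This is an
 * illustrative example only; the type "struct entry", the field name
 * "entries" and the variables are hypothetical and not part of this
 * header.
 *
 *	HEIM_SLIST_HEAD(listhead, entry) head =
 *	    HEIM_SLIST_HEAD_INITIALIZER(head);
 *
 *	struct entry {
 *		int value;
 *		HEIM_SLIST_ENTRY(entry) entries;	// list linkage
 *	} *n1, *n2, *np;
 *
 *	n1 = malloc(sizeof(*n1));			// insert at head
 *	HEIM_SLIST_INSERT_HEAD(&head, n1, entries);
 *
 *	n2 = malloc(sizeof(*n2));			// insert after n1
 *	HEIM_SLIST_INSERT_AFTER(n1, n2, entries);
 *
 *	HEIM_SLIST_FOREACH(np, &head, entries)		// forward traversal
 *		np->value = 0;
 *
 *	while (!HEIM_SLIST_EMPTY(&head)) {		// delete the list
 *		np = HEIM_SLIST_FIRST(&head);
 *		HEIM_SLIST_REMOVE_HEAD(&head, entries);
 *		free(np);
 *	}
 */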
/*
* Singly-linked List atomic functions.
*/
#include "heimbase.h"
#define HEIM_SLIST_ATOMIC_HEAD(name, type) \
struct name { \
heim_base_atomic(struct type *) slh_first; /* first element */ \
}
#define HEIM_SLIST_ATOMIC_ENTRY(type) \
struct { \
heim_base_atomic(struct type *) sle_next; /* next element */ \
}
#define HEIM_SLIST_ATOMIC_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = \
heim_base_exchange_pointer(&(head)->slh_first, (elm)); \
} while (/*CONSTCOND*/0)
#define HEIM_SLIST_ATOMIC_FOREACH(var, head, field) \
for ((var) = heim_base_atomic_load(&(head)->slh_first); \
(var) != NULL; \
(var) = heim_base_atomic_load(&(var)->field.sle_next))
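
/*
 * Usage sketch for the atomic singly-linked list macros above.
 * Illustrative only; the type "struct job" and the variables are
 * hypothetical.  Mechanically, HEIM_SLIST_ATOMIC_INSERT_HEAD() swaps
 * the new element into the head with an atomic pointer exchange, and
 * HEIM_SLIST_ATOMIC_FOREACH() reads each link with an atomic load.
 *
 *	HEIM_SLIST_ATOMIC_HEAD(jobs, job) head = { NULL };	// init before threads run
 *
 *	struct job {
 *		int id;
 *		HEIM_SLIST_ATOMIC_ENTRY(job) link;
 *	} *j, *jp;
 *
 *	j = calloc(1, sizeof(*j));			// producer
 *	j->id = 42;
 *	HEIM_SLIST_ATOMIC_INSERT_HEAD(&head, j, link);
 *
 *	int count = 0;					// reader
 *	HEIM_SLIST_ATOMIC_FOREACH(jp, &head, link)
 *		count++;
 */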
/*
 * Tail queue definitions.
 */
#define HEIM_TAILQ_HEAD(name, type) \
struct name { \
	struct type *tqh_first; /* first element */ \
	struct type **tqh_last; /* addr of last next element */ \
}

#define HEIM_TAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).tqh_first }

#define HEIM_TAILQ_ENTRY(type) \
struct { \
	struct type *tqe_next; /* next element */ \
	struct type **tqe_prev; /* address of previous next element */ \
}
/*
 * Tail queue functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD(head, elm, field) \
	if ((head)->tqh_first && \
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
		panic("HEIM_TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL(head, elm, field) \
	if (*(head)->tqh_last != NULL) \
		panic("HEIM_TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define QUEUEDEBUG_HEIM_TAILQ_OP(elm, field) \
	if ((elm)->field.tqe_next && \
	    (elm)->field.tqe_next->field.tqe_prev != \
	    &(elm)->field.tqe_next) \
		panic("HEIM_TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm)) \
		panic("HEIM_TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_HEIM_TAILQ_PREREMOVE(head, elm, field) \
	if ((elm)->field.tqe_next == NULL && \
	    (head)->tqh_last != &(elm)->field.tqe_next) \
		panic("HEIM_TAILQ_PREREMOVE head %p elm %p %s:%d", \
		    (head), (elm), __FILE__, __LINE__);
#define QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE(elm, field) \
	(elm)->field.tqe_next = (void *)1L; \
	(elm)->field.tqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD(head, elm, field)
#define QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL(head, elm, field)
#define QUEUEDEBUG_HEIM_TAILQ_OP(elm, field)
#define QUEUEDEBUG_HEIM_TAILQ_PREREMOVE(head, elm, field)
#define QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE(elm, field)
#endif
#define HEIM_TAILQ_INIT(head) do { \
	(head)->tqh_first = NULL; \
	(head)->tqh_last = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)

#define HEIM_TAILQ_INSERT_HEAD(head, elm, field) do { \
	QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD((head), (elm), field) \
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
		(head)->tqh_first->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(head)->tqh_first = (elm); \
	(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)

#define HEIM_TAILQ_INSERT_TAIL(head, elm, field) do { \
	QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL((head), (elm), field) \
	(elm)->field.tqe_next = NULL; \
	(elm)->field.tqe_prev = (head)->tqh_last; \
	*(head)->tqh_last = (elm); \
	(head)->tqh_last = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

#define HEIM_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	QUEUEDEBUG_HEIM_TAILQ_OP((listelm), field) \
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev = \
		    &(elm)->field.tqe_next; \
	else \
		(head)->tqh_last = &(elm)->field.tqe_next; \
	(listelm)->field.tqe_next = (elm); \
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

#define HEIM_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
	QUEUEDEBUG_HEIM_TAILQ_OP((listelm), field) \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
	(elm)->field.tqe_next = (listelm); \
	*(listelm)->field.tqe_prev = (elm); \
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)

#define HEIM_TAILQ_REMOVE(head, elm, field) do { \
	QUEUEDEBUG_HEIM_TAILQ_PREREMOVE((head), (elm), field) \
	QUEUEDEBUG_HEIM_TAILQ_OP((elm), field) \
	if (((elm)->field.tqe_next) != NULL) \
		(elm)->field.tqe_next->field.tqe_prev = \
		    (elm)->field.tqe_prev; \
	else \
		(head)->tqh_last = (elm)->field.tqe_prev; \
	*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
	QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE((elm), field); \
} while (/*CONSTCOND*/0)

#define HEIM_TAILQ_FOREACH(var, head, field) \
	for ((var) = ((head)->tqh_first); \
	     (var); \
	     (var) = ((var)->field.tqe_next))

#define HEIM_TAILQ_FOREACH_SAFE(var, head, field, next) \
	for ((var) = ((head)->tqh_first); \
	     (var) != NULL && ((next) = HEIM_TAILQ_NEXT(var, field), 1); \
	     (var) = (next))

#define HEIM_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
	     (var); \
	     (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define HEIM_TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
	for ((var) = HEIM_TAILQ_LAST((head), headname); \
	     (var) && ((prev) = HEIM_TAILQ_PREV((var), headname, field), 1);\
	     (var) = (prev))

#define HEIM_TAILQ_CONCAT(head1, head2, field) do { \
	if (!HEIM_TAILQ_EMPTY(head2)) { \
		*(head1)->tqh_last = (head2)->tqh_first; \
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last; \
		HEIM_TAILQ_INIT((head2)); \
	} \
} while (/*CONSTCOND*/0)
/*
 * Tail queue access methods.
 */
#define HEIM_TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define HEIM_TAILQ_FIRST(head) ((head)->tqh_first)
#define HEIM_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define HEIM_TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define HEIM_TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
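
/*
 * Usage sketch for the tail queue macros above.  Illustrative only;
 * "struct entry", the head type "tailhead", the field name "entries"
 * and the variables are hypothetical.
 *
 *	HEIM_TAILQ_HEAD(tailhead, entry) head =
 *	    HEIM_TAILQ_HEAD_INITIALIZER(head);
 *
 *	struct entry {
 *		int value;
 *		HEIM_TAILQ_ENTRY(entry) entries;	// queue linkage
 *	} *n1, *n2, *np, *tmp;
 *
 *	n1 = malloc(sizeof(*n1));			// insert at head
 *	HEIM_TAILQ_INSERT_HEAD(&head, n1, entries);
 *
 *	n2 = malloc(sizeof(*n2));			// insert at tail
 *	HEIM_TAILQ_INSERT_TAIL(&head, n2, entries);
 *
 *	HEIM_TAILQ_FOREACH(np, &head, entries)		// forward traversal
 *		np->value = 0;
 *
 *	HEIM_TAILQ_FOREACH_REVERSE(np, &head, tailhead, entries)
 *		np->value++;				// reverse traversal
 *
 *	// Safe traversal: each element may be removed and freed while
 *	// iterating, because the next pointer is saved in "tmp" first.
 *	HEIM_TAILQ_FOREACH_SAFE(np, &head, entries, tmp) {
 *		HEIM_TAILQ_REMOVE(&head, np, entries);
 *		free(np);
 *	}
 */
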
#endif /* !_HEIM_QUEUE_H_ */