IRIX spinlock patch from James Peach <jpeach@sgi.com>.
Jeremy. (This used to be commit 1ae1987a002716e8aa0d4bc0dd68f580ad762e47)
parent f2c6cec339
commit 86d65dc070
@@ -2013,6 +2013,14 @@ if test x"$samba_cv_SYSCONF_SC_NGROUPS_MAX" = x"yes"; then
    AC_DEFINE(SYSCONF_SC_NGROUPS_MAX,1,[Whether sysconf(_SC_NGROUPS_MAX) is available])
fi

AC_CACHE_CHECK([for sysconf(_SC_NPROC_ONLN)],samba_cv_SYSCONF_SC_NPROC_ONLN,[
    AC_TRY_RUN([#include <unistd.h>
main() { exit(sysconf(_SC_NPROC_ONLN) == -1 ? 1 : 0); }],
    samba_cv_SYSCONF_SC_NPROC_ONLN=yes,samba_cv_SYSCONF_SC_NPROC_ONLN=no,samba_cv_SYSCONF_SC_NPROC_ONLN=cross)])
if test x"$samba_cv_SYSCONF_SC_NPROC_ONLN" = x"yes"; then
    AC_DEFINE(SYSCONF_SC_NPROC_ONLN,1,[Whether sysconf(_SC_NPROC_ONLN) is available])
fi

AC_CACHE_CHECK([for root],samba_cv_HAVE_ROOT,[
    AC_TRY_RUN([main() { exit(getuid() != 0); }],
      samba_cv_HAVE_ROOT=yes,samba_cv_HAVE_ROOT=no,samba_cv_HAVE_ROOT=cross)])
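For context (not part of the commit): the AC_TRY_RUN probe above compiles and runs a tiny test program, and the cache variable is set to yes only when that program exits 0. A standalone equivalent looks roughly like the following; _SC_NPROC_ONLN is the IRIX spelling of the online-processor query, so this only compiles where that constant is defined.

/* Illustrative probe, mirroring the AC_TRY_RUN test above (not part of the diff). */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long n = sysconf(_SC_NPROC_ONLN);	/* -1 means the query is unsupported */

	if (n == -1) {
		printf("_SC_NPROC_ONLN not supported\n");
		return 1;	/* configure would then leave SYSCONF_SC_NPROC_ONLN undefined */
	}
	printf("%ld processor(s) online\n", n);
	return 0;
}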
@@ -143,6 +143,47 @@ static inline int __spin_is_locked(spinlock_t *lock)
	return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#if defined(STANDALONE)

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */

/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
	unsigned int val;
	val = __lock_test_and_set(lock, 1);
	return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
	__lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
	__lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
	unsigned int val;
	val = __add_and_fetch(lock, 0);
	return val;
}

#elif defined(MIPS_SPINLOCKS)

static inline unsigned int load_linked(unsigned long addr)
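For context (not part of the commit): these primitives are non-blocking, so __spin_trylock makes a single atomic attempt and returns EBUSY on failure. A blocking lock is then built by retrying, typically backing off with the file's yield_cpu() helper so the current holder can run. Below is a minimal sketch of that composition; the function name and loop policy are illustrative assumptions, not the actual code in this file.

/* Sketch only: how a blocking lock can be layered on the trylock primitive. */
static void spin_lock_sketch(spinlock_t *lock)
{
	while (__spin_trylock(lock) != 0) {
		/* Someone else holds the lock: wait until it looks free,
		 * yielding the CPU so the holder can make progress. */
		while (__spin_is_locked(lock))
			yield_cpu();
	}
}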
@@ -221,7 +262,11 @@ static void yield_cpu(void)

static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
	return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
	return 0;
#endif
}

/*
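One more note (not part of the commit): the constant name itself is what makes the configure check necessary, since _SC_NPROC_ONLN is IRIX's spelling while other systems commonly use _SC_NPROCESSORS_ONLN. A minimal source-level sketch of that portability point, shown only to illustrate the problem the autoconf test solves:

/* Illustrative only: probing for whichever constant the platform defines. */
#include <unistd.h>

static long online_cpus(void)
{
#if defined(_SC_NPROC_ONLN)
	return sysconf(_SC_NPROC_ONLN);		/* IRIX name */
#elif defined(_SC_NPROCESSORS_ONLN)
	return sysconf(_SC_NPROCESSORS_ONLN);	/* common name elsewhere */
#else
	return 1;	/* cannot tell; assume uniprocessor */
#endif
}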