eaae7841ba
Add fast paths to find_*_bit() functions as per kernel implementation. Link: https://lkml.kernel.org/r/20210401003153.97325-12-yury.norov@gmail.com Signed-off-by: Yury Norov <yury.norov@gmail.com> Acked-by: Rasmus Villemoes <linux@rasmusvillemoes.dk> Cc: Alexey Klimov <aklimov@redhat.com> Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: David Sterba <dsterba@suse.com> Cc: Dennis Zhou <dennis@kernel.org> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Jianpeng Ma <jianpeng.ma@intel.com> Cc: Joe Perches <joe@perches.com> Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Rich Felker <dalias@libc.org> Cc: Stefano Brivio <sbrivio@redhat.com> Cc: Wei Yang <richard.weiyang@linux.alibaba.com> Cc: Wolfram Sang <wsa+renesas@sang-engineering.com> Cc: Yoshinori Sato <ysato@users.osdn.me> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
115 lines
2.6 KiB
C
115 lines
2.6 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
/* bit search implementation
|
|
*
|
|
* Copied from lib/find_bit.c to tools/lib/find_bit.c
|
|
*
|
|
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
*
|
|
* Copyright (C) 2008 IBM Corporation
|
|
* 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
|
|
* (Inspired by David Howell's find_next_bit implementation)
|
|
*
|
|
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
|
|
* size and improve performance, 2015.
|
|
*/
|
|
|
|
#include <linux/bitops.h>
|
|
#include <linux/bitmap.h>
|
|
#include <linux/kernel.h>
|
|
|
|
#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
|
|
!defined(find_next_and_bit)
|
|
|
|
/*
|
|
* This is a common helper function for find_next_bit, find_next_zero_bit, and
|
|
* find_next_and_bit. The differences are:
|
|
* - The "invert" argument, which is XORed with each fetched word before
|
|
* searching it for one bits.
|
|
 * - The optional "addr2", which is ANDed with "addr1" if present.
|
|
*/
|
|
unsigned long _find_next_bit(const unsigned long *addr1,
|
|
const unsigned long *addr2, unsigned long nbits,
|
|
unsigned long start, unsigned long invert, unsigned long le)
|
|
{
|
|
unsigned long tmp, mask;
|
|
(void) le;
|
|
|
|
if (unlikely(start >= nbits))
|
|
return nbits;
|
|
|
|
tmp = addr1[start / BITS_PER_LONG];
|
|
if (addr2)
|
|
tmp &= addr2[start / BITS_PER_LONG];
|
|
tmp ^= invert;
|
|
|
|
/* Handle 1st word. */
|
|
mask = BITMAP_FIRST_WORD_MASK(start);
|
|
|
|
/*
|
|
* Due to the lack of swab() in tools, and the fact that it doesn't
|
|
* need little-endian support, just comment it out
|
|
*/
|
|
#if (0)
|
|
if (le)
|
|
mask = swab(mask);
|
|
#endif
|
|
|
|
tmp &= mask;
|
|
|
|
start = round_down(start, BITS_PER_LONG);
|
|
|
|
while (!tmp) {
|
|
start += BITS_PER_LONG;
|
|
if (start >= nbits)
|
|
return nbits;
|
|
|
|
tmp = addr1[start / BITS_PER_LONG];
|
|
if (addr2)
|
|
tmp &= addr2[start / BITS_PER_LONG];
|
|
tmp ^= invert;
|
|
}
|
|
|
|
#if (0)
|
|
if (le)
|
|
tmp = swab(tmp);
|
|
#endif
|
|
|
|
return min(start + __ffs(tmp), nbits);
|
|
}
|
|
#endif
|
|
|
|
#ifndef find_first_bit
|
|
/*
|
|
* Find the first set bit in a memory region.
|
|
*/
|
|
unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
|
|
{
|
|
unsigned long idx;
|
|
|
|
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
|
|
if (addr[idx])
|
|
return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
|
|
}
|
|
|
|
return size;
|
|
}
|
|
#endif
|
|
|
|
#ifndef find_first_zero_bit
|
|
/*
|
|
* Find the first cleared bit in a memory region.
|
|
*/
|
|
unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
|
|
{
|
|
unsigned long idx;
|
|
|
|
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
|
|
if (addr[idx] != ~0UL)
|
|
return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
|
|
}
|
|
|
|
return size;
|
|
}
|
|
#endif
|