Merge branch 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax
Pull IDR rewrite from Matthew Wilcox:
 "The most significant part of the following is the patch to rewrite
  the IDR & IDA to be clients of the radix tree. But there's much more,
  including an enhancement of the IDA to be significantly more space
  efficient, an IDR & IDA test suite, some improvements to the IDR API
  (and driver changes to take advantage of those improvements), several
  improvements to the radix tree test suite and RCU annotations.

  The IDR & IDA rewrite had a good spin in linux-next and Andrew's tree
  for most of the last cycle. Coupled with the IDR test suite, I feel
  pretty confident that any remaining bugs are quite hard to hit. 0-day
  did a great job of watching my git tree and pointing out problems; as
  it hit them, I added new test-cases to be sure not to be caught the
  same way twice"

Willy goes on to expand a bit on the IDR rewrite rationale:
 "The radix tree and the IDR use very similar data structures.

  Merging the two codebases lets us share the memory allocation pools,
  and results in a net deletion of 500 lines of code. It also opens up
  the possibility of exposing more of the features of the radix tree to
  users of the IDR (and I have some interesting patches along those
  lines waiting for 4.12)

  It also shrinks the size of the 'struct idr' from 40 bytes to 24 which
  will shrink a fair few data structures that embed an IDR"

* 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax: (32 commits)
  radix tree test suite: Add config option for map shift
  idr: Add missing __rcu annotations
  radix-tree: Fix __rcu annotations
  radix-tree: Add rcu_dereference and rcu_assign_pointer calls
  radix tree test suite: Run iteration tests for longer
  radix tree test suite: Fix split/join memory leaks
  radix tree test suite: Fix leaks in regression2.c
  radix tree test suite: Fix leaky tests
  radix tree test suite: Enable address sanitizer
  radix_tree_iter_resume: Fix out of bounds error
  radix-tree: Store a pointer to the root in each node
  radix-tree: Chain preallocated nodes through ->parent
  radix tree test suite: Dial down verbosity with -v
  radix tree test suite: Introduce kmalloc_verbose
  idr: Return the deleted entry from idr_remove
  radix tree test suite: Build separate binaries for some tests
  ida: Use exceptional entries for small IDAs
  ida: Move ida_bitmap to a percpu variable
  Reimplement IDR and IDA using the radix tree
  radix-tree: Add radix_tree_iter_delete
  ...
commit cf393195c3
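Most of the driver hunks below apply one conversion pattern enabled by the
"idr: Return the deleted entry from idr_remove" commit: a lookup followed by a
separate removal collapses into a single call whose return value is the removed
pointer. A minimal before/after sketch (illustrative only; the foo_* names and
the table structure are hypothetical, not taken from any driver in this diff):

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Hypothetical handle table; stands in for the per-driver structures below. */
struct foo_table {
	spinlock_t lock;
	struct idr idr;
};

/* Old style: look the entry up, then walk the tree a second time to delete. */
static void *foo_release_old(struct foo_table *t, int id)
{
	void *entry;

	spin_lock(&t->lock);
	entry = idr_find(&t->idr, id);
	if (entry)
		idr_remove(&t->idr, id);
	spin_unlock(&t->lock);
	return entry;
}

/* New style: idr_remove() now returns the deleted entry itself. */
static void *foo_release_new(struct foo_table *t, int id)
{
	void *entry;

	spin_lock(&t->lock);
	entry = idr_remove(&t->idr, id);
	spin_unlock(&t->lock);
	return entry;
}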
@@ -1980,13 +1980,12 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 	card->lbfqc = ns_stat_lfbqc_get(stat);
 
 	id = le32_to_cpu(rsqe->buffer_handle);
-	skb = idr_find(&card->idr, id);
+	skb = idr_remove(&card->idr, id);
 	if (!skb) {
 		RXPRINTK(KERN_ERR
-			 "nicstar%d: idr_find() failed!\n", card->index);
+			 "nicstar%d: skb not found!\n", card->index);
 		return;
 	}
-	idr_remove(&card->idr, id);
 	dma_sync_single_for_cpu(&card->pcidev->dev,
 				NS_PRV_DMA(skb),
 				(NS_PRV_BUFTYPE(skb) == BUF_SM
@@ -2915,11 +2915,9 @@ out_idr_remove_vol:
 	idr_remove(&connection->peer_devices, vnr);
 out_idr_remove_from_resource:
 	for_each_connection(connection, resource) {
-		peer_device = idr_find(&connection->peer_devices, vnr);
-		if (peer_device) {
-			idr_remove(&connection->peer_devices, vnr);
+		peer_device = idr_remove(&connection->peer_devices, vnr);
+		if (peer_device)
 			kref_put(&connection->kref, drbd_destroy_connection);
-		}
 	}
 	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
 		list_del(&peer_device->peer_devices);
@@ -1307,8 +1307,7 @@ static void iso_resource_work(struct work_struct *work)
 	 */
 	if (r->todo == ISO_RES_REALLOC && !success &&
 	    !client->in_shutdown &&
-	    idr_find(&client->resource_idr, r->resource.handle)) {
-		idr_remove(&client->resource_idr, r->resource.handle);
+	    idr_remove(&client->resource_idr, r->resource.handle)) {
 		client_put(client);
 		free = true;
 	}
@@ -70,10 +70,10 @@ static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 	struct amdgpu_bo_list *list;
 
 	mutex_lock(&fpriv->bo_list_lock);
-	list = idr_find(&fpriv->bo_list_handles, id);
+	list = idr_remove(&fpriv->bo_list_handles, id);
 	if (list) {
+		/* Another user may have a reference to this list still */
 		mutex_lock(&list->lock);
-		idr_remove(&fpriv->bo_list_handles, id);
 		mutex_unlock(&list->lock);
 		amdgpu_bo_list_free(list);
 	}
@@ -135,15 +135,11 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
 	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
-	ctx = idr_find(&mgr->ctx_handles, id);
-	if (ctx) {
-		idr_remove(&mgr->ctx_handles, id);
+	ctx = idr_remove(&mgr->ctx_handles, id);
+	if (ctx)
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-		mutex_unlock(&mgr->lock);
-		return 0;
-	}
 	mutex_unlock(&mgr->lock);
-	return -EINVAL;
+	return ctx ? 0 : -EINVAL;
 }
 
 static int amdgpu_ctx_query(struct amdgpu_device *adev,
@@ -346,9 +346,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
 		return;
 
 	spin_lock_irqsave(&priv->ack_status_lock, flags);
-	ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id);
-	if (ack_skb)
-		idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
+	ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
 	spin_unlock_irqrestore(&priv->ack_status_lock, flags);
 
 	if (ack_skb) {
@@ -642,9 +642,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
 
 		spin_lock(&udev->commands_lock);
-		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
-		if (cmd)
-			idr_remove(&udev->commands, cmd->cmd_id);
+		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
 		spin_unlock(&udev->commands_lock);
 
 		if (!cmd) {
@@ -12,47 +12,29 @@
 #ifndef __IDR_H__
 #define __IDR_H__
 
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/rcupdate.h>
-
-/*
- * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
- * 8 bits only gave us 3 layers out of every pair of pages, which is less
- * efficient except for trees with a largest element between 192-255 inclusive.
- */
-#define IDR_BITS 6
-#define IDR_SIZE (1 << IDR_BITS)
-#define IDR_MASK ((1 << IDR_BITS)-1)
-
-struct idr_layer {
-	int			prefix;	/* the ID prefix of this idr_layer */
-	int			layer;	/* distance from leaf */
-	struct idr_layer __rcu	*ary[1<<IDR_BITS];
-	int			count;	/* When zero, we can release it */
-	union {
-		/* A zero bit means "space here" */
-		DECLARE_BITMAP(bitmap, IDR_SIZE);
-		struct rcu_head		rcu_head;
-	};
-};
+#include <linux/radix-tree.h>
+#include <linux/gfp.h>
+#include <linux/percpu.h>
 
 struct idr {
-	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
-	struct idr_layer __rcu	*top;
-	int			layers;	/* only valid w/o concurrent changes */
-	int			cur;	/* current pos for cyclic allocation */
-	spinlock_t		lock;
-	int			id_free_cnt;
-	struct idr_layer	*id_free;
+	struct radix_tree_root	idr_rt;
+	unsigned int		idr_next;
 };
 
-#define IDR_INIT(name)						\
+/*
+ * The IDR API does not expose the tagging functionality of the radix tree
+ * to users.  Use tag 0 to track whether a node has free space below it.
+ */
+#define IDR_FREE	0
+
+/* Set the IDR flag and the IDR_FREE tag */
+#define IDR_RT_MARKER		((__force gfp_t)(3 << __GFP_BITS_SHIFT))
+
+#define IDR_INIT						\
 {								\
-	.lock = __SPIN_LOCK_UNLOCKED(name.lock),		\
+	.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER)		\
 }
-#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
+#define DEFINE_IDR(name)	struct idr name = IDR_INIT
 
 /**
  * idr_get_cursor - Return the current position of the cyclic allocator
@@ -62,9 +44,9 @@ struct idr {
  * idr_alloc_cyclic() if it is free (otherwise the search will start from
  * this position).
  */
-static inline unsigned int idr_get_cursor(struct idr *idr)
+static inline unsigned int idr_get_cursor(const struct idr *idr)
 {
-	return READ_ONCE(idr->cur);
+	return READ_ONCE(idr->idr_next);
 }
 
 /**
@@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr)
  */
 static inline void idr_set_cursor(struct idr *idr, unsigned int val)
 {
-	WRITE_ONCE(idr->cur, val);
+	WRITE_ONCE(idr->idr_next, val);
 }
 
 /**
@@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
  * period).
  */
 
-/*
- * This is what we export.
- */
-
-void *idr_find_slowpath(struct idr *idp, int id);
 void idr_preload(gfp_t gfp_mask);
-int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
-int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
-int idr_for_each(struct idr *idp,
+int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
-void *idr_get_next(struct idr *idp, int *nextid);
-void *idr_replace(struct idr *idp, void *ptr, int id);
-void idr_remove(struct idr *idp, int id);
-void idr_destroy(struct idr *idp);
-void idr_init(struct idr *idp);
-bool idr_is_empty(struct idr *idp);
+void *idr_get_next(struct idr *, int *nextid);
+void *idr_replace(struct idr *, void *, int id);
+void idr_destroy(struct idr *);
+
+static inline void *idr_remove(struct idr *idr, int id)
+{
+	return radix_tree_delete_item(&idr->idr_rt, id, NULL);
+}
+
+static inline void idr_init(struct idr *idr)
+{
+	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
+	idr->idr_next = 0;
+}
+
+static inline bool idr_is_empty(const struct idr *idr)
+{
+	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
+}
 
 /**
  * idr_preload_end - end preload section started with idr_preload()
@@ -137,19 +128,14 @@ static inline void idr_preload_end(void)
  * This function can be called under rcu_read_lock(), given that the leaf
  * pointers lifetimes are correctly managed.
  */
-static inline void *idr_find(struct idr *idr, int id)
+static inline void *idr_find(const struct idr *idr, int id)
 {
-	struct idr_layer *hint = rcu_dereference_raw(idr->hint);
-
-	if (hint && (id & ~IDR_MASK) == hint->prefix)
-		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
-
-	return idr_find_slowpath(idr, id);
+	return radix_tree_lookup(&idr->idr_rt, id);
 }
 
 /**
  * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idp: idr handle
+ * @idr: idr handle
  * @entry: the type * to use as cursor
  * @id: id entry's key
  *
@@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id)
 * after normal terminatinon @entry is left with the value NULL. This
 * is convenient for a "not found" value.
 */
-#define idr_for_each_entry(idp, entry, id)			\
-	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
+#define idr_for_each_entry(idr, entry, id)			\
+	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
 
 /**
- * idr_for_each_entry - continue iteration over an idr's elements of a given type
- * @idp: idr handle
+ * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
+ * @idr: idr handle
  * @entry: the type * to use as cursor
  * @id: id entry's key
  *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
-#define idr_for_each_entry_continue(idp, entry, id)		\
-	for ((entry) = idr_get_next((idp), &(id));		\
+#define idr_for_each_entry_continue(idr, entry, id)		\
+	for ((entry) = idr_get_next((idr), &(id));		\
	     entry;						\
-	     ++id, (entry) = idr_get_next((idp), &(id)))
+	     ++id, (entry) = idr_get_next((idr), &(id)))
 
 /*
  * IDA - IDR based id allocator, use when translation from id to
  * pointer isn't necessary.
- *
- * IDA_BITMAP_LONGS is calculated to be one less to accommodate
- * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
 #define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
-#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
+#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
 #define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)
 
 struct ida_bitmap {
-	long			nr_busy;
	unsigned long		bitmap[IDA_BITMAP_LONGS];
 };
 
+DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
+
 struct ida {
-	struct idr		idr;
-	struct ida_bitmap	*free_bitmap;
+	struct radix_tree_root	ida_rt;
 };
 
-#define IDA_INIT(name)	{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
-#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
+#define IDA_INIT	{						\
+	.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT),		\
+}
+#define DEFINE_IDA(name)	struct ida name = IDA_INIT
 
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
 void ida_remove(struct ida *ida, int id);
 void ida_destroy(struct ida *ida);
-void ida_init(struct ida *ida);
 
 int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
 void ida_simple_remove(struct ida *ida, unsigned int id);
 
+static inline void ida_init(struct ida *ida)
+{
+	INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+}
+
 /**
  * ida_get_new - allocate new ID
  * @ida: idr handle
@@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
	return ida_get_new_above(ida, 0, p_id);
 }
 
-static inline bool ida_is_empty(struct ida *ida)
+static inline bool ida_is_empty(const struct ida *ida)
 {
-	return idr_is_empty(&ida->idr);
+	return radix_tree_empty(&ida->ida_rt);
 }
 
-void __init idr_init_cache(void);
-
 #endif /* __IDR_H__ */
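The include/linux/idr.h changes above keep the ida_simple_get()/ida_simple_remove()
entry points unchanged while the backing store becomes a radix tree. A minimal
usage sketch, assuming a hypothetical driver that hands out small instance
numbers (the example_* names are not from this patch set):

#include <linux/idr.h>

/* Hypothetical allocator for device instance numbers. */
static DEFINE_IDA(example_instance_ida);

static int example_get_instance(void)
{
	/* end == 0 means "no upper limit" for ida_simple_get(). */
	return ida_simple_get(&example_instance_ida, 0, 0, GFP_KERNEL);
}

static void example_put_instance(int id)
{
	ida_simple_remove(&example_instance_ida, id);
}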
@ -22,11 +22,13 @@
|
||||
#define _LINUX_RADIX_TREE_H
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* The bottom two bits of the slot determine how the remaining bits in the
|
||||
@ -94,7 +96,7 @@ struct radix_tree_node {
|
||||
unsigned char count; /* Total entry count */
|
||||
unsigned char exceptional; /* Exceptional entry count */
|
||||
struct radix_tree_node *parent; /* Used when ascending tree */
|
||||
void *private_data; /* For tree user */
|
||||
struct radix_tree_root *root; /* The tree we belong to */
|
||||
union {
|
||||
struct list_head private_list; /* For tree user */
|
||||
struct rcu_head rcu_head; /* Used when freeing node */
|
||||
@ -103,7 +105,10 @@ struct radix_tree_node {
|
||||
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
|
||||
};
|
||||
|
||||
/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
|
||||
/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
|
||||
#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT))
|
||||
#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1)
|
||||
|
||||
struct radix_tree_root {
|
||||
gfp_t gfp_mask;
|
||||
struct radix_tree_node __rcu *rnode;
|
||||
@ -123,7 +128,7 @@ do { \
|
||||
(root)->rnode = NULL; \
|
||||
} while (0)
|
||||
|
||||
static inline bool radix_tree_empty(struct radix_tree_root *root)
|
||||
static inline bool radix_tree_empty(const struct radix_tree_root *root)
|
||||
{
|
||||
return root->rnode == NULL;
|
||||
}
|
||||
@ -216,10 +221,8 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
|
||||
*/
|
||||
|
||||
/**
|
||||
* radix_tree_deref_slot - dereference a slot
|
||||
* @pslot: pointer to slot, returned by radix_tree_lookup_slot
|
||||
* Returns: item that was stored in that slot with any direct pointer flag
|
||||
* removed.
|
||||
* radix_tree_deref_slot - dereference a slot
|
||||
* @slot: slot pointer, returned by radix_tree_lookup_slot
|
||||
*
|
||||
* For use with radix_tree_lookup_slot(). Caller must hold tree at least read
|
||||
* locked across slot lookup and dereference. Not required if write lock is
|
||||
@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
|
||||
*
|
||||
* radix_tree_deref_retry must be used to confirm validity of the pointer if
|
||||
* only the read lock is held.
|
||||
*
|
||||
* Return: entry stored in that slot.
|
||||
*/
|
||||
static inline void *radix_tree_deref_slot(void **pslot)
|
||||
static inline void *radix_tree_deref_slot(void __rcu **slot)
|
||||
{
|
||||
return rcu_dereference(*pslot);
|
||||
return rcu_dereference(*slot);
|
||||
}
|
||||
|
||||
/**
|
||||
* radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
|
||||
* @pslot: pointer to slot, returned by radix_tree_lookup_slot
|
||||
* Returns: item that was stored in that slot with any direct pointer flag
|
||||
* removed.
|
||||
* radix_tree_deref_slot_protected - dereference a slot with tree lock held
|
||||
* @slot: slot pointer, returned by radix_tree_lookup_slot
|
||||
*
|
||||
* Similar to radix_tree_deref_slot but only used during migration when a pages
|
||||
* mapping is being moved. The caller does not hold the RCU read lock but it
|
||||
* must hold the tree lock to prevent parallel updates.
|
||||
* Similar to radix_tree_deref_slot. The caller does not hold the RCU read
|
||||
* lock but it must hold the tree lock to prevent parallel updates.
|
||||
*
|
||||
* Return: entry stored in that slot.
|
||||
*/
|
||||
static inline void *radix_tree_deref_slot_protected(void **pslot,
|
||||
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
|
||||
spinlock_t *treelock)
|
||||
{
|
||||
return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
|
||||
return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg)
|
||||
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
|
||||
}
|
||||
|
||||
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
|
||||
int __radix_tree_create(struct radix_tree_root *, unsigned long index,
|
||||
unsigned order, struct radix_tree_node **nodep,
|
||||
void ***slotp);
|
||||
void __rcu ***slotp);
|
||||
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
|
||||
unsigned order, void *);
|
||||
static inline int radix_tree_insert(struct radix_tree_root *root,
|
||||
@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root,
|
||||
{
|
||||
return __radix_tree_insert(root, index, 0, entry);
|
||||
}
|
||||
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
|
||||
struct radix_tree_node **nodep, void ***slotp);
|
||||
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
|
||||
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
|
||||
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
|
||||
struct radix_tree_node **nodep, void __rcu ***slotp);
|
||||
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
|
||||
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
|
||||
unsigned long index);
|
||||
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
|
||||
void __radix_tree_replace(struct radix_tree_root *root,
|
||||
struct radix_tree_node *node,
|
||||
void **slot, void *item,
|
||||
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
|
||||
void __rcu **slot, void *entry,
|
||||
radix_tree_update_node_t update_node, void *private);
|
||||
void radix_tree_iter_replace(struct radix_tree_root *,
|
||||
const struct radix_tree_iter *, void **slot, void *item);
|
||||
void radix_tree_replace_slot(struct radix_tree_root *root,
|
||||
void **slot, void *item);
|
||||
void __radix_tree_delete_node(struct radix_tree_root *root,
|
||||
struct radix_tree_node *node,
|
||||
const struct radix_tree_iter *, void __rcu **slot, void *entry);
|
||||
void radix_tree_replace_slot(struct radix_tree_root *,
|
||||
void __rcu **slot, void *entry);
|
||||
void __radix_tree_delete_node(struct radix_tree_root *,
|
||||
struct radix_tree_node *,
|
||||
radix_tree_update_node_t update_node,
|
||||
void *private);
|
||||
void radix_tree_iter_delete(struct radix_tree_root *,
|
||||
struct radix_tree_iter *iter, void __rcu **slot);
|
||||
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
|
||||
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
|
||||
void radix_tree_clear_tags(struct radix_tree_root *root,
|
||||
struct radix_tree_node *node,
|
||||
void **slot);
|
||||
unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
|
||||
void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
|
||||
void __rcu **slot);
|
||||
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
|
||||
void **results, unsigned long first_index,
|
||||
unsigned int max_items);
|
||||
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
|
||||
void ***results, unsigned long *indices,
|
||||
unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
|
||||
void __rcu ***results, unsigned long *indices,
|
||||
unsigned long first_index, unsigned int max_items);
|
||||
int radix_tree_preload(gfp_t gfp_mask);
|
||||
int radix_tree_maybe_preload(gfp_t gfp_mask);
|
||||
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
|
||||
void radix_tree_init(void);
|
||||
void *radix_tree_tag_set(struct radix_tree_root *root,
|
||||
void *radix_tree_tag_set(struct radix_tree_root *,
|
||||
unsigned long index, unsigned int tag);
|
||||
void *radix_tree_tag_clear(struct radix_tree_root *root,
|
||||
void *radix_tree_tag_clear(struct radix_tree_root *,
|
||||
unsigned long index, unsigned int tag);
|
||||
int radix_tree_tag_get(struct radix_tree_root *root,
|
||||
int radix_tree_tag_get(const struct radix_tree_root *,
|
||||
unsigned long index, unsigned int tag);
|
||||
void radix_tree_iter_tag_set(struct radix_tree_root *root,
|
||||
void radix_tree_iter_tag_set(struct radix_tree_root *,
|
||||
const struct radix_tree_iter *iter, unsigned int tag);
|
||||
unsigned int
|
||||
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
|
||||
unsigned long first_index, unsigned int max_items,
|
||||
unsigned int tag);
|
||||
unsigned int
|
||||
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
|
||||
unsigned long first_index, unsigned int max_items,
|
||||
unsigned int tag);
|
||||
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
|
||||
void radix_tree_iter_tag_clear(struct radix_tree_root *,
|
||||
const struct radix_tree_iter *iter, unsigned int tag);
|
||||
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
|
||||
void **results, unsigned long first_index,
|
||||
unsigned int max_items, unsigned int tag);
|
||||
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
|
||||
void __rcu ***results, unsigned long first_index,
|
||||
unsigned int max_items, unsigned int tag);
|
||||
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
|
||||
|
||||
static inline void radix_tree_preload_end(void)
|
||||
{
|
||||
@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
|
||||
unsigned new_order);
|
||||
int radix_tree_join(struct radix_tree_root *, unsigned long index,
|
||||
unsigned new_order, void *);
|
||||
void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
|
||||
gfp_t, int end);
|
||||
|
||||
#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */
|
||||
#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */
|
||||
#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */
|
||||
enum {
|
||||
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
|
||||
RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */
|
||||
RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */
|
||||
};
|
||||
|
||||
/**
|
||||
* radix_tree_iter_init - initialize radix tree iterator
|
||||
@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index,
|
||||
* @start: iteration starting index
|
||||
* Returns: NULL
|
||||
*/
|
||||
static __always_inline void **
|
||||
static __always_inline void __rcu **
|
||||
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
|
||||
{
|
||||
/*
|
||||
@ -393,9 +402,45 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
|
||||
* Also it fills @iter with data about chunk: position in the tree (index),
|
||||
* its end (next_index), and constructs a bit mask for tagged iterating (tags).
|
||||
*/
|
||||
void **radix_tree_next_chunk(struct radix_tree_root *root,
|
||||
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
|
||||
struct radix_tree_iter *iter, unsigned flags);
|
||||
|
||||
/**
|
||||
* radix_tree_iter_lookup - look up an index in the radix tree
|
||||
* @root: radix tree root
|
||||
* @iter: iterator state
|
||||
* @index: key to look up
|
||||
*
|
||||
* If @index is present in the radix tree, this function returns the slot
|
||||
* containing it and updates @iter to describe the entry. If @index is not
|
||||
* present, it returns NULL.
|
||||
*/
|
||||
static inline void __rcu **
|
||||
radix_tree_iter_lookup(const struct radix_tree_root *root,
|
||||
struct radix_tree_iter *iter, unsigned long index)
|
||||
{
|
||||
radix_tree_iter_init(iter, index);
|
||||
return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
|
||||
}
|
||||
|
||||
/**
|
||||
* radix_tree_iter_find - find a present entry
|
||||
* @root: radix tree root
|
||||
* @iter: iterator state
|
||||
* @index: start location
|
||||
*
|
||||
* This function returns the slot containing the entry with the lowest index
|
||||
* which is at least @index. If @index is larger than any present entry, this
|
||||
* function returns NULL. The @iter is updated to describe the entry found.
|
||||
*/
|
||||
static inline void __rcu **
|
||||
radix_tree_iter_find(const struct radix_tree_root *root,
|
||||
struct radix_tree_iter *iter, unsigned long index)
|
||||
{
|
||||
radix_tree_iter_init(iter, index);
|
||||
return radix_tree_next_chunk(root, iter, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* radix_tree_iter_retry - retry this chunk of the iteration
|
||||
* @iter: iterator state
|
||||
@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
|
||||
* and continue the iteration.
|
||||
*/
|
||||
static inline __must_check
|
||||
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
|
||||
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
|
||||
{
|
||||
iter->next_index = iter->index;
|
||||
iter->tags = 0;
|
||||
@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
|
||||
* have been invalidated by an insertion or deletion. Call this function
|
||||
* before releasing the lock to continue the iteration from the next index.
|
||||
*/
|
||||
void **__must_check radix_tree_iter_resume(void **slot,
|
||||
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
|
||||
struct radix_tree_iter *iter);
|
||||
|
||||
/**
|
||||
@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RADIX_TREE_MULTIORDER
|
||||
void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
|
||||
unsigned flags);
|
||||
void __rcu **__radix_tree_next_slot(void __rcu **slot,
|
||||
struct radix_tree_iter *iter, unsigned flags);
|
||||
#else
|
||||
/* Can't happen without sibling entries, but the compiler can't tell that */
|
||||
static inline void ** __radix_tree_next_slot(void **slot,
|
||||
static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
|
||||
struct radix_tree_iter *iter, unsigned flags)
|
||||
{
|
||||
return slot;
|
||||
@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot,
|
||||
* b) we are doing non-tagged iteration, and iter->index and iter->next_index
|
||||
* have been set up so that radix_tree_chunk_size() returns 1 or 0.
|
||||
*/
|
||||
static __always_inline void **
|
||||
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
|
||||
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
|
||||
struct radix_tree_iter *iter, unsigned flags)
|
||||
{
|
||||
if (flags & RADIX_TREE_ITER_TAGGED) {
|
||||
iter->tags >>= 1;
|
||||
@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
|
||||
return NULL;
|
||||
|
||||
found:
|
||||
if (unlikely(radix_tree_is_internal_node(*slot)))
|
||||
if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
|
||||
return __radix_tree_next_slot(slot, iter, flags);
|
||||
return slot;
|
||||
}
|
||||
|
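The radix-tree.h changes above also add two iterator helpers,
radix_tree_iter_lookup() and radix_tree_iter_find(). A hedged sketch of how a
caller might use radix_tree_iter_find() under RCU (the example_* helper is
hypothetical, not part of this series):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: return the first entry at or above 'start', or NULL.
 * Sketch only; assumes the caller keeps its entries valid under RCU. */
static void *example_first_entry_from(struct radix_tree_root *root,
				      unsigned long start)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;

	rcu_read_lock();
	slot = radix_tree_iter_find(root, &iter, start);
	if (slot)
		entry = radix_tree_deref_slot(slot);
	rcu_read_unlock();

	return entry;
}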
@@ -554,7 +554,7 @@ asmlinkage __visible void __init start_kernel(void)
 	if (WARN(!irqs_disabled(),
 		 "Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
-	idr_init_cache();
+	radix_tree_init();
 
 	/*
 	 * Allow workqueue creation and work item queueing/cancelling
@@ -569,7 +569,6 @@ asmlinkage __visible void __init start_kernel(void)
 	trace_init();
 
 	context_tracking_init();
-	radix_tree_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
@@ -25,6 +25,9 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 earlycpio.o seq_buf.o siphash.o \
	 nmi_backtrace.o nodemask.o win_minmax.o
 
+CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
+CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
+
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
lib/radix-tree.c: 763 lines changed (file diff suppressed because it is too large)
@@ -355,10 +355,8 @@ void workingset_update_node(struct radix_tree_node *node, void *private)
	 * as node->private_list is protected by &mapping->tree_lock.
	 */
	if (node->count && node->count == node->exceptional) {
-		if (list_empty(&node->private_list)) {
-			node->private_data = mapping;
+		if (list_empty(&node->private_list))
			list_lru_add(&shadow_nodes, &node->private_list);
-		}
	} else {
		if (!list_empty(&node->private_list))
			list_lru_del(&shadow_nodes, &node->private_list);
@@ -436,7 +434,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
	 */
 
	node = container_of(item, struct radix_tree_node, private_list);
-	mapping = node->private_data;
+	mapping = container_of(node->root, struct address_space, page_tree);
 
	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
@@ -462,9 +462,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
	unsigned long flags;
 
	spin_lock_irqsave(&local->ack_status_lock, flags);
-	skb = idr_find(&local->ack_status_frames, info->ack_frame_id);
-	if (skb)
-		idr_remove(&local->ack_status_frames, info->ack_frame_id);
+	skb = idr_remove(&local->ack_status_frames, info->ack_frame_id);
	spin_unlock_irqrestore(&local->ack_status_lock, flags);
 
	if (!skb)
@@ -20,4 +20,7 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
		(((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0;
 }
 
+#define __set_bit(nr, addr)	set_bit(nr, addr)
+#define __clear_bit(nr, addr)	clear_bit(nr, addr)
+
 #endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */
@@ -12,6 +12,14 @@
	unlikely(__ret_warn_on);				\
 })
 
+#define WARN_ON(condition) ({					\
+	int __ret_warn_on = !!(condition);			\
+	if (unlikely(__ret_warn_on))				\
+		__WARN_printf("assertion failed at %s:%d\n",	\
+				__FILE__, __LINE__);		\
+	unlikely(__ret_warn_on);				\
+})
+
 #define WARN_ON_ONCE(condition) ({				\
	static int __warned;					\
	int __ret_warn_once = !!(condition);			\
@@ -4,6 +4,7 @@
 #include <string.h>
 #include <linux/bitops.h>
 #include <stdlib.h>
+#include <linux/kernel.h>
 
 #define DECLARE_BITMAP(name,bits) \
	unsigned long name[BITS_TO_LONGS(bits)]
@@ -2,7 +2,6 @@
 #define _TOOLS_LINUX_BITOPS_H_
 
 #include <asm/types.h>
-#include <linux/kernel.h>
 #include <linux/compiler.h>
 
 #ifndef __WORDSIZE
@@ -25,6 +25,8 @@
 #endif
 
 #define __user
+#define __rcu
+#define __read_mostly
 
 #ifndef __attribute_const__
 # define __attribute_const__
@@ -54,6 +56,8 @@
 # define unlikely(x)		__builtin_expect(!!(x), 0)
 #endif
 
+#define uninitialized_var(x) x = *(&(x))
+
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 #include <linux/types.h>
tools/include/linux/spinlock.h: new file, 5 lines
@@ -0,0 +1,5 @@
+#define spinlock_t		pthread_mutex_t
+#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+
+#define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
+#define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
tools/testing/radix-tree/.gitignore (vendored): 4 lines changed
@@ -1,2 +1,6 @@
+generated/map-shift.h
+idr.c
+idr-test
 main
+multiorder
 radix-tree.c
@@ -1,29 +1,47 @@
 
-CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE
+CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
 LDFLAGS += -lpthread -lurcu
-TARGETS = main
-OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
-	 regression1.o regression2.o regression3.o multiorder.o \
-	 iteration_check.o benchmark.o
+TARGETS = main idr-test multiorder
+CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
+OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
+	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
 
-ifdef BENCHMARK
-	CFLAGS += -DBENCHMARK=1
+ifndef SHIFT
+	SHIFT=3
 endif
 
-targets: $(TARGETS)
+targets: mapshift $(TARGETS)
 
 main:	$(OFILES)
-	$(CC) $(CFLAGS) $(LDFLAGS) $(OFILES) -o main
+	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o main
+
+idr-test: idr-test.o $(CORE_OFILES)
+	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test
+
+multiorder: multiorder.o $(CORE_OFILES)
+	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder
 
 clean:
-	$(RM) -f $(TARGETS) *.o radix-tree.c
+	$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
 
-find_next_bit.o: ../../lib/find_bit.c
-	$(CC) $(CFLAGS) -c -o $@ $<
+vpath %.c ../../lib
 
-$(OFILES): *.h */*.h \
+$(OFILES): *.h */*.h generated/map-shift.h \
	../../include/linux/*.h \
-	../../../include/linux/radix-tree.h
+	../../include/asm/*.h \
+	../../../include/linux/radix-tree.h \
+	../../../include/linux/idr.h
 
 radix-tree.c: ../../../lib/radix-tree.c
	sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
+
+idr.c: ../../../lib/idr.c
+	sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
+
+.PHONY: mapshift
+
+mapshift:
+	@if ! grep -qw $(SHIFT) generated/map-shift.h; then		\
+		echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" >		\
+				generated/map-shift.h;			\
+	fi
@@ -71,7 +71,7 @@ static void benchmark_size(unsigned long size, unsigned long step, int order)
	tagged = benchmark_iter(&tree, true);
	normal = benchmark_iter(&tree, false);
 
-	printf("Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
+	printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
		size, step, order, tagged, normal);
 
	item_kill_tree(&tree);
@@ -85,8 +85,8 @@ void benchmark(void)
			128, 256, 512, 12345, 0};
	int c, s;
 
-	printf("starting benchmarks\n");
-	printf("RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT);
+	printv(1, "starting benchmarks\n");
+	printv(1, "RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT);
 
	for (c = 0; size[c]; c++)
		for (s = 0; step[s]; s++)
@@ -1,3 +1 @@
 #define CONFIG_RADIX_TREE_MULTIORDER 1
-#define CONFIG_SHMEM 1
-#define CONFIG_SWAP 1
tools/testing/radix-tree/idr-test.c: new file, 444 lines
@@ -0,0 +1,444 @@
|
||||
/*
|
||||
* idr-test.c: Test the IDR API
|
||||
* Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
|
||||
#include "test.h"
|
||||
|
||||
#define DUMMY_PTR ((void *)0x12)
|
||||
|
||||
int item_idr_free(int id, void *p, void *data)
|
||||
{
|
||||
struct item *item = p;
|
||||
assert(item->index == id);
|
||||
free(p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void item_idr_remove(struct idr *idr, int id)
|
||||
{
|
||||
struct item *item = idr_find(idr, id);
|
||||
assert(item->index == id);
|
||||
idr_remove(idr, id);
|
||||
free(item);
|
||||
}
|
||||
|
||||
void idr_alloc_test(void)
|
||||
{
|
||||
unsigned long i;
|
||||
DEFINE_IDR(idr);
|
||||
|
||||
assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
|
||||
assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
|
||||
idr_remove(&idr, 0x3ffd);
|
||||
idr_remove(&idr, 0);
|
||||
|
||||
for (i = 0x3ffe; i < 0x4003; i++) {
|
||||
int id;
|
||||
struct item *item;
|
||||
|
||||
if (i < 0x4000)
|
||||
item = item_create(i, 0);
|
||||
else
|
||||
item = item_create(i - 0x3fff, 0);
|
||||
|
||||
id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
|
||||
assert(id == item->index);
|
||||
}
|
||||
|
||||
idr_for_each(&idr, item_idr_free, &idr);
|
||||
idr_destroy(&idr);
|
||||
}
|
||||
|
||||
void idr_replace_test(void)
|
||||
{
|
||||
DEFINE_IDR(idr);
|
||||
|
||||
idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
|
||||
idr_replace(&idr, &idr, 10);
|
||||
|
||||
idr_destroy(&idr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Unlike the radix tree, you can put a NULL pointer -- with care -- into
|
||||
* the IDR. Some interfaces, like idr_find() do not distinguish between
|
||||
* "present, value is NULL" and "not present", but that's exactly what some
|
||||
* users want.
|
||||
*/
|
||||
void idr_null_test(void)
|
||||
{
|
||||
int i;
|
||||
DEFINE_IDR(idr);
|
||||
|
||||
assert(idr_is_empty(&idr));
|
||||
|
||||
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
|
||||
assert(!idr_is_empty(&idr));
|
||||
idr_remove(&idr, 0);
|
||||
assert(idr_is_empty(&idr));
|
||||
|
||||
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
|
||||
assert(!idr_is_empty(&idr));
|
||||
idr_destroy(&idr);
|
||||
assert(idr_is_empty(&idr));
|
||||
|
||||
for (i = 0; i < 10; i++) {
|
||||
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
|
||||
}
|
||||
|
||||
assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
|
||||
assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
|
||||
assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
|
||||
assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
|
||||
idr_remove(&idr, 5);
|
||||
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
|
||||
idr_remove(&idr, 5);
|
||||
|
||||
for (i = 0; i < 9; i++) {
|
||||
idr_remove(&idr, i);
|
||||
assert(!idr_is_empty(&idr));
|
||||
}
|
||||
idr_remove(&idr, 8);
|
||||
assert(!idr_is_empty(&idr));
|
||||
idr_remove(&idr, 9);
|
||||
assert(idr_is_empty(&idr));
|
||||
|
||||
assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
|
||||
assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
|
||||
assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
|
||||
assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);
|
||||
|
||||
idr_destroy(&idr);
|
||||
assert(idr_is_empty(&idr));
|
||||
|
||||
for (i = 1; i < 10; i++) {
|
||||
assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
|
||||
}
|
||||
|
||||
idr_destroy(&idr);
|
||||
assert(idr_is_empty(&idr));
|
||||
}
|
||||
|
||||
void idr_nowait_test(void)
|
||||
{
|
||||
unsigned int i;
|
||||
DEFINE_IDR(idr);
|
||||
|
||||
idr_preload(GFP_KERNEL);
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
struct item *item = item_create(i, 0);
|
||||
assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
|
||||
}
|
||||
|
||||
idr_preload_end();
|
||||
|
||||
idr_for_each(&idr, item_idr_free, &idr);
|
||||
idr_destroy(&idr);
|
||||
}
|
||||
|
||||
void idr_checks(void)
|
||||
{
|
||||
unsigned long i;
|
||||
DEFINE_IDR(idr);
|
||||
|
||||
for (i = 0; i < 10000; i++) {
|
||||
struct item *item = item_create(i, 0);
|
||||
assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
|
||||
}
|
||||
|
||||
assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);
|
||||
|
||||
for (i = 0; i < 5000; i++)
|
||||
item_idr_remove(&idr, i);
|
||||
|
||||
idr_remove(&idr, 3);
|
||||
|
||||
idr_for_each(&idr, item_idr_free, &idr);
|
||||
idr_destroy(&idr);
|
||||
|
||||
assert(idr_is_empty(&idr));
|
||||
|
||||
idr_remove(&idr, 3);
|
||||
idr_remove(&idr, 0);
|
||||
|
||||
for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
|
||||
struct item *item = item_create(i, 0);
|
||||
assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
|
||||
}
|
||||
assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
|
||||
|
||||
idr_for_each(&idr, item_idr_free, &idr);
|
||||
idr_destroy(&idr);
|
||||
idr_destroy(&idr);
|
||||
|
||||
assert(idr_is_empty(&idr));
|
||||
|
||||
for (i = 1; i < 10000; i++) {
|
||||
struct item *item = item_create(i, 0);
|
||||
assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
|
||||
}
|
||||
|
||||
idr_for_each(&idr, item_idr_free, &idr);
|
||||
idr_destroy(&idr);
|
||||
|
||||
idr_replace_test();
|
||||
idr_alloc_test();
|
||||
idr_null_test();
|
||||
idr_nowait_test();
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that we get the correct error when we run out of memory doing
|
||||
* allocations. To ensure we run out of memory, just "forget" to preload.
|
||||
* The first test is for not having a bitmap available, and the second test
|
||||
* is for not being able to allocate a level of the radix tree.
|
||||
*/
|
||||
void ida_check_nomem(void)
|
||||
{
|
||||
DEFINE_IDA(ida);
|
||||
int id, err;
|
||||
|
||||
err = ida_get_new_above(&ida, 256, &id);
|
||||
assert(err == -EAGAIN);
|
||||
err = ida_get_new_above(&ida, 1UL << 30, &id);
|
||||
assert(err == -EAGAIN);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check what happens when we fill a leaf and then delete it. This may
|
||||
* discover mishandling of IDR_FREE.
|
||||
*/
|
||||
void ida_check_leaf(void)
|
||||
{
|
||||
DEFINE_IDA(ida);
|
||||
int id;
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < IDA_BITMAP_BITS; i++) {
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new(&ida, &id));
|
||||
assert(id == i);
|
||||
}
|
||||
|
||||
ida_destroy(&ida);
|
||||
assert(ida_is_empty(&ida));
|
||||
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new(&ida, &id));
|
||||
assert(id == 0);
|
||||
ida_destroy(&ida);
|
||||
assert(ida_is_empty(&ida));
|
||||
}
|
||||
|
||||
/*
|
||||
* Check handling of conversions between exceptional entries and full bitmaps.
|
||||
*/
|
||||
void ida_check_conv(void)
|
||||
{
|
||||
DEFINE_IDA(ida);
|
||||
int id;
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, i + 1, &id));
|
||||
assert(id == i + 1);
|
||||
assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id));
|
||||
assert(id == i + BITS_PER_LONG);
|
||||
ida_remove(&ida, i + 1);
|
||||
ida_remove(&ida, i + BITS_PER_LONG);
|
||||
assert(ida_is_empty(&ida));
|
||||
}
|
||||
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
|
||||
for (i = 0; i < IDA_BITMAP_BITS * 2; i++) {
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new(&ida, &id));
|
||||
assert(id == i);
|
||||
}
|
||||
|
||||
for (i = IDA_BITMAP_BITS * 2; i > 0; i--) {
|
||||
ida_remove(&ida, i - 1);
|
||||
}
|
||||
assert(ida_is_empty(&ida));
|
||||
|
||||
for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) {
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new(&ida, &id));
|
||||
assert(id == i);
|
||||
}
|
||||
|
||||
for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) {
|
||||
ida_remove(&ida, i - 1);
|
||||
}
|
||||
assert(ida_is_empty(&ida));
|
||||
|
||||
radix_tree_cpu_dead(1);
|
||||
for (i = 0; i < 1000000; i++) {
|
||||
int err = ida_get_new(&ida, &id);
|
||||
if (err == -EAGAIN) {
|
||||
assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2));
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
err = ida_get_new(&ida, &id);
|
||||
} else {
|
||||
assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2));
|
||||
}
|
||||
assert(!err);
|
||||
assert(id == i);
|
||||
}
|
||||
ida_destroy(&ida);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
|
||||
* Allocating up to 2^31-1 should succeed, and then allocating the next one
|
||||
* should fail.
|
||||
*/
|
||||
void ida_check_max(void)
|
||||
{
|
||||
DEFINE_IDA(ida);
|
||||
int id, err;
|
||||
unsigned long i, j;
|
||||
|
||||
for (j = 1; j < 65537; j *= 2) {
|
||||
unsigned long base = (1UL << 31) - j;
|
||||
for (i = 0; i < j; i++) {
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, base, &id));
|
||||
assert(id == base + i);
|
||||
}
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
err = ida_get_new_above(&ida, base, &id);
|
||||
assert(err == -ENOSPC);
|
||||
ida_destroy(&ida);
|
||||
assert(ida_is_empty(&ida));
|
||||
rcu_barrier();
|
||||
}
|
||||
}
|
||||
|
||||
void ida_check_random(void)
|
||||
{
|
||||
DEFINE_IDA(ida);
|
||||
DECLARE_BITMAP(bitmap, 2048);
|
||||
int id;
|
||||
unsigned int i;
|
||||
time_t s = time(NULL);
|
||||
|
||||
repeat:
|
||||
memset(bitmap, 0, sizeof(bitmap));
|
||||
for (i = 0; i < 100000; i++) {
|
||||
int i = rand();
|
||||
int bit = i & 2047;
|
||||
if (test_bit(bit, bitmap)) {
|
||||
__clear_bit(bit, bitmap);
|
||||
ida_remove(&ida, bit);
|
||||
} else {
|
||||
__set_bit(bit, bitmap);
|
||||
ida_pre_get(&ida, GFP_KERNEL);
|
||||
assert(!ida_get_new_above(&ida, bit, &id));
|
||||
assert(id == bit);
|
||||
}
|
||||
}
|
||||
ida_destroy(&ida);
|
||||
if (time(NULL) < s + 10)
|
||||
goto repeat;
|
||||
}
|
||||
|
||||
void ida_checks(void)
|
||||
{
|
||||
DEFINE_IDA(ida);
|
||||
int id;
|
||||
unsigned long i;
|
||||
|
||||
radix_tree_cpu_dead(1);
|
||||
ida_check_nomem();
|
||||
|
||||
for (i = 0; i < 10000; i++) {
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new(&ida, &id));
|
||||
assert(id == i);
|
||||
}
|
||||
|
||||
ida_remove(&ida, 20);
|
||||
ida_remove(&ida, 21);
|
||||
for (i = 0; i < 3; i++) {
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new(&ida, &id));
|
||||
if (i == 2)
|
||||
assert(id == 10000);
|
||||
}
|
||||
|
||||
for (i = 0; i < 5000; i++)
|
||||
ida_remove(&ida, i);
|
||||
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, 5000, &id));
|
||||
assert(id == 10001);
|
||||
|
||||
ida_destroy(&ida);
|
||||
|
||||
assert(ida_is_empty(&ida));
|
||||
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, 1, &id));
|
||||
assert(id == 1);
|
||||
|
||||
ida_remove(&ida, id);
|
||||
assert(ida_is_empty(&ida));
|
||||
ida_destroy(&ida);
|
||||
assert(ida_is_empty(&ida));
|
||||
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, 1, &id));
|
||||
ida_destroy(&ida);
|
||||
assert(ida_is_empty(&ida));
|
||||
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, 1, &id));
|
||||
assert(id == 1);
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, 1025, &id));
|
||||
assert(id == 1025);
|
||||
assert(ida_pre_get(&ida, GFP_KERNEL));
|
||||
assert(!ida_get_new_above(&ida, 10000, &id));
|
||||
assert(id == 10000);
|
||||
ida_remove(&ida, 1025);
|
||||
ida_destroy(&ida);
|
||||
assert(ida_is_empty(&ida));
|
||||
|
||||
ida_check_leaf();
|
||||
ida_check_max();
|
||||
ida_check_conv();
|
||||
ida_check_random();
|
||||
|
||||
radix_tree_cpu_dead(1);
|
||||
}
|
||||
|
||||
int __weak main(void)
|
||||
{
|
||||
radix_tree_init();
|
||||
idr_checks();
|
||||
ida_checks();
|
||||
rcu_barrier();
|
||||
if (nr_allocated)
|
||||
printf("nr_allocated = %d\n", nr_allocated);
|
||||
return 0;
|
||||
}
|
@@ -177,7 +177,7 @@ void iteration_test(unsigned order, unsigned test_duration)
 {
	int i;
 
-	printf("Running %siteration tests for %d seconds\n",
+	printv(1, "Running %siteration tests for %d seconds\n",
		order > 0 ? "multiorder " : "", test_duration);
 
	max_order = order;
@ -5,7 +5,7 @@
|
||||
#include <unistd.h>
|
||||
#include <assert.h>
|
||||
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/poison.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/radix-tree.h>
|
||||
@ -13,6 +13,8 @@
|
||||
|
||||
int nr_allocated;
|
||||
int preempt_count;
|
||||
int kmalloc_verbose;
|
||||
int test_verbose;
|
||||
|
||||
struct kmem_cache {
|
||||
pthread_mutex_t lock;
|
||||
@ -22,27 +24,6 @@ struct kmem_cache {
|
||||
void (*ctor)(void *);
|
||||
};
|
||||
|
||||
void *mempool_alloc(mempool_t *pool, int gfp_mask)
|
||||
{
|
||||
return pool->alloc(gfp_mask, pool->data);
|
||||
}
|
||||
|
||||
void mempool_free(void *element, mempool_t *pool)
|
||||
{
|
||||
pool->free(element, pool->data);
|
||||
}
|
||||
|
||||
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
|
||||
mempool_free_t *free_fn, void *pool_data)
|
||||
{
|
||||
mempool_t *ret = malloc(sizeof(*ret));
|
||||
|
||||
ret->alloc = alloc_fn;
|
||||
ret->free = free_fn;
|
||||
ret->data = pool_data;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
|
||||
{
|
||||
struct radix_tree_node *node;
|
||||
@ -54,9 +35,9 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
|
||||
if (cachep->nr_objs) {
|
||||
cachep->nr_objs--;
|
||||
node = cachep->objs;
|
||||
cachep->objs = node->private_data;
|
||||
cachep->objs = node->parent;
|
||||
pthread_mutex_unlock(&cachep->lock);
|
||||
node->private_data = NULL;
|
||||
node->parent = NULL;
|
||||
} else {
|
||||
pthread_mutex_unlock(&cachep->lock);
|
||||
node = malloc(cachep->size);
|
||||
@ -65,6 +46,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
|
||||
}
|
||||
|
||||
uatomic_inc(&nr_allocated);
|
||||
if (kmalloc_verbose)
|
||||
printf("Allocating %p from slab\n", node);
|
||||
return node;
|
||||
}
|
||||
|
||||
@ -72,6 +55,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
|
||||
{
|
||||
assert(objp);
|
||||
uatomic_dec(&nr_allocated);
|
||||
if (kmalloc_verbose)
|
||||
printf("Freeing %p to slab\n", objp);
|
||||
pthread_mutex_lock(&cachep->lock);
|
||||
if (cachep->nr_objs > 10) {
|
||||
memset(objp, POISON_FREE, cachep->size);
|
||||
@ -79,7 +64,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
|
||||
} else {
|
||||
struct radix_tree_node *node = objp;
|
||||
cachep->nr_objs++;
|
||||
node->private_data = cachep->objs;
|
||||
node->parent = cachep->objs;
|
||||
cachep->objs = node;
|
||||
}
|
||||
pthread_mutex_unlock(&cachep->lock);
|
||||
@ -89,6 +74,8 @@ void *kmalloc(size_t size, gfp_t gfp)
|
||||
{
|
||||
void *ret = malloc(size);
|
||||
uatomic_inc(&nr_allocated);
|
||||
if (kmalloc_verbose)
|
||||
printf("Allocating %p from malloc\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -97,6 +84,8 @@ void kfree(void *p)
|
||||
if (!p)
|
||||
return;
|
||||
uatomic_dec(&nr_allocated);
|
||||
if (kmalloc_verbose)
|
||||
printf("Freeing %p to malloc\n", p);
|
||||
free(p);
|
||||
}
|
||||
|
||||
|
@ -1,160 +0,0 @@
|
||||
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
|
||||
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/bitops/find.h>
|
||||
#include <linux/bitops/hweight.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
|
||||
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
|
||||
#define BITS_PER_BYTE 8
|
||||
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
|
||||
|
||||
/**
|
||||
* __set_bit - Set a bit in memory
|
||||
* @nr: the bit to set
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike set_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static inline void __set_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
||||
*p |= mask;
|
||||
}
|
||||
|
||||
static inline void __clear_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
||||
*p &= ~mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* __change_bit - Toggle a bit in memory
|
||||
* @nr: the bit to change
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike change_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static inline void __change_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
|
||||
*p ^= mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* __test_and_set_bit - Set a bit and return its old value
|
||||
* @nr: Bit to set
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old | mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __test_and_clear_bit - Clear a bit and return its old value
|
||||
* @nr: Bit to clear
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old & ~mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/* WARNING: non atomic and it can be reordered! */
|
||||
static inline int __test_and_change_bit(int nr,
|
||||
volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old ^ mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static inline int test_bit(int nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
|
||||
}
|
||||
|
||||
/**
|
||||
* __ffs - find first bit in word.
|
||||
* @word: The word to search
|
||||
*
|
||||
* Undefined if no bit exists, so code should check against 0 first.
|
||||
*/
|
||||
static inline unsigned long __ffs(unsigned long word)
|
||||
{
|
||||
int num = 0;
|
||||
|
||||
if ((word & 0xffffffff) == 0) {
|
||||
num += 32;
|
||||
word >>= 32;
|
||||
}
|
||||
if ((word & 0xffff) == 0) {
|
||||
num += 16;
|
||||
word >>= 16;
|
||||
}
|
||||
if ((word & 0xff) == 0) {
|
||||
num += 8;
|
||||
word >>= 8;
|
||||
}
|
||||
if ((word & 0xf) == 0) {
|
||||
num += 4;
|
||||
word >>= 4;
|
||||
}
|
||||
if ((word & 0x3) == 0) {
|
||||
num += 2;
|
||||
word >>= 2;
|
||||
}
|
||||
if ((word & 0x1) == 0)
|
||||
num += 1;
|
||||
return num;
|
||||
}
|
||||
|
||||
unsigned long find_next_bit(const unsigned long *addr,
|
||||
unsigned long size,
|
||||
unsigned long offset);
|
||||
|
||||
static inline unsigned long hweight_long(unsigned long w)
|
||||
{
|
||||
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
|
||||
}
|
||||
|
||||
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
|
@ -1,43 +0,0 @@
|
||||
#ifndef _ASM_GENERIC_BITOPS___FFS_H_
|
||||
#define _ASM_GENERIC_BITOPS___FFS_H_
|
||||
|
||||
#include <asm/types.h>
|
||||
|
||||
/**
|
||||
* __ffs - find first bit in word.
|
||||
* @word: The word to search
|
||||
*
|
||||
* Undefined if no bit exists, so code should check against 0 first.
|
||||
*/
|
||||
static inline unsigned long __ffs(unsigned long word)
|
||||
{
|
||||
int num = 0;
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
if ((word & 0xffffffff) == 0) {
|
||||
num += 32;
|
||||
word >>= 32;
|
||||
}
|
||||
#endif
|
||||
if ((word & 0xffff) == 0) {
|
||||
num += 16;
|
||||
word >>= 16;
|
||||
}
|
||||
if ((word & 0xff) == 0) {
|
||||
num += 8;
|
||||
word >>= 8;
|
||||
}
|
||||
if ((word & 0xf) == 0) {
|
||||
num += 4;
|
||||
word >>= 4;
|
||||
}
|
||||
if ((word & 0x3) == 0) {
|
||||
num += 2;
|
||||
word >>= 2;
|
||||
}
|
||||
if ((word & 0x1) == 0)
|
||||
num += 1;
|
||||
return num;
|
||||
}
|
||||
|
||||
#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
|
@ -1,41 +0,0 @@
|
||||
#ifndef _ASM_GENERIC_BITOPS_FFS_H_
|
||||
#define _ASM_GENERIC_BITOPS_FFS_H_
|
||||
|
||||
/**
|
||||
* ffs - find first bit set
|
||||
* @x: the word to search
|
||||
*
|
||||
* This is defined the same way as
|
||||
* the libc and compiler builtin ffs routines, therefore
|
||||
* differs in spirit from the above ffz (man ffs).
|
||||
*/
|
||||
static inline int ffs(int x)
|
||||
{
|
||||
int r = 1;
|
||||
|
||||
if (!x)
|
||||
return 0;
|
||||
if (!(x & 0xffff)) {
|
||||
x >>= 16;
|
||||
r += 16;
|
||||
}
|
||||
if (!(x & 0xff)) {
|
||||
x >>= 8;
|
||||
r += 8;
|
||||
}
|
||||
if (!(x & 0xf)) {
|
||||
x >>= 4;
|
||||
r += 4;
|
||||
}
|
||||
if (!(x & 3)) {
|
||||
x >>= 2;
|
||||
r += 2;
|
||||
}
|
||||
if (!(x & 1)) {
|
||||
x >>= 1;
|
||||
r += 1;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
|
@ -1,12 +0,0 @@
#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
#define _ASM_GENERIC_BITOPS_FFZ_H_

/*
* ffz - find first zero in word.
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
#define ffz(x) __ffs(~(x))

#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
@ -1,13 +0,0 @@
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_

extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);

extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
long size, unsigned long offset);

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)

#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
@ -1,41 +0,0 @@
|
||||
#ifndef _ASM_GENERIC_BITOPS_FLS_H_
|
||||
#define _ASM_GENERIC_BITOPS_FLS_H_
|
||||
|
||||
/**
|
||||
* fls - find last (most-significant) bit set
|
||||
* @x: the word to search
|
||||
*
|
||||
* This is defined the same way as ffs.
|
||||
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
|
||||
*/
|
||||
|
||||
static inline int fls(int x)
|
||||
{
|
||||
int r = 32;
|
||||
|
||||
if (!x)
|
||||
return 0;
|
||||
if (!(x & 0xffff0000u)) {
|
||||
x <<= 16;
|
||||
r -= 16;
|
||||
}
|
||||
if (!(x & 0xff000000u)) {
|
||||
x <<= 8;
|
||||
r -= 8;
|
||||
}
|
||||
if (!(x & 0xf0000000u)) {
|
||||
x <<= 4;
|
||||
r -= 4;
|
||||
}
|
||||
if (!(x & 0xc0000000u)) {
|
||||
x <<= 2;
|
||||
r -= 2;
|
||||
}
|
||||
if (!(x & 0x80000000u)) {
|
||||
x <<= 1;
|
||||
r -= 1;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
|
@ -1,14 +0,0 @@
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_

#include <asm/types.h>

static inline int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
return fls(x);
}

#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
@ -1,11 +0,0 @@
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_

#include <asm/types.h>

extern unsigned int hweight32(unsigned int w);
extern unsigned int hweight16(unsigned int w);
extern unsigned int hweight8(unsigned int w);
extern unsigned long hweight64(__u64 w);

#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
@ -1,53 +0,0 @@
|
||||
#ifndef _ASM_GENERIC_BITOPS_LE_H_
|
||||
#define _ASM_GENERIC_BITOPS_LE_H_
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
|
||||
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
|
||||
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
|
||||
#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
|
||||
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
|
||||
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
|
||||
|
||||
#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
|
||||
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
|
||||
|
||||
#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
|
||||
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
|
||||
|
||||
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
|
||||
|
||||
#elif defined(__BIG_ENDIAN)
|
||||
|
||||
#define generic_test_le_bit(nr, addr) \
|
||||
test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
|
||||
#define generic___set_le_bit(nr, addr) \
|
||||
__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
|
||||
#define generic___clear_le_bit(nr, addr) \
|
||||
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
|
||||
|
||||
#define generic_test_and_set_le_bit(nr, addr) \
|
||||
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
|
||||
#define generic_test_and_clear_le_bit(nr, addr) \
|
||||
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
|
||||
|
||||
#define generic___test_and_set_le_bit(nr, addr) \
|
||||
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
|
||||
#define generic___test_and_clear_le_bit(nr, addr) \
|
||||
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
|
||||
|
||||
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
|
||||
unsigned long size, unsigned long offset);
|
||||
|
||||
#else
|
||||
#error "Please fix <asm/byteorder.h>"
|
||||
#endif
|
||||
|
||||
#define generic_find_first_zero_le_bit(addr, size) \
|
||||
generic_find_next_zero_le_bit((addr), (size), 0)
|
||||
|
||||
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
|
@ -1,110 +0,0 @@
|
||||
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
|
||||
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
|
||||
|
||||
#include <asm/types.h>
|
||||
|
||||
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
|
||||
|
||||
/**
|
||||
* __set_bit - Set a bit in memory
|
||||
* @nr: the bit to set
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike set_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static inline void __set_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
|
||||
|
||||
*p |= mask;
|
||||
}
|
||||
|
||||
static inline void __clear_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
|
||||
|
||||
*p &= ~mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* __change_bit - Toggle a bit in memory
|
||||
* @nr: the bit to change
|
||||
* @addr: the address to start counting from
|
||||
*
|
||||
* Unlike change_bit(), this function is non-atomic and may be reordered.
|
||||
* If it's called on the same region of memory simultaneously, the effect
|
||||
* may be that only one operation succeeds.
|
||||
*/
|
||||
static inline void __change_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
|
||||
|
||||
*p ^= mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* __test_and_set_bit - Set a bit and return its old value
|
||||
* @nr: Bit to set
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old | mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* __test_and_clear_bit - Clear a bit and return its old value
|
||||
* @nr: Bit to clear
|
||||
* @addr: Address to count from
|
||||
*
|
||||
* This operation is non-atomic and can be reordered.
|
||||
* If two examples of this operation race, one can appear to succeed
|
||||
* but actually fail. You must protect multiple accesses with a lock.
|
||||
*/
|
||||
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old & ~mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/* WARNING: non atomic and it can be reordered! */
|
||||
static inline int __test_and_change_bit(int nr,
|
||||
volatile unsigned long *addr)
|
||||
{
|
||||
unsigned long mask = BIT_MASK(nr);
|
||||
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
|
||||
unsigned long old = *p;
|
||||
|
||||
*p = old ^ mask;
|
||||
return (old & mask) != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* test_bit - Determine whether a bit is set
|
||||
* @nr: bit number to test
|
||||
* @addr: Address to start counting from
|
||||
*/
|
||||
static inline int test_bit(int nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
|
||||
}
|
||||
|
||||
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
|
@ -1,2 +0,0 @@

#define EXPORT_SYMBOL(sym)
@ -1,6 +1,8 @@
#ifndef _GFP_H
#define _GFP_H

#include <linux/types.h>

#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

@ -13,10 +15,12 @@
#define __GFP_DIRECT_RECLAIM 0x400000u
#define __GFP_KSWAPD_RECLAIM 0x2000000u

#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)
#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)

#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)

#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
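The gfp.h stub grows GFP_NOWAIT and the __GFP_BITS_* constants so the suite can exercise allocation paths that must not sleep. The hunk's context ends inside gfpflags_allow_blocking(); in the mainline kernel that helper simply tests __GFP_DIRECT_RECLAIM, along the lines of this stand-alone sketch (only the flags the sketch needs are defined; it is an illustration, not the stub's exact text):

#include <assert.h>
#include <stdbool.h>

typedef unsigned int gfp_t;

#define __GFP_DIRECT_RECLAIM	0x400000u
#define __GFP_KSWAPD_RECLAIM	0x2000000u
#define __GFP_RECLAIM		(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL		__GFP_RECLAIM		/* IO/FS bits omitted here */
#define GFP_NOWAIT		__GFP_KSWAPD_RECLAIM

/* An allocation may block only if it is allowed to enter direct reclaim. */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

int main(void)
{
	assert(gfpflags_allow_blocking(GFP_KERNEL));	/* may sleep */
	assert(!gfpflags_allow_blocking(GFP_NOWAIT));	/* must not sleep */
	return 0;
}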
tools/testing/radix-tree/linux/idr.h
Normal file
@ -0,0 +1 @@
#include "../../../../include/linux/idr.h"
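The new one-line idr.h pulls the kernel's real <linux/idr.h> into the user-space harness, so the IDR test code is compiled against the same header the kernel uses. A hedged sketch of the kind of round trip idr_checks() can then perform; it assumes the harness include paths and its kernel.h stub (which provides BUG_ON), and it relies on idr_remove() returning the removed pointer, which this series introduces:

#include <linux/idr.h>
#include <linux/kernel.h>

static DEFINE_IDR(sketch_idr);

void idr_sketch_check(void)
{
	int id, item = 42;

	/* Allocate the lowest free ID at or above 0. */
	id = idr_alloc(&sketch_idr, &item, 0, 0, GFP_KERNEL);
	BUG_ON(id < 0);

	/* Look the pointer back up, then remove it and get it back. */
	BUG_ON(idr_find(&sketch_idr, id) != &item);
	BUG_ON(idr_remove(&sketch_idr, id) != &item);

	idr_destroy(&sketch_idr);
}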
@ -1 +1 @@
/* An empty file stub that allows radix-tree.c to compile. */
#define __init
@ -1,64 +1,21 @@
|
||||
#ifndef _KERNEL_H
|
||||
#define _KERNEL_H
|
||||
|
||||
#include <assert.h>
|
||||
#include "../../include/linux/kernel.h"
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <stddef.h>
|
||||
#include <limits.h>
|
||||
|
||||
#include "../../include/linux/compiler.h"
|
||||
#include "../../include/linux/err.h"
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/log2.h>
|
||||
#include "../../../include/linux/kconfig.h"
|
||||
|
||||
#ifdef BENCHMARK
|
||||
#define RADIX_TREE_MAP_SHIFT 6
|
||||
#else
|
||||
#define RADIX_TREE_MAP_SHIFT 3
|
||||
#endif
|
||||
|
||||
#ifndef NULL
|
||||
#define NULL 0
|
||||
#endif
|
||||
|
||||
#define BUG_ON(expr) assert(!(expr))
|
||||
#define WARN_ON(expr) assert(!(expr))
|
||||
#define __init
|
||||
#define __must_check
|
||||
#define panic(expr)
|
||||
#define printk printf
|
||||
#define __force
|
||||
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
|
||||
#define pr_debug printk
|
||||
|
||||
#define smp_rmb() barrier()
|
||||
#define smp_wmb() barrier()
|
||||
#define cpu_relax() barrier()
|
||||
#define pr_cont printk
|
||||
|
||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
|
||||
|
||||
#define container_of(ptr, type, member) ({ \
|
||||
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
|
||||
(type *)( (char *)__mptr - offsetof(type, member) );})
|
||||
#define min(a, b) ((a) < (b) ? (a) : (b))
|
||||
|
||||
#define cond_resched() sched_yield()
|
||||
|
||||
static inline int in_interrupt(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* This looks more complex than it should be. But we need to
|
||||
* get the type for the ~ right in round_down (it needs to be
|
||||
* as wide as the result!), and we want to evaluate the macro
|
||||
* arguments just once each.
|
||||
*/
|
||||
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
|
||||
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
|
||||
#define round_down(x, y) ((x) & ~__round_mask(x, y))
|
||||
|
||||
#define xchg(ptr, x) uatomic_xchg(ptr, x)
|
||||
|
||||
#endif /* _KERNEL_H */
|
||||
|
@ -1,16 +0,0 @@

#include <linux/slab.h>

typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);

typedef struct {
mempool_alloc_t *alloc;
mempool_free_t *free;
void *data;
} mempool_t;

void *mempool_alloc(mempool_t *pool, int gfp_mask);
void mempool_free(void *element, mempool_t *pool);
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
@ -1,7 +1,10 @@

#define DECLARE_PER_CPU(type, val) extern type val
#define DEFINE_PER_CPU(type, val) type val

#define __get_cpu_var(var) var
#define this_cpu_ptr(var) var
#define this_cpu_read(var) var
#define this_cpu_xchg(var, val) uatomic_xchg(&var, val)
#define this_cpu_cmpxchg(var, old, new) uatomic_cmpxchg(&var, old, new)
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
@ -1,4 +1,14 @@
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

extern int preempt_count;

#define preempt_disable() uatomic_inc(&preempt_count)
#define preempt_enable() uatomic_dec(&preempt_count)

static inline int in_interrupt(void)
{
return 0;
}

#endif /* __LINUX_PREEMPT_H */
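With preempt_disable()/preempt_enable() now backed by an atomic counter, the tests can print preempt_count after each phase and catch unbalanced sections. A toy, single-threaded illustration of the invariant being checked (plain int instead of uatomic, hypothetical helper name):

#include <assert.h>

static int preempt_count;

#define preempt_disable()	(preempt_count++)
#define preempt_enable()	(preempt_count--)

static void needs_atomic_context(void)
{
	preempt_disable();
	/* ... work that must not sleep ... */
	preempt_enable();
}

int main(void)
{
	needs_atomic_context();
	/* Every disable must be paired with an enable. */
	assert(preempt_count == 0);
	return 0;
}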
@ -1 +1,26 @@
#ifndef _TEST_RADIX_TREE_H
#define _TEST_RADIX_TREE_H

#include "generated/map-shift.h"
#include "../../../../include/linux/radix-tree.h"

extern int kmalloc_verbose;
extern int test_verbose;

static inline void trace_call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head))
{
if (kmalloc_verbose)
printf("Delaying free of %p to slab\n", (char *)head -
offsetof(struct radix_tree_node, rcu_head));
call_rcu(head, func);
}

#define printv(verbosity_level, fmt, ...) \
if(test_verbose >= verbosity_level) \
printf(fmt, ##__VA_ARGS__)

#undef call_rcu
#define call_rcu(x, y) trace_call_rcu(x, y)

#endif /* _TEST_RADIX_TREE_H */
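printv() only prints when the message's level is at or below the verbosity selected on the command line; the remaining hunks in this commit convert the tests' formerly unconditional printf calls to printv. A small sketch of the same pattern outside the harness (hypothetical variable name, mirroring the macro above):

#include <stdio.h>

static int test_verbose;	/* bumped once per -v on the command line */

#define printv(verbosity_level, fmt, ...) \
	if (test_verbose >= verbosity_level) \
		printf(fmt, ##__VA_ARGS__)

int main(void)
{
	test_verbose = 1;
	printv(1, "phase done: %d allocated\n", 0);	/* printed with one -v */
	printv(2, "per-iteration detail %d\n", 42);	/* needs -v -v */
	return 0;
}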
@ -1,23 +0,0 @@
#ifndef _TYPES_H
#define _TYPES_H

#include "../../include/linux/types.h"

#define __rcu
#define __read_mostly

static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}

typedef struct {
unsigned int x;
} spinlock_t;

#define uninitialized_var(x) x = x

#include <linux/gfp.h>

#endif
@ -3,6 +3,7 @@
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
#include <assert.h>
|
||||
#include <limits.h>
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/radix-tree.h>
|
||||
@ -67,7 +68,7 @@ void big_gang_check(bool long_run)
|
||||
|
||||
for (i = 0; i < (long_run ? 1000 : 3); i++) {
|
||||
__big_gang_check();
|
||||
printf("%d ", i);
|
||||
printv(2, "%d ", i);
|
||||
fflush(stdout);
|
||||
}
|
||||
}
|
||||
@ -128,14 +129,19 @@ void check_copied_tags(struct radix_tree_root *tree, unsigned long start, unsign
|
||||
putchar('.'); */
|
||||
if (idx[i] < start || idx[i] > end) {
|
||||
if (item_tag_get(tree, idx[i], totag)) {
|
||||
printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag));
|
||||
printv(2, "%lu-%lu: %lu, tags %d-%d\n", start,
|
||||
end, idx[i], item_tag_get(tree, idx[i],
|
||||
fromtag),
|
||||
item_tag_get(tree, idx[i], totag));
|
||||
}
|
||||
assert(!item_tag_get(tree, idx[i], totag));
|
||||
continue;
|
||||
}
|
||||
if (item_tag_get(tree, idx[i], fromtag) ^
|
||||
item_tag_get(tree, idx[i], totag)) {
|
||||
printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag));
|
||||
printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, end,
|
||||
idx[i], item_tag_get(tree, idx[i], fromtag),
|
||||
item_tag_get(tree, idx[i], totag));
|
||||
}
|
||||
assert(!(item_tag_get(tree, idx[i], fromtag) ^
|
||||
item_tag_get(tree, idx[i], totag)));
|
||||
@ -237,7 +243,7 @@ static void __locate_check(struct radix_tree_root *tree, unsigned long index,
|
||||
item = item_lookup(tree, index);
|
||||
index2 = find_item(tree, item);
|
||||
if (index != index2) {
|
||||
printf("index %ld order %d inserted; found %ld\n",
|
||||
printv(2, "index %ld order %d inserted; found %ld\n",
|
||||
index, order, index2);
|
||||
abort();
|
||||
}
|
||||
@ -288,43 +294,48 @@ static void single_thread_tests(bool long_run)
|
||||
{
|
||||
int i;
|
||||
|
||||
printf("starting single_thread_tests: %d allocated, preempt %d\n",
|
||||
printv(1, "starting single_thread_tests: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
multiorder_checks();
|
||||
rcu_barrier();
|
||||
printf("after multiorder_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after multiorder_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
locate_check();
|
||||
rcu_barrier();
|
||||
printf("after locate_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after locate_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
tag_check();
|
||||
rcu_barrier();
|
||||
printf("after tag_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after tag_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
gang_check();
|
||||
rcu_barrier();
|
||||
printf("after gang_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after gang_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
add_and_check();
|
||||
rcu_barrier();
|
||||
printf("after add_and_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after add_and_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
dynamic_height_check();
|
||||
rcu_barrier();
|
||||
printf("after dynamic_height_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after dynamic_height_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
idr_checks();
|
||||
ida_checks();
|
||||
rcu_barrier();
|
||||
printv(2, "after idr_checks: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
big_gang_check(long_run);
|
||||
rcu_barrier();
|
||||
printf("after big_gang_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after big_gang_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
for (i = 0; i < (long_run ? 2000 : 3); i++) {
|
||||
copy_tag_check();
|
||||
printf("%d ", i);
|
||||
printv(2, "%d ", i);
|
||||
fflush(stdout);
|
||||
}
|
||||
rcu_barrier();
|
||||
printf("after copy_tag_check: %d allocated, preempt %d\n",
|
||||
printv(2, "after copy_tag_check: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
}
|
||||
|
||||
@ -334,24 +345,28 @@ int main(int argc, char **argv)
|
||||
int opt;
|
||||
unsigned int seed = time(NULL);
|
||||
|
||||
while ((opt = getopt(argc, argv, "ls:")) != -1) {
|
||||
while ((opt = getopt(argc, argv, "ls:v")) != -1) {
|
||||
if (opt == 'l')
|
||||
long_run = true;
|
||||
else if (opt == 's')
|
||||
seed = strtoul(optarg, NULL, 0);
|
||||
else if (opt == 'v')
|
||||
test_verbose++;
|
||||
}
|
||||
|
||||
printf("random seed %u\n", seed);
|
||||
srand(seed);
|
||||
|
||||
printf("running tests\n");
|
||||
|
||||
rcu_register_thread();
|
||||
radix_tree_init();
|
||||
|
||||
regression1_test();
|
||||
regression2_test();
|
||||
regression3_test();
|
||||
iteration_test(0, 10);
|
||||
iteration_test(7, 20);
|
||||
iteration_test(0, 10 + 90 * long_run);
|
||||
iteration_test(7, 10 + 90 * long_run);
|
||||
single_thread_tests(long_run);
|
||||
|
||||
/* Free any remaining preallocated nodes */
|
||||
@ -360,9 +375,11 @@ int main(int argc, char **argv)
|
||||
benchmark();
|
||||
|
||||
rcu_barrier();
|
||||
printf("after rcu_barrier: %d allocated, preempt %d\n",
|
||||
printv(2, "after rcu_barrier: %d allocated, preempt %d\n",
|
||||
nr_allocated, preempt_count);
|
||||
rcu_unregister_thread();
|
||||
|
||||
printf("tests completed\n");
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ static void __multiorder_tag_test(int index, int order)
|
||||
/* our canonical entry */
|
||||
base = index & ~((1 << order) - 1);
|
||||
|
||||
printf("Multiorder tag test with index %d, canonical entry %d\n",
|
||||
printv(2, "Multiorder tag test with index %d, canonical entry %d\n",
|
||||
index, base);
|
||||
|
||||
err = item_insert_order(&tree, index, order);
|
||||
@ -150,7 +150,7 @@ static void multiorder_check(unsigned long index, int order)
|
||||
struct item *item2 = item_create(min, order);
|
||||
RADIX_TREE(tree, GFP_KERNEL);
|
||||
|
||||
printf("Multiorder index %ld, order %d\n", index, order);
|
||||
printv(2, "Multiorder index %ld, order %d\n", index, order);
|
||||
|
||||
assert(item_insert_order(&tree, index, order) == 0);
|
||||
|
||||
@ -188,7 +188,7 @@ static void multiorder_shrink(unsigned long index, int order)
|
||||
RADIX_TREE(tree, GFP_KERNEL);
|
||||
struct radix_tree_node *node;
|
||||
|
||||
printf("Multiorder shrink index %ld, order %d\n", index, order);
|
||||
printv(2, "Multiorder shrink index %ld, order %d\n", index, order);
|
||||
|
||||
assert(item_insert_order(&tree, 0, order) == 0);
|
||||
|
||||
@ -209,7 +209,8 @@ static void multiorder_shrink(unsigned long index, int order)
|
||||
item_check_absent(&tree, i);
|
||||
|
||||
if (!item_delete(&tree, 0)) {
|
||||
printf("failed to delete index %ld (order %d)\n", index, order); abort();
|
||||
printv(2, "failed to delete index %ld (order %d)\n", index, order);
|
||||
abort();
|
||||
}
|
||||
|
||||
for (i = 0; i < 2*max; i++)
|
||||
@ -234,7 +235,7 @@ void multiorder_iteration(void)
|
||||
void **slot;
|
||||
int i, j, err;
|
||||
|
||||
printf("Multiorder iteration test\n");
|
||||
printv(1, "Multiorder iteration test\n");
|
||||
|
||||
#define NUM_ENTRIES 11
|
||||
int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
|
||||
@ -275,7 +276,7 @@ void multiorder_tagged_iteration(void)
|
||||
void **slot;
|
||||
int i, j;
|
||||
|
||||
printf("Multiorder tagged iteration test\n");
|
||||
printv(1, "Multiorder tagged iteration test\n");
|
||||
|
||||
#define MT_NUM_ENTRIES 9
|
||||
int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
|
||||
@ -355,6 +356,10 @@ void multiorder_tagged_iteration(void)
|
||||
item_kill_tree(&tree);
|
||||
}
|
||||
|
||||
/*
|
||||
* Basic join checks: make sure we can't find an entry in the tree after
|
||||
* a larger entry has replaced it
|
||||
*/
|
||||
static void multiorder_join1(unsigned long index,
|
||||
unsigned order1, unsigned order2)
|
||||
{
|
||||
@ -373,6 +378,10 @@ static void multiorder_join1(unsigned long index,
|
||||
item_kill_tree(&tree);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that the accounting of exceptional entries is handled correctly
|
||||
* by joining an exceptional entry to a normal pointer.
|
||||
*/
|
||||
static void multiorder_join2(unsigned order1, unsigned order2)
|
||||
{
|
||||
RADIX_TREE(tree, GFP_KERNEL);
|
||||
@ -386,6 +395,9 @@ static void multiorder_join2(unsigned order1, unsigned order2)
|
||||
assert(item2 == (void *)0x12UL);
|
||||
assert(node->exceptional == 1);
|
||||
|
||||
item2 = radix_tree_lookup(&tree, 0);
|
||||
free(item2);
|
||||
|
||||
radix_tree_join(&tree, 0, order1, item1);
|
||||
item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
|
||||
assert(item2 == item1);
|
||||
@ -453,7 +465,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
|
||||
{
|
||||
struct radix_tree_preload *rtp = &radix_tree_preloads;
|
||||
if (rtp->nr != 0)
|
||||
printf("split(%u %u) remaining %u\n", old_order, new_order,
|
||||
printv(2, "split(%u %u) remaining %u\n", old_order, new_order,
|
||||
rtp->nr);
|
||||
/*
|
||||
* Can't check for equality here as some nodes may have been
|
||||
@ -461,7 +473,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
|
||||
* nodes allocated since they should have all been preloaded.
|
||||
*/
|
||||
if (nr_allocated > alloc)
|
||||
printf("split(%u %u) allocated %u %u\n", old_order, new_order,
|
||||
printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order,
|
||||
alloc, nr_allocated);
|
||||
}
|
||||
|
||||
@ -471,6 +483,7 @@ static void __multiorder_split(int old_order, int new_order)
|
||||
void **slot;
|
||||
struct radix_tree_iter iter;
|
||||
unsigned alloc;
|
||||
struct item *item;
|
||||
|
||||
radix_tree_preload(GFP_KERNEL);
|
||||
assert(item_insert_order(&tree, 0, old_order) == 0);
|
||||
@ -479,7 +492,7 @@ static void __multiorder_split(int old_order, int new_order)
|
||||
/* Wipe out the preloaded cache or it'll confuse check_mem() */
|
||||
radix_tree_cpu_dead(0);
|
||||
|
||||
radix_tree_tag_set(&tree, 0, 2);
|
||||
item = radix_tree_tag_set(&tree, 0, 2);
|
||||
|
||||
radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
|
||||
alloc = nr_allocated;
|
||||
@ -492,6 +505,7 @@ static void __multiorder_split(int old_order, int new_order)
|
||||
radix_tree_preload_end();
|
||||
|
||||
item_kill_tree(&tree);
|
||||
free(item);
|
||||
}
|
||||
|
||||
static void __multiorder_split2(int old_order, int new_order)
|
||||
@ -633,3 +647,10 @@ void multiorder_checks(void)
|
||||
|
||||
radix_tree_cpu_dead(0);
|
||||
}
|
||||
|
||||
int __weak main(void)
|
||||
{
|
||||
radix_tree_init();
|
||||
multiorder_checks();
|
||||
return 0;
|
||||
}
|
||||
|
@ -193,7 +193,7 @@ void regression1_test(void)
|
||||
long arg;
|
||||
|
||||
/* Regression #1 */
|
||||
printf("running regression test 1, should finish in under a minute\n");
|
||||
printv(1, "running regression test 1, should finish in under a minute\n");
|
||||
nr_threads = 2;
|
||||
pthread_barrier_init(&worker_barrier, NULL, nr_threads);
|
||||
|
||||
@ -216,5 +216,5 @@ void regression1_test(void)
|
||||
|
||||
free(threads);
|
||||
|
||||
printf("regression test 1, done\n");
|
||||
printv(1, "regression test 1, done\n");
|
||||
}
|
||||
|
@ -80,7 +80,7 @@ void regression2_test(void)
|
||||
unsigned long int start, end;
|
||||
struct page *pages[1];
|
||||
|
||||
printf("running regression test 2 (should take milliseconds)\n");
|
||||
printv(1, "running regression test 2 (should take milliseconds)\n");
|
||||
/* 0. */
|
||||
for (i = 0; i <= max_slots - 1; i++) {
|
||||
p = page_alloc();
|
||||
@ -103,7 +103,7 @@ void regression2_test(void)
|
||||
|
||||
/* 4. */
|
||||
for (i = max_slots - 1; i >= 0; i--)
|
||||
radix_tree_delete(&mt_tree, i);
|
||||
free(radix_tree_delete(&mt_tree, i));
|
||||
|
||||
/* 5. */
|
||||
// NOTE: start should not be 0 because radix_tree_gang_lookup_tag_slot
|
||||
@ -114,7 +114,9 @@ void regression2_test(void)
|
||||
PAGECACHE_TAG_TOWRITE);
|
||||
|
||||
/* We remove all the remained nodes */
|
||||
radix_tree_delete(&mt_tree, max_slots);
|
||||
free(radix_tree_delete(&mt_tree, max_slots));
|
||||
|
||||
printf("regression test 2, done\n");
|
||||
BUG_ON(!radix_tree_empty(&mt_tree));
|
||||
|
||||
printv(1, "regression test 2, done\n");
|
||||
}
|
||||
|
@ -34,21 +34,21 @@ void regression3_test(void)
|
||||
void **slot;
|
||||
bool first;
|
||||
|
||||
printf("running regression test 3 (should take milliseconds)\n");
|
||||
printv(1, "running regression test 3 (should take milliseconds)\n");
|
||||
|
||||
radix_tree_insert(&root, 0, ptr0);
|
||||
radix_tree_tag_set(&root, 0, 0);
|
||||
|
||||
first = true;
|
||||
radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
|
||||
printf("tagged %ld %p\n", iter.index, *slot);
|
||||
printv(2, "tagged %ld %p\n", iter.index, *slot);
|
||||
if (first) {
|
||||
radix_tree_insert(&root, 1, ptr);
|
||||
radix_tree_tag_set(&root, 1, 0);
|
||||
first = false;
|
||||
}
|
||||
if (radix_tree_deref_retry(*slot)) {
|
||||
printf("retry at %ld\n", iter.index);
|
||||
printv(2, "retry at %ld\n", iter.index);
|
||||
slot = radix_tree_iter_retry(&iter);
|
||||
continue;
|
||||
}
|
||||
@ -57,13 +57,13 @@ void regression3_test(void)
|
||||
|
||||
first = true;
|
||||
radix_tree_for_each_slot(slot, &root, &iter, 0) {
|
||||
printf("slot %ld %p\n", iter.index, *slot);
|
||||
printv(2, "slot %ld %p\n", iter.index, *slot);
|
||||
if (first) {
|
||||
radix_tree_insert(&root, 1, ptr);
|
||||
first = false;
|
||||
}
|
||||
if (radix_tree_deref_retry(*slot)) {
|
||||
printk("retry at %ld\n", iter.index);
|
||||
printv(2, "retry at %ld\n", iter.index);
|
||||
slot = radix_tree_iter_retry(&iter);
|
||||
continue;
|
||||
}
|
||||
@ -72,30 +72,30 @@ void regression3_test(void)
|
||||
|
||||
first = true;
|
||||
radix_tree_for_each_contig(slot, &root, &iter, 0) {
|
||||
printk("contig %ld %p\n", iter.index, *slot);
|
||||
printv(2, "contig %ld %p\n", iter.index, *slot);
|
||||
if (first) {
|
||||
radix_tree_insert(&root, 1, ptr);
|
||||
first = false;
|
||||
}
|
||||
if (radix_tree_deref_retry(*slot)) {
|
||||
printk("retry at %ld\n", iter.index);
|
||||
printv(2, "retry at %ld\n", iter.index);
|
||||
slot = radix_tree_iter_retry(&iter);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
radix_tree_for_each_slot(slot, &root, &iter, 0) {
|
||||
printf("slot %ld %p\n", iter.index, *slot);
|
||||
printv(2, "slot %ld %p\n", iter.index, *slot);
|
||||
if (!iter.index) {
|
||||
printf("next at %ld\n", iter.index);
|
||||
printv(2, "next at %ld\n", iter.index);
|
||||
slot = radix_tree_iter_resume(slot, &iter);
|
||||
}
|
||||
}
|
||||
|
||||
radix_tree_for_each_contig(slot, &root, &iter, 0) {
|
||||
printf("contig %ld %p\n", iter.index, *slot);
|
||||
printv(2, "contig %ld %p\n", iter.index, *slot);
|
||||
if (!iter.index) {
|
||||
printf("next at %ld\n", iter.index);
|
||||
printv(2, "next at %ld\n", iter.index);
|
||||
slot = radix_tree_iter_resume(slot, &iter);
|
||||
}
|
||||
}
|
||||
@ -103,9 +103,9 @@ void regression3_test(void)
|
||||
radix_tree_tag_set(&root, 0, 0);
|
||||
radix_tree_tag_set(&root, 1, 0);
|
||||
radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
|
||||
printf("tagged %ld %p\n", iter.index, *slot);
|
||||
printv(2, "tagged %ld %p\n", iter.index, *slot);
|
||||
if (!iter.index) {
|
||||
printf("next at %ld\n", iter.index);
|
||||
printv(2, "next at %ld\n", iter.index);
|
||||
slot = radix_tree_iter_resume(slot, &iter);
|
||||
}
|
||||
}
|
||||
@ -113,5 +113,5 @@ void regression3_test(void)
|
||||
radix_tree_delete(&root, 0);
|
||||
radix_tree_delete(&root, 1);
|
||||
|
||||
printf("regression test 3 passed\n");
|
||||
printv(1, "regression test 3 passed\n");
|
||||
}
|
||||
|
@ -49,10 +49,10 @@ void simple_checks(void)
|
||||
}
|
||||
verify_tag_consistency(&tree, 0);
|
||||
verify_tag_consistency(&tree, 1);
|
||||
printf("before item_kill_tree: %d allocated\n", nr_allocated);
|
||||
printv(2, "before item_kill_tree: %d allocated\n", nr_allocated);
|
||||
item_kill_tree(&tree);
|
||||
rcu_barrier();
|
||||
printf("after item_kill_tree: %d allocated\n", nr_allocated);
|
||||
printv(2, "after item_kill_tree: %d allocated\n", nr_allocated);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -257,7 +257,7 @@ static void do_thrash(struct radix_tree_root *tree, char *thrash_state, int tag)
|
||||
|
||||
gang_check(tree, thrash_state, tag);
|
||||
|
||||
printf("%d(%d) %d(%d) %d(%d) %d(%d) / "
|
||||
printv(2, "%d(%d) %d(%d) %d(%d) %d(%d) / "
|
||||
"%d(%d) present, %d(%d) tagged\n",
|
||||
insert_chunk, nr_inserted,
|
||||
delete_chunk, nr_deleted,
|
||||
@ -296,13 +296,13 @@ static void __leak_check(void)
|
||||
{
|
||||
RADIX_TREE(tree, GFP_KERNEL);
|
||||
|
||||
printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
item_insert(&tree, 1000000);
|
||||
printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
item_delete(&tree, 1000000);
|
||||
printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
item_kill_tree(&tree);
|
||||
printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
|
||||
}
|
||||
|
||||
static void single_check(void)
|
||||
@ -336,15 +336,15 @@ void tag_check(void)
|
||||
extend_checks();
|
||||
contract_checks();
|
||||
rcu_barrier();
|
||||
printf("after extend_checks: %d allocated\n", nr_allocated);
|
||||
printv(2, "after extend_checks: %d allocated\n", nr_allocated);
|
||||
__leak_check();
|
||||
leak_check();
|
||||
rcu_barrier();
|
||||
printf("after leak_check: %d allocated\n", nr_allocated);
|
||||
printv(2, "after leak_check: %d allocated\n", nr_allocated);
|
||||
simple_checks();
|
||||
rcu_barrier();
|
||||
printf("after simple_checks: %d allocated\n", nr_allocated);
|
||||
printv(2, "after simple_checks: %d allocated\n", nr_allocated);
|
||||
thrash_tags();
|
||||
rcu_barrier();
|
||||
printf("after thrash_tags: %d allocated\n", nr_allocated);
|
||||
printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
|
||||
}
|
||||
|
@ -29,15 +29,28 @@ int __item_insert(struct radix_tree_root *root, struct item *item)
|
||||
return __radix_tree_insert(root, item->index, item->order, item);
|
||||
}
|
||||
|
||||
int item_insert(struct radix_tree_root *root, unsigned long index)
|
||||
struct item *item_create(unsigned long index, unsigned int order)
|
||||
{
|
||||
return __item_insert(root, item_create(index, 0));
|
||||
struct item *ret = malloc(sizeof(*ret));
|
||||
|
||||
ret->index = index;
|
||||
ret->order = order;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int item_insert_order(struct radix_tree_root *root, unsigned long index,
|
||||
unsigned order)
|
||||
{
|
||||
return __item_insert(root, item_create(index, order));
|
||||
struct item *item = item_create(index, order);
|
||||
int err = __item_insert(root, item);
|
||||
if (err)
|
||||
free(item);
|
||||
return err;
|
||||
}
|
||||
|
||||
int item_insert(struct radix_tree_root *root, unsigned long index)
|
||||
{
|
||||
return item_insert_order(root, index, 0);
|
||||
}
|
||||
|
||||
void item_sanity(struct item *item, unsigned long index)
|
||||
@ -61,15 +74,6 @@ int item_delete(struct radix_tree_root *root, unsigned long index)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct item *item_create(unsigned long index, unsigned int order)
|
||||
{
|
||||
struct item *ret = malloc(sizeof(*ret));
|
||||
|
||||
ret->index = index;
|
||||
ret->order = order;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void item_check_present(struct radix_tree_root *root, unsigned long index)
|
||||
{
|
||||
struct item *item;
|
||||
|
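In the hunks above, item_insert_order() now frees the freshly created item whenever __item_insert() fails, so callers no longer leak on error. The ownership rule is the usual one: the allocator of the object stays responsible for it until insertion actually succeeds. A hedged sketch of that pattern with hypothetical names (the insert stub always fails here, for brevity):

#include <stdlib.h>

struct item { unsigned long index; unsigned int order; };

/* Stand-in for a real insert that may fail with a negative error code. */
static int tree_insert(void *tree, unsigned long index, struct item *item)
{
	(void)tree; (void)index; (void)item;
	return -1;
}

int insert_new_item(void *tree, unsigned long index, unsigned int order)
{
	struct item *item = malloc(sizeof(*item));
	int err;

	if (!item)
		return -1;
	item->index = index;
	item->order = order;

	err = tree_insert(tree, index, item);
	if (err)
		free(item);	/* insertion failed: the item never entered the tree */
	return err;
}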
@ -34,6 +34,8 @@ void tag_check(void);
void multiorder_checks(void);
void iteration_test(unsigned order, unsigned duration);
void benchmark(void);
void idr_checks(void);
void ida_checks(void);

struct item *
item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);