author     Linus Torvalds <torvalds@linux-foundation.org>  2017-02-28 20:29:41 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-28 20:29:41 -0800
commit     cf393195c3ba5d4c0a8e237eb00f7ef104876ee5 (patch)
tree       8aa515ca0e0c00bffbc8dccb9d36ea319f251a12 /lib/idr.c
parent     5ecc5ac215bc4d88243a2f4909e70ccc1bda710f (diff)
parent     c6ce3e2fe3dacda5e8afb0036c814ae9c3fee9b9 (diff)
download   linux-cf393195c3ba5d4c0a8e237eb00f7ef104876ee5.tar.gz
           linux-cf393195c3ba5d4c0a8e237eb00f7ef104876ee5.tar.xz
Merge branch 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax
Pull IDR rewrite from Matthew Wilcox:
 "The most significant part of the following is the patch to rewrite
  the IDR & IDA to be clients of the radix tree. But there's much more,
  including an enhancement of the IDA to be significantly more space
  efficient, an IDR & IDA test suite, some improvements to the IDR API
  (and driver changes to take advantage of those improvements), several
  improvements to the radix tree test suite and RCU annotations.

  The IDR & IDA rewrite had a good spin in linux-next and Andrew's tree
  for most of the last cycle. Coupled with the IDR test suite, I feel
  pretty confident that any remaining bugs are quite hard to hit. 0-day
  did a great job of watching my git tree and pointing out problems; as
  it hit them, I added new test-cases to be sure not to be caught the
  same way twice"

Willy goes on to expand a bit on the IDR rewrite rationale:
 "The radix tree and the IDR use very similar data structures.

  Merging the two codebases lets us share the memory allocation pools,
  and results in a net deletion of 500 lines of code. It also opens up
  the possibility of exposing more of the features of the radix tree to
  users of the IDR (and I have some interesting patches along those
  lines waiting for 4.12)

  It also shrinks the size of the 'struct idr' from 40 bytes to 24 which
  will shrink a fair few data structures that embed an IDR"

* 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax: (32 commits)
  radix tree test suite: Add config option for map shift
  idr: Add missing __rcu annotations
  radix-tree: Fix __rcu annotations
  radix-tree: Add rcu_dereference and rcu_assign_pointer calls
  radix tree test suite: Run iteration tests for longer
  radix tree test suite: Fix split/join memory leaks
  radix tree test suite: Fix leaks in regression2.c
  radix tree test suite: Fix leaky tests
  radix tree test suite: Enable address sanitizer
  radix_tree_iter_resume: Fix out of bounds error
  radix-tree: Store a pointer to the root in each node
  radix-tree: Chain preallocated nodes through ->parent
  radix tree test suite: Dial down verbosity with -v
  radix tree test suite: Introduce kmalloc_verbose
  idr: Return the deleted entry from idr_remove
  radix tree test suite: Build separate binaries for some tests
  ida: Use exceptional entries for small IDAs
  ida: Move ida_bitmap to a percpu variable
  Reimplement IDR and IDA using the radix tree
  radix-tree: Add radix_tree_iter_delete
  ...
Diffstat (limited to 'lib/idr.c')
-rw-r--r--  lib/idr.c | 1242
1 file changed, 284 insertions, 958 deletions
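For orientation, here is a minimal sketch of how a caller might use the rewritten allocation API that this diff introduces. The struct foo object, foo_lock and foo_register() are hypothetical illustrations, not part of the commit; the idr_preload()/GFP_NOWAIT pattern and the "end <= 0 means no upper bound" convention come from the kernel-doc in the diff below.

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	struct foo;				/* hypothetical caller-owned object */

	static DEFINE_IDR(foo_idr);
	static DEFINE_SPINLOCK(foo_lock);

	static int foo_register(struct foo *foo)
	{
		int id;

		/* Preallocate so idr_alloc() under the spinlock can use GFP_NOWAIT. */
		idr_preload(GFP_KERNEL);
		spin_lock(&foo_lock);
		/* Lowest unused ID in [1, INT_MAX]; @end <= 0 is treated as max. */
		id = idr_alloc(&foo_idr, foo, 1, 0, GFP_NOWAIT);
		spin_unlock(&foo_lock);
		idr_preload_end();

		return id;	/* >= 1 on success, -ENOMEM or -ENOSPC on failure */
	}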
diff --git a/lib/idr.c b/lib/idr.c
index 52d2979a05e8..b13682bb0a1c 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -1,1068 +1,409 @@
-/*
- * 2002-10-18 written by Jim Houston jim.houston@ccur.com
- * Copyright (C) 2002 by Concurrent Computer Corporation
- * Distributed under the GNU GPL license version 2.
- *
- * Modified by George Anzinger to reuse immediately and to use
- * find bit instructions. Also removed _irq on spinlocks.
- *
- * Modified by Nadia Derbey to make it RCU safe.
- *
- * Small id to pointer translation service.
- *
- * It uses a radix tree like structure as a sparse array indexed
- * by the id to obtain the pointer. The bitmap makes allocating
- * a new id quick.
- *
- * You call it to allocate an id (an int) and associate with that id a
- * pointer or whatever, we treat it as a (void *). You can pass this
- * id to a user for him to pass back at a later time. You then pass
- * that id to this code and it returns your pointer.
- */
-
-#ifndef TEST // to test in user space...
-#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/bitmap.h>
#include <linux/export.h>
-#endif
-#include <linux/err.h>
-#include <linux/string.h>
#include <linux/idr.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/percpu.h>
-
-#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
-#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-
-/* Leave the possibility of an incomplete final layer */
-#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
-/* Number of id_layer structs to leave in free list */
-#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
-
-static struct kmem_cache *idr_layer_cache;
-static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
-static DEFINE_PER_CPU(int, idr_preload_cnt);
+DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);
-/* the maximum ID which can be allocated given idr->layers */
-static int idr_max(int layers)
-{
- int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
-
- return (1 << bits) - 1;
-}
-
-/*
- * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
- * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
- * so on.
- */
-static int idr_layer_prefix_mask(int layer)
-{
- return ~idr_max(layer + 1);
-}
-
-static struct idr_layer *get_from_free_list(struct idr *idp)
-{
- struct idr_layer *p;
- unsigned long flags;
-
- spin_lock_irqsave(&idp->lock, flags);
- if ((p = idp->id_free)) {
- idp->id_free = p->ary[0];
- idp->id_free_cnt--;
- p->ary[0] = NULL;
- }
- spin_unlock_irqrestore(&idp->lock, flags);
- return(p);
-}
-
/**
- * idr_layer_alloc - allocate a new idr_layer
- * @gfp_mask: allocation mask
- * @layer_idr: optional idr to allocate from
- *
- * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
- * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
- * an idr_layer from @idr->id_free.
- *
- * @layer_idr is to maintain backward compatibility with the old alloc
- * interface - idr_pre_get() and idr_get_new*() - and will be removed
- * together with per-pool preload buffer.
- */
-static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
-{
- struct idr_layer *new;
-
- /* this is the old path, bypass to get_from_free_list() */
- if (layer_idr)
- return get_from_free_list(layer_idr);
-
- /*
- * Try to allocate directly from kmem_cache. We want to try this
- * before preload buffer; otherwise, non-preloading idr_alloc()
- * users will end up taking advantage of preloading ones. As the
- * following is allowed to fail for preloaded cases, suppress
- * warning this time.
- */
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
- if (new)
- return new;
-
- /*
- * Try to fetch one from the per-cpu preload buffer if in process
- * context. See idr_preload() for details.
- */
- if (!in_interrupt()) {
- preempt_disable();
- new = __this_cpu_read(idr_preload_head);
- if (new) {
- __this_cpu_write(idr_preload_head, new->ary[0]);
- __this_cpu_dec(idr_preload_cnt);
- new->ary[0] = NULL;
- }
- preempt_enable();
- if (new)
- return new;
- }
-
- /*
- * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
- * that memory allocation failure warning is printed as intended.
- */
- return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-}
-
-static void idr_layer_rcu_free(struct rcu_head *head)
-{
- struct idr_layer *layer;
-
- layer = container_of(head, struct idr_layer, rcu_head);
- kmem_cache_free(idr_layer_cache, layer);
-}
-
-static inline void free_layer(struct idr *idr, struct idr_layer *p)
-{
- if (idr->hint == p)
- RCU_INIT_POINTER(idr->hint, NULL);
- call_rcu(&p->rcu_head, idr_layer_rcu_free);
-}
-
-/* only called when idp->lock is held */
-static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
-{
- p->ary[0] = idp->id_free;
- idp->id_free = p;
- idp->id_free_cnt++;
-}
-
-static void move_to_free_list(struct idr *idp, struct idr_layer *p)
-{
- unsigned long flags;
-
- /*
- * Depends on the return element being zeroed.
- */
- spin_lock_irqsave(&idp->lock, flags);
- __move_to_free_list(idp, p);
- spin_unlock_irqrestore(&idp->lock, flags);
-}
-
-static void idr_mark_full(struct idr_layer **pa, int id)
-{
- struct idr_layer *p = pa[0];
- int l = 0;
-
- __set_bit(id & IDR_MASK, p->bitmap);
- /*
- * If this layer is full mark the bit in the layer above to
- * show that this part of the radix tree is full. This may
- * complete the layer above and require walking up the radix
- * tree.
- */
- while (bitmap_full(p->bitmap, IDR_SIZE)) {
- if (!(p = pa[++l]))
- break;
- id = id >> IDR_BITS;
- __set_bit((id & IDR_MASK), p->bitmap);
- }
-}
-
-static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
-{
- while (idp->id_free_cnt < MAX_IDR_FREE) {
- struct idr_layer *new;
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
- if (new == NULL)
- return (0);
- move_to_free_list(idp, new);
- }
- return 1;
-}
-
-/**
- * sub_alloc - try to allocate an id without growing the tree depth
- * @idp: idr handle
- * @starting_id: id to start search at
- * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
- * @gfp_mask: allocation mask for idr_layer_alloc()
- * @layer_idr: optional idr passed to idr_layer_alloc()
- *
- * Allocate an id in range [@starting_id, INT_MAX] from @idp without
- * growing its depth. Returns
- *
- * the allocated id >= 0 if successful,
- * -EAGAIN if the tree needs to grow for allocation to succeed,
- * -ENOSPC if the id space is exhausted,
- * -ENOMEM if more idr_layers need to be allocated.
- */
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
- gfp_t gfp_mask, struct idr *layer_idr)
-{
- int n, m, sh;
- struct idr_layer *p, *new;
- int l, id, oid;
-
- id = *starting_id;
- restart:
- p = idp->top;
- l = idp->layers;
- pa[l--] = NULL;
- while (1) {
- /*
- * We run around this while until we reach the leaf node...
- */
- n = (id >> (IDR_BITS*l)) & IDR_MASK;
- m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
- if (m == IDR_SIZE) {
- /* no space available go back to previous layer. */
- l++;
- oid = id;
- id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
-
- /* if already at the top layer, we need to grow */
- if (id > idr_max(idp->layers)) {
- *starting_id = id;
- return -EAGAIN;
- }
- p = pa[l];
- BUG_ON(!p);
-
- /* If we need to go up one layer, continue the
- * loop; otherwise, restart from the top.
- */
- sh = IDR_BITS * (l + 1);
- if (oid >> sh == id >> sh)
- continue;
- else
- goto restart;
- }
- if (m != n) {
- sh = IDR_BITS*l;
- id = ((id >> sh) ^ n ^ m) << sh;
- }
- if ((id >= MAX_IDR_BIT) || (id < 0))
- return -ENOSPC;
- if (l == 0)
- break;
- /*
- * Create the layer below if it is missing.
- */
- if (!p->ary[m]) {
- new = idr_layer_alloc(gfp_mask, layer_idr);
- if (!new)
- return -ENOMEM;
- new->layer = l-1;
- new->prefix = id & idr_layer_prefix_mask(new->layer);
- rcu_assign_pointer(p->ary[m], new);
- p->count++;
- }
- pa[l--] = p;
- p = p->ary[m];
- }
-
- pa[l] = p;
- return id;
-}
-
-static int idr_get_empty_slot(struct idr *idp, int starting_id,
- struct idr_layer **pa, gfp_t gfp_mask,
- struct idr *layer_idr)
-{
- struct idr_layer *p, *new;
- int layers, v, id;
- unsigned long flags;
-
- id = starting_id;
-build_up:
- p = idp->top;
- layers = idp->layers;
- if (unlikely(!p)) {
- if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
- return -ENOMEM;
- p->layer = 0;
- layers = 1;
- }
- /*
- * Add a new layer to the top of the tree if the requested
- * id is larger than the currently allocated space.
- */
- while (id > idr_max(layers)) {
- layers++;
- if (!p->count) {
- /* special case: if the tree is currently empty,
- * then we grow the tree by moving the top node
- * upwards.
- */
- p->layer++;
- WARN_ON_ONCE(p->prefix);
- continue;
- }
- if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
- /*
- * The allocation failed. If we built part of
- * the structure tear it down.
- */
- spin_lock_irqsave(&idp->lock, flags);
- for (new = p; p && p != idp->top; new = p) {
- p = p->ary[0];
- new->ary[0] = NULL;
- new->count = 0;
- bitmap_clear(new->bitmap, 0, IDR_SIZE);
- __move_to_free_list(idp, new);
- }
- spin_unlock_irqrestore(&idp->lock, flags);
- return -ENOMEM;
- }
- new->ary[0] = p;
- new->count = 1;
- new->layer = layers-1;
- new->prefix = id & idr_layer_prefix_mask(new->layer);
- if (bitmap_full(p->bitmap, IDR_SIZE))
- __set_bit(0, new->bitmap);
- p = new;
- }
- rcu_assign_pointer(idp->top, p);
- idp->layers = layers;
- v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
- if (v == -EAGAIN)
- goto build_up;
- return(v);
-}
-
-/*
- * @id and @pa are from a successful allocation from idr_get_empty_slot().
- * Install the user pointer @ptr and mark the slot full.
- */
-static void idr_fill_slot(struct idr *idr, void *ptr, int id,
- struct idr_layer **pa)
-{
- /* update hint used for lookup, cleared from free_layer() */
- rcu_assign_pointer(idr->hint, pa[0]);
-
- rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
- pa[0]->count++;
- idr_mark_full(pa, id);
-}
-
-
-/**
- * idr_preload - preload for idr_alloc()
- * @gfp_mask: allocation mask to use for preloading
- *
- * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
- * process context and each idr_preload() invocation should be matched with
- * idr_preload_end(). Note that preemption is disabled while preloaded.
- *
- * The first idr_alloc() in the preloaded section can be treated as if it
- * were invoked with @gfp_mask used for preloading. This allows using more
- * permissive allocation masks for idrs protected by spinlocks.
- *
- * For example, if idr_alloc() below fails, the failure can be treated as
- * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
- *
- * idr_preload(GFP_KERNEL);
- * spin_lock(lock);
- *
- * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
- *
- * spin_unlock(lock);
- * idr_preload_end();
- * if (id < 0)
- * error;
- */
-void idr_preload(gfp_t gfp_mask)
-{
- /*
- * Consuming preload buffer from non-process context breaks preload
- * allocation guarantee. Disallow usage from those contexts.
- */
- WARN_ON_ONCE(in_interrupt());
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
-
- preempt_disable();
-
- /*
- * idr_alloc() is likely to succeed w/o full idr_layer buffer and
- * return value from idr_alloc() needs to be checked for failure
- * anyway. Silently give up if allocation fails. The caller can
- * treat failures from idr_alloc() as if idr_alloc() were called
- * with @gfp_mask which should be enough.
- */
- while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
- struct idr_layer *new;
-
- preempt_enable();
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
- preempt_disable();
- if (!new)
- break;
-
- /* link the new one to per-cpu preload list */
- new->ary[0] = __this_cpu_read(idr_preload_head);
- __this_cpu_write(idr_preload_head, new);
- __this_cpu_inc(idr_preload_cnt);
- }
-}
-EXPORT_SYMBOL(idr_preload);
-
-/**
- * idr_alloc - allocate new idr entry
- * @idr: the (initialized) idr
+ * idr_alloc - allocate an id
+ * @idr: idr handle
* @ptr: pointer to be associated with the new id
* @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive, <= 0 for max)
- * @gfp_mask: memory allocation flags
+ * @end: the maximum id (exclusive)
+ * @gfp: memory allocation flags
*
- * Allocate an id in [start, end) and associate it with @ptr. If no ID is
- * available in the specified range, returns -ENOSPC. On memory allocation
- * failure, returns -ENOMEM.
+ * Allocates an unused ID in the range [start, end). Returns -ENOSPC
+ * if there are no unused IDs in that range.
*
* Note that @end is treated as max when <= 0. This is to always allow
* using @start + N as @end as long as N is inside integer range.
*
- * The user is responsible for exclusively synchronizing all operations
- * which may modify @idr. However, read-only accesses such as idr_find()
- * or iteration can be performed under RCU read lock provided the user
- * destroys @ptr in RCU-safe way after removal from idr.
+ * Simultaneous modifications to the @idr are not allowed and should be
+ * prevented by the user, usually with a lock. idr_alloc() may be called
+ * concurrently with read-only accesses to the @idr, such as idr_find() and
+ * idr_for_each_entry().
*/
-int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
- int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- int id;
+ void __rcu **slot;
+ struct radix_tree_iter iter;
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
-
- /* sanity checks */
if (WARN_ON_ONCE(start < 0))
return -EINVAL;
- if (unlikely(max < start))
- return -ENOSPC;
+ if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
+ return -EINVAL;
- /* allocate id */
- id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
- if (unlikely(id < 0))
- return id;
- if (unlikely(id > max))
- return -ENOSPC;
+ radix_tree_iter_init(&iter, start);
+ slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
- idr_fill_slot(idr, ptr, id, pa);
- return id;
+ radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
+ radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
+ return iter.index;
}
EXPORT_SYMBOL_GPL(idr_alloc);
/**
* idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
- * @idr: the (initialized) idr
+ * @idr: idr handle
* @ptr: pointer to be associated with the new id
* @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive, <= 0 for max)
- * @gfp_mask: memory allocation flags
- *
- * Essentially the same as idr_alloc, but prefers to allocate progressively
- * higher ids if it can. If the "cur" counter wraps, then it will start again
- * at the "start" end of the range and allocate one that has already been used.
- */
-int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
- gfp_t gfp_mask)
-{
- int id;
-
- id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
- if (id == -ENOSPC)
- id = idr_alloc(idr, ptr, start, end, gfp_mask);
-
- if (likely(id >= 0))
- idr->cur = id + 1;
- return id;
-}
-EXPORT_SYMBOL(idr_alloc_cyclic);
-
-static void idr_remove_warning(int id)
-{
- WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
-}
-
-static void sub_remove(struct idr *idp, int shift, int id)
-{
- struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_IDR_LEVEL + 1];
- struct idr_layer ***paa = &pa[0];
- struct idr_layer *to_free;
- int n;
-
- *paa = NULL;
- *++paa = &idp->top;
-
- while ((shift > 0) && p) {
- n = (id >> shift) & IDR_MASK;
- __clear_bit(n, p->bitmap);
- *++paa = &p->ary[n];
- p = p->ary[n];
- shift -= IDR_BITS;
- }
- n = id & IDR_MASK;
- if (likely(p != NULL && test_bit(n, p->bitmap))) {
- __clear_bit(n, p->bitmap);
- RCU_INIT_POINTER(p->ary[n], NULL);
- to_free = NULL;
- while(*paa && ! --((**paa)->count)){
- if (to_free)
- free_layer(idp, to_free);
- to_free = **paa;
- **paa-- = NULL;
- }
- if (!*paa)
- idp->layers = 0;
- if (to_free)
- free_layer(idp, to_free);
- } else
- idr_remove_warning(id);
-}
-
-/**
- * idr_remove - remove the given id and free its slot
- * @idp: idr handle
- * @id: unique key
- */
-void idr_remove(struct idr *idp, int id)
-{
- struct idr_layer *p;
- struct idr_layer *to_free;
-
- if (id < 0)
- return;
-
- if (id > idr_max(idp->layers)) {
- idr_remove_warning(id);
- return;
- }
-
- sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
- if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
- idp->top->ary[0]) {
- /*
- * Single child at leftmost slot: we can shrink the tree.
- * This level is not needed anymore since when layers are
- * inserted, they are inserted at the top of the existing
- * tree.
- */
- to_free = idp->top;
- p = idp->top->ary[0];
- rcu_assign_pointer(idp->top, p);
- --idp->layers;
- to_free->count = 0;
- bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
- free_layer(idp, to_free);
- }
-}
-EXPORT_SYMBOL(idr_remove);
-
-static void __idr_remove_all(struct idr *idp)
-{
- int n, id, max;
- int bt_mask;
- struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- struct idr_layer **paa = &pa[0];
-
- n = idp->layers * IDR_BITS;
- *paa = idp->top;
- RCU_INIT_POINTER(idp->top, NULL);
- max = idr_max(idp->layers);
-
- id = 0;
- while (id >= 0 && id <= max) {
- p = *paa;
- while (n > IDR_BITS && p) {
- n -= IDR_BITS;
- p = p->ary[(id >> n) & IDR_MASK];
- *++paa = p;
- }
-
- bt_mask = id;
- id += 1 << n;
- /* Get the highest bit that the above add changed from 0->1. */
- while (n < fls(id ^ bt_mask)) {
- if (*paa)
- free_layer(idp, *paa);
- n += IDR_BITS;
- --paa;
- }
- }
- idp->layers = 0;
-}
-
-/**
- * idr_destroy - release all cached layers within an idr tree
- * @idp: idr handle
- *
- * Free all id mappings and all idp_layers. After this function, @idp is
- * completely unused and can be freed / recycled. The caller is
- * responsible for ensuring that no one else accesses @idp during or after
- * idr_destroy().
+ * @end: the maximum id (exclusive)
+ * @gfp: memory allocation flags
*
- * A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessary, then idr_destroy() to
- * free up the id mappings and cached idr_layers.
+ * Allocates an ID larger than the last ID allocated if one is available.
+ * If not, it will attempt to allocate the smallest ID that is larger or
+ * equal to @start.
*/
-void idr_destroy(struct idr *idp)
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
- __idr_remove_all(idp);
+ int id, curr = idr->idr_next;
- while (idp->id_free_cnt) {
- struct idr_layer *p = get_from_free_list(idp);
- kmem_cache_free(idr_layer_cache, p);
- }
-}
-EXPORT_SYMBOL(idr_destroy);
+ if (curr < start)
+ curr = start;
-void *idr_find_slowpath(struct idr *idp, int id)
-{
- int n;
- struct idr_layer *p;
-
- if (id < 0)
- return NULL;
-
- p = rcu_dereference_raw(idp->top);
- if (!p)
- return NULL;
- n = (p->layer+1) * IDR_BITS;
+ id = idr_alloc(idr, ptr, curr, end, gfp);
+ if ((id == -ENOSPC) && (curr > start))
+ id = idr_alloc(idr, ptr, start, curr, gfp);
- if (id > idr_max(p->layer + 1))
- return NULL;
- BUG_ON(n == 0);
+ if (id >= 0)
+ idr->idr_next = id + 1U;
- while (n > 0 && p) {
- n -= IDR_BITS;
- BUG_ON(n != p->layer*IDR_BITS);
- p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
- }
- return((void *)p);
+ return id;
}
-EXPORT_SYMBOL(idr_find_slowpath);
+EXPORT_SYMBOL(idr_alloc_cyclic);
/**
* idr_for_each - iterate through all stored pointers
- * @idp: idr handle
+ * @idr: idr handle
* @fn: function to be called for each pointer
- * @data: data passed back to callback function
+ * @data: data passed to callback function
*
- * Iterate over the pointers registered with the given idr. The
- * callback function will be called for each pointer currently
- * registered, passing the id, the pointer and the data pointer passed
- * to this function. It is not safe to modify the idr tree while in
- * the callback, so functions such as idr_get_new and idr_remove are
- * not allowed.
+ * The callback function will be called for each entry in @idr, passing
+ * the id, the pointer and the data pointer passed to this function.
*
- * We check the return of @fn each time. If it returns anything other
- * than %0, we break out and return that value.
+ * If @fn returns anything other than %0, the iteration stops and that
+ * value is returned from this function.
*
- * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
+ * idr_for_each() can be called concurrently with idr_alloc() and
+ * idr_remove() if protected by RCU. Newly added entries may not be
+ * seen and deleted entries may be seen, but adding and removing entries
+ * will not cause other entries to be skipped, nor spurious ones to be seen.
*/
-int idr_for_each(struct idr *idp,
- int (*fn)(int id, void *p, void *data), void *data)
+int idr_for_each(const struct idr *idr,
+ int (*fn)(int id, void *p, void *data), void *data)
{
- int n, id, max, error = 0;
- struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- struct idr_layer **paa = &pa[0];
-
- n = idp->layers * IDR_BITS;
- *paa = rcu_dereference_raw(idp->top);
- max = idr_max(idp->layers);
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- id = 0;
- while (id >= 0 && id <= max) {
- p = *paa;
- while (n > 0 && p) {
- n -= IDR_BITS;
- p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
- *++paa = p;
- }
-
- if (p) {
- error = fn(id, (void *)p, data);
- if (error)
- break;
- }
-
- id += 1 << n;
- while (n < fls(id)) {
- n += IDR_BITS;
- --paa;
- }
+ radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
+ int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
+ if (ret)
+ return ret;
}
- return error;
+ return 0;
}
EXPORT_SYMBOL(idr_for_each);
/**
- * idr_get_next - lookup next object of id to given id.
- * @idp: idr handle
- * @nextidp: pointer to lookup key
- *
- * Returns pointer to registered object with id, which is next number to
- * given id. After being looked up, *@nextidp will be updated for the next
- * iteration.
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
+ * idr_get_next - Find next populated entry
+ * @idr: idr handle
+ * @nextid: Pointer to lowest possible ID to return
+ *
+ * Returns the next populated entry in the tree with an ID greater than
+ * or equal to the value pointed to by @nextid. On exit, @nextid is updated
+ * to the ID of the found value. To use in a loop, the value pointed to by
+ * nextid must be incremented by the user.
*/
-void *idr_get_next(struct idr *idp, int *nextidp)
+void *idr_get_next(struct idr *idr, int *nextid)
{
- struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
- struct idr_layer **paa = &pa[0];
- int id = *nextidp;
- int n, max;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- /* find first ent */
- p = *paa = rcu_dereference_raw(idp->top);
- if (!p)
+ slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
+ if (!slot)
return NULL;
- n = (p->layer + 1) * IDR_BITS;
- max = idr_max(p->layer + 1);
-
- while (id >= 0 && id <= max) {
- p = *paa;
- while (n > 0 && p) {
- n -= IDR_BITS;
- p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
- *++paa = p;
- }
-
- if (p) {
- *nextidp = id;
- return p;
- }
- /*
- * Proceed to the next layer at the current level. Unlike
- * idr_for_each(), @id isn't guaranteed to be aligned to
- * layer boundary at this point and adding 1 << n may
- * incorrectly skip IDs. Make sure we jump to the
- * beginning of the next layer using round_up().
- */
- id = round_up(id + 1, 1 << n);
- while (n < fls(id)) {
- n += IDR_BITS;
- --paa;
- }
- }
- return NULL;
+ *nextid = iter.index;
+ return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);
-
/**
* idr_replace - replace pointer for given id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: lookup key
+ * @idr: idr handle
+ * @ptr: New pointer to associate with the ID
+ * @id: Lookup key
*
- * Replace the pointer registered with an id and return the old value.
- * A %-ENOENT return indicates that @id was not found.
- * A %-EINVAL return indicates that @id was not within valid constraints.
+ * Replace the pointer registered with an ID and return the old value.
+ * This function can be called under the RCU read lock concurrently with
+ * idr_alloc() and idr_remove() (as long as the ID being removed is not
+ * the one being replaced!).
*
- * The caller must serialize with writers.
+ * Returns: 0 on success. %-ENOENT indicates that @id was not found.
+ * %-EINVAL indicates that @id or @ptr were not valid.
*/
-void *idr_replace(struct idr *idp, void *ptr, int id)
+void *idr_replace(struct idr *idr, void *ptr, int id)
{
- int n;
- struct idr_layer *p, *old_p;
+ struct radix_tree_node *node;
+ void __rcu **slot = NULL;
+ void *entry;
- if (id < 0)
+ if (WARN_ON_ONCE(id < 0))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return ERR_PTR(-EINVAL);
- p = idp->top;
- if (!p)
- return ERR_PTR(-ENOENT);
-
- if (id > idr_max(p->layer + 1))
- return ERR_PTR(-ENOENT);
-
- n = p->layer * IDR_BITS;
- while ((n > 0) && p) {
- p = p->ary[(id >> n) & IDR_MASK];
- n -= IDR_BITS;
- }
-
- n = id & IDR_MASK;
- if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
+ entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
+ if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
- old_p = p->ary[n];
- rcu_assign_pointer(p->ary[n], ptr);
+ __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);
- return old_p;
+ return entry;
}
EXPORT_SYMBOL(idr_replace);
-void __init idr_init_cache(void)
-{
- idr_layer_cache = kmem_cache_create("idr_layer_cache",
- sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
-}
-
-/**
- * idr_init - initialize idr handle
- * @idp: idr handle
- *
- * This function is used to set up the handle (@idp) that you will pass
- * to the rest of the functions.
- */
-void idr_init(struct idr *idp)
-{
- memset(idp, 0, sizeof(struct idr));
- spin_lock_init(&idp->lock);
-}
-EXPORT_SYMBOL(idr_init);
-
-static int idr_has_entry(int id, void *p, void *data)
-{
- return 1;
-}
-
-bool idr_is_empty(struct idr *idp)
-{
- return !idr_for_each(idp, idr_has_entry, NULL);
-}
-EXPORT_SYMBOL(idr_is_empty);
-
/**
* DOC: IDA description
- * IDA - IDR based ID allocator
*
- * This is an id allocator without id -> pointer translation. Memory
- * usage is much lower than full blown idr because each id only
- * occupies a bit. ida uses a custom leaf node which contains
- * IDA_BITMAP_BITS slots.
- *
- * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
+ * The IDA is an ID allocator which does not provide the ability to
+ * associate an ID with a pointer. As such, it only needs to store one
+ * bit per ID, and so is more space efficient than an IDR. To use an IDA,
+ * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
+ * then initialise it using ida_init()). To allocate a new ID, call
+ * ida_simple_get(). To free an ID, call ida_simple_remove().
+ *
+ * If you have more complex locking requirements, use a loop around
+ * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
+ * ida_remove() to free an ID. You must make sure that ida_get_new() and
+ * ida_remove() cannot be called at the same time as each other for the
+ * same IDA.
+ *
+ * You can also use ida_get_new_above() if you need an ID to be allocated
+ * above a particular number. ida_destroy() can be used to dispose of an
+ * IDA without needing to free the individual IDs in it. You can use
+ * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
+ *
+ * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
+ * limitation, it should be quite straightforward to raise the maximum.
*/
-static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
-{
- unsigned long flags;
-
- if (!ida->free_bitmap) {
- spin_lock_irqsave(&ida->idr.lock, flags);
- if (!ida->free_bitmap) {
- ida->free_bitmap = bitmap;
- bitmap = NULL;
- }
- spin_unlock_irqrestore(&ida->idr.lock, flags);
- }
-
- kfree(bitmap);
-}
-
-/**
- * ida_pre_get - reserve resources for ida allocation
- * @ida: ida handle
- * @gfp_mask: memory allocation flag
- *
- * This function should be called prior to locking and calling the
- * following function. It preallocates enough memory to satisfy the
- * worst possible allocation.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
+/*
+ * Developer's notes:
+ *
+ * The IDA uses the functionality provided by the IDR & radix tree to store
+ * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
+ * free, unlike the IDR where it means at least one entry is free.
+ *
+ * I considered telling the radix tree that each slot is an order-10 node
+ * and storing the bit numbers in the radix tree, but the radix tree can't
+ * allow a single multiorder entry at index 0, which would significantly
+ * increase memory consumption for the IDA. So instead we divide the index
+ * by the number of bits in the leaf bitmap before doing a radix tree lookup.
+ *
+ * As an optimisation, if there are only a few low bits set in any given
+ * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
+ * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
+ * directly in the entry. By being really tricksy, we could store
+ * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
+ * for 0-3 allocated IDs.
+ *
+ * We allow the radix tree 'exceptional' count to get out of date. Nothing
+ * in the IDA nor the radix tree code checks it. If it becomes important
+ * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
+ * calls to radix_tree_iter_replace() which will correct the exceptional
+ * count.
+ *
+ * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
+ * equivalent, it will still need locking. Going to RCU lookup would require
+ * using RCU to free bitmaps, and that's not trivial without embedding an
+ * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
+ * bitmap, which is excessive.
*/
-int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
-{
- /* allocate idr_layers */
- if (!__idr_pre_get(&ida->idr, gfp_mask))
- return 0;
- /* allocate free_bitmap */
- if (!ida->free_bitmap) {
- struct ida_bitmap *bitmap;
-
- bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
- if (!bitmap)
- return 0;
-
- free_bitmap(ida, bitmap);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(ida_pre_get);
+#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
/**
* ida_get_new_above - allocate new ID above or equal to a start id
- * @ida: ida handle
- * @starting_id: id to start search at
- * @p_id: pointer to the allocated handle
+ * @ida: ida handle
+ * @start: id to start search at
+ * @id: pointer to the allocated handle
*
- * Allocate new ID above or equal to @starting_id. It should be called
- * with any required locks.
+ * Allocate new ID above or equal to @start. It should be called
+ * with any required locks to ensure that concurrent calls to
+ * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
+ * Consider using ida_simple_get() if you do not have complex locking
+ * requirements.
*
* If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will
- * return %-ENOSPC.
- *
- * Note that callers must ensure that concurrent access to @ida is not possible.
- * See ida_simple_get() for a variant which takes care of locking.
+ * return %-ENOSPC. On success, it will return 0.
*
- * @p_id returns a value in the range @starting_id ... %0x7fffffff.
+ * @id returns a value in the range @start ... %0x7fffffff.
*/
-int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+int ida_get_new_above(struct ida *ida, int start, int *id)
{
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
+ struct radix_tree_root *root = &ida->ida_rt;
+ void __rcu **slot;
+ struct radix_tree_iter iter;
struct ida_bitmap *bitmap;
- unsigned long flags;
- int idr_id = starting_id / IDA_BITMAP_BITS;
- int offset = starting_id % IDA_BITMAP_BITS;
- int t, id;
-
- restart:
- /* get vacant slot */
- t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
- if (t < 0)
- return t == -ENOMEM ? -EAGAIN : t;
-
- if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
- return -ENOSPC;
-
- if (t != idr_id)
- offset = 0;
- idr_id = t;
-
- /* if bitmap isn't there, create a new one */
- bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
- if (!bitmap) {
- spin_lock_irqsave(&ida->idr.lock, flags);
- bitmap = ida->free_bitmap;
- ida->free_bitmap = NULL;
- spin_unlock_irqrestore(&ida->idr.lock, flags);
-
- if (!bitmap)
- return -EAGAIN;
-
- memset(bitmap, 0, sizeof(struct ida_bitmap));
- rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
- (void *)bitmap);
- pa[0]->count++;
- }
-
- /* lookup for empty slot */
- t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
- if (t == IDA_BITMAP_BITS) {
- /* no empty slot after offset, continue to the next chunk */
- idr_id++;
- offset = 0;
- goto restart;
- }
-
- id = idr_id * IDA_BITMAP_BITS + t;
- if (id >= MAX_IDR_BIT)
- return -ENOSPC;
+ unsigned long index;
+ unsigned bit, ebit;
+ int new;
+
+ index = start / IDA_BITMAP_BITS;
+ bit = start % IDA_BITMAP_BITS;
+ ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+ slot = radix_tree_iter_init(&iter, index);
+ for (;;) {
+ if (slot)
+ slot = radix_tree_next_slot(slot, &iter,
+ RADIX_TREE_ITER_TAGGED);
+ if (!slot) {
+ slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
+ if (IS_ERR(slot)) {
+ if (slot == ERR_PTR(-ENOMEM))
+ return -EAGAIN;
+ return PTR_ERR(slot);
+ }
+ }
+ if (iter.index > index) {
+ bit = 0;
+ ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
+ }
+ new = iter.index * IDA_BITMAP_BITS;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ unsigned long tmp = (unsigned long)bitmap;
+ ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
+ if (ebit < BITS_PER_LONG) {
+ tmp |= 1UL << ebit;
+ rcu_assign_pointer(*slot, (void *)tmp);
+ *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ rcu_assign_pointer(*slot, bitmap);
+ }
- __set_bit(t, bitmap->bitmap);
- if (++bitmap->nr_busy == IDA_BITMAP_BITS)
- idr_mark_full(pa, idr_id);
+ if (bitmap) {
+ bit = find_next_zero_bit(bitmap->bitmap,
+ IDA_BITMAP_BITS, bit);
+ new += bit;
+ if (new < 0)
+ return -ENOSPC;
+ if (bit == IDA_BITMAP_BITS)
+ continue;
- *p_id = id;
+ __set_bit(bit, bitmap->bitmap);
+ if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
+ radix_tree_iter_tag_clear(root, &iter,
+ IDR_FREE);
+ } else {
+ new += bit;
+ if (new < 0)
+ return -ENOSPC;
+ if (ebit < BITS_PER_LONG) {
+ bitmap = (void *)((1UL << ebit) |
+ RADIX_TREE_EXCEPTIONAL_ENTRY);
+ radix_tree_iter_replace(root, &iter, slot,
+ bitmap);
+ *id = new;
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ __set_bit(bit, bitmap->bitmap);
+ radix_tree_iter_replace(root, &iter, slot, bitmap);
+ }
- /* Each leaf node can handle nearly a thousand slots and the
- * whole idea of ida is to have small memory foot print.
- * Throw away extra resources one by one after each successful
- * allocation.
- */
- if (ida->idr.id_free_cnt || ida->free_bitmap) {
- struct idr_layer *p = get_from_free_list(&ida->idr);
- if (p)
- kmem_cache_free(idr_layer_cache, p);
+ *id = new;
+ return 0;
}
-
- return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
/**
- * ida_remove - remove the given ID
- * @ida: ida handle
- * @id: ID to free
+ * ida_remove - Free the given ID
+ * @ida: ida handle
+ * @id: ID to free
+ *
+ * This function should not be called at the same time as ida_get_new_above().
*/
void ida_remove(struct ida *ida, int id)
{
- struct idr_layer *p = ida->idr.top;
- int shift = (ida->idr.layers - 1) * IDR_BITS;
- int idr_id = id / IDA_BITMAP_BITS;
- int offset = id % IDA_BITMAP_BITS;
- int n;
+ unsigned long index = id / IDA_BITMAP_BITS;
+ unsigned offset = id % IDA_BITMAP_BITS;
struct ida_bitmap *bitmap;
+ unsigned long *btmp;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- if (idr_id > idr_max(ida->idr.layers))
+ slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
+ if (!slot)
goto err;
- /* clear full bits while looking up the leaf idr_layer */
- while ((shift > 0) && p) {
- n = (idr_id >> shift) & IDR_MASK;
- __clear_bit(n, p->bitmap);
- p = p->ary[n];
- shift -= IDR_BITS;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ btmp = (unsigned long *)slot;
+ offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
+ if (offset >= BITS_PER_LONG)
+ goto err;
+ } else {
+ btmp = bitmap->bitmap;
}
-
- if (p == NULL)
- goto err;
-
- n = idr_id & IDR_MASK;
- __clear_bit(n, p->bitmap);
-
- bitmap = (void *)p->ary[n];
- if (!bitmap || !test_bit(offset, bitmap->bitmap))
+ if (!test_bit(offset, btmp))
goto err;
- /* update bitmap and remove it if empty */
- __clear_bit(offset, bitmap->bitmap);
- if (--bitmap->nr_busy == 0) {
- __set_bit(n, p->bitmap); /* to please idr_remove() */
- idr_remove(&ida->idr, idr_id);
- free_bitmap(ida, bitmap);
+ __clear_bit(offset, btmp);
+ radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
+ if (radix_tree_exception(bitmap)) {
+ if (rcu_dereference_raw(*slot) ==
+ (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
+ radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
+ kfree(bitmap);
+ radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
}
-
return;
-
err:
WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
/**
- * ida_destroy - release all cached layers within an ida tree
- * @ida: ida handle
+ * ida_destroy - Free the contents of an ida
+ * @ida: ida handle
+ *
+ * Calling this function releases all resources associated with an IDA. When
+ * this call returns, the IDA is empty and can be reused or freed. The caller
+ * should not allow ida_remove() or ida_get_new_above() to be called at the
+ * same time.
*/
void ida_destroy(struct ida *ida)
{
- idr_destroy(&ida->idr);
- kfree(ida->free_bitmap);
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
+ struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
+ if (!radix_tree_exception(bitmap))
+ kfree(bitmap);
+ radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ }
}
EXPORT_SYMBOL(ida_destroy);
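As an aside on the developer's notes earlier in this hunk: a worked illustration (not kernel code) of how small IDAs are packed straight into a radix tree slot as "exceptional entries". The constants below mirror the 4.11 radix-tree definitions, where an exceptional entry has bit 1 set and the bitmap payload starts at bit 2, matching the (1UL << ebit) | RADIX_TREE_EXCEPTIONAL_ENTRY construction in ida_get_new_above() above.

	#define EXCEPTIONAL_ENTRY	2UL	/* mirrors RADIX_TREE_EXCEPTIONAL_ENTRY */
	#define EXCEPTIONAL_SHIFT	2	/* mirrors RADIX_TREE_EXCEPTIONAL_SHIFT */

	/* An empty exceptional leaf, then IDs 0 and 5 marked as allocated. */
	unsigned long slot = EXCEPTIONAL_ENTRY;
	slot |= 1UL << (0 + EXCEPTIONAL_SHIFT);
	slot |= 1UL << (5 + EXCEPTIONAL_SHIFT);
	/* ID n is free iff (slot & (1UL << (n + EXCEPTIONAL_SHIFT))) == 0,
	 * for n < BITS_PER_LONG - 2; larger n forces a real ida_bitmap. */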
@@ -1141,18 +482,3 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
-
-/**
- * ida_init - initialize ida handle
- * @ida: ida handle
- *
- * This function is used to set up the handle (@ida) that you will pass
- * to the rest of the functions.
- */
-void ida_init(struct ida *ida)
-{
- memset(ida, 0, sizeof(struct ida));
- idr_init(&ida->idr);
-
-}
-EXPORT_SYMBOL(ida_init);
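Finally, a hypothetical user of the simple IDA interface referred to in the DOC block above. The minor_ida naming and both helpers are illustrative; ida_simple_get() and ida_simple_remove() handle their own locking via the simple_ida_lock in this file.

	#include <linux/idr.h>

	static DEFINE_IDA(minor_ida);

	static int acquire_minor(void)
	{
		/* Smallest free ID in [0, 256), or -ENOSPC / -ENOMEM. */
		return ida_simple_get(&minor_ida, 0, 256, GFP_KERNEL);
	}

	static void release_minor(unsigned int minor)
	{
		ida_simple_remove(&minor_ida, minor);
	}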