From 7d9311aa84a9509b0b8cc9c44299200076c38118 Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Tue, 8 Jul 2014 08:21:00 +0200
Subject: dlmalloc: remove unused functions

remove unused function declarations and definitions:

- vallocc is #if 0 in source code
- pvalloc is defined but not used
- cfree is defined but not used
- malloc_usable_size is unused
- mallopt is not defined
- mallinfo is not defined

Signed-off-by: Sascha Hauer
---
 include/malloc.h | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/include/malloc.h b/include/malloc.h
index 7b9b062ada..46147bc885 100644
--- a/include/malloc.h
+++ b/include/malloc.h
@@ -9,15 +9,8 @@ void* malloc(size_t);
 void free(void*);
 void* realloc(void*, size_t);
 void* memalign(size_t, size_t);
-void* vallocc(size_t);
-void* pvalloc(size_t);
 void* calloc(size_t, size_t);
-void cfree(void*);
-int malloc_trim(size_t);
-size_t malloc_usable_size(void*);
 void malloc_stats(void);
-int mallopt(int, int);
-struct mallinfo mallinfo(void);
 void *sbrk(ptrdiff_t increment);
 
 #endif
--
cgit v1.2.3

From 6acc6bc6275a3a7d648a04cede77618f7597ee5b Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Tue, 8 Jul 2014 08:33:35 +0200
Subject: malloc.h: fix codingstyle

- Remove trailing newline
- use void *fn instead of void* fn

Signed-off-by: Sascha Hauer
---
 include/malloc.h | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/include/malloc.h b/include/malloc.h
index 46147bc885..a36f3c0de4 100644
--- a/include/malloc.h
+++ b/include/malloc.h
@@ -3,15 +3,12 @@
 #include
 
-/* Public routines */
-
-void* malloc(size_t);
-void free(void*);
-void* realloc(void*, size_t);
-void* memalign(size_t, size_t);
-void* calloc(size_t, size_t);
+void *malloc(size_t);
+void free(void *);
+void *realloc(void *, size_t);
+void *memalign(size_t, size_t);
+void *calloc(size_t, size_t);
 void malloc_stats(void);
 
 void *sbrk(ptrdiff_t increment);
 
-#endif
-
+#endif /* __MALLOC_H */
--
cgit v1.2.3

From 54889bff1e981c555c377ac5aa4e263b388bef91 Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Thu, 10 Jul 2014 08:17:53 +0200
Subject: introduce verbose debug

During debugging it sometimes helps to be even more verbose. This adds
an additional debug level for this. Also we remove an already existing
pr_vdebug definition in the USB gadget u_serial driver.

Signed-off-by: Sascha Hauer
---
 common/Kconfig                | 2 ++
 drivers/usb/gadget/u_serial.c | 8 --------
 include/printk.h              | 8 +++++++-
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/common/Kconfig b/common/Kconfig
index bba7f159c1..e7c22e5bae 100644
--- a/common/Kconfig
+++ b/common/Kconfig
@@ -676,6 +676,7 @@ config COMPILE_LOGLEVEL
 	  5    normal but significant condition (notice)
 	  6    informational (info)
 	  7    debug-level messages (debug)
+	  8    verbose debug messages (vdebug)
 
 config DEFAULT_LOGLEVEL
 	int "default loglevel"
@@ -692,6 +693,7 @@ config DEFAULT_LOGLEVEL
 	  5    normal but significant condition (notice)
 	  6    informational (info)
 	  7    debug-level messages (debug)
+	  8    verbose debug messages (vdebug)
 
 config DEBUG_INFO
 	bool
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index c2072dc6e9..574e0975b9 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -109,14 +109,6 @@ static unsigned	n_ports;
 
 #define GS_CLOSE_TIMEOUT	15	/* seconds */
 
-#ifdef VERBOSE_DEBUG
-#define pr_vdebug(fmt, arg...) \
-	pr_debug(fmt, ##arg)
-#else
-#define pr_vdebug(fmt, arg...) \
-	({ if (0) pr_debug(fmt, ##arg); })
-#endif
-
 static unsigned gs_start_rx(struct gs_port *port)
 {
 	struct list_head *pool = &port->read_pool;
diff --git a/include/printk.h b/include/printk.h
index f550f07bb8..454315632b 100644
--- a/include/printk.h
+++ b/include/printk.h
@@ -9,8 +9,11 @@
 #define MSG_NOTICE	5	/* normal but significant condition */
 #define MSG_INFO	6	/* informational */
 #define MSG_DEBUG	7	/* debug-level messages */
+#define MSG_VDEBUG	8	/* verbose debug messages */
 
-#ifdef DEBUG
+#ifdef VERBOSE_DEBUG
+#define LOGLEVEL	MSG_VDEBUG
+#elif defined DEBUG
 #define LOGLEVEL	MSG_DEBUG
 #else
 #define LOGLEVEL	CONFIG_COMPILE_LOGLEVEL
@@ -46,6 +49,8 @@ int dev_printf(int level, const struct device_d *dev, const char *format, ...)
 	__dev_printf(6, (dev) , format , ## arg)
 #define dev_dbg(dev, format, arg...) \
 	__dev_printf(7, (dev) , format , ## arg)
+#define dev_vdbg(dev, format, arg...) \
+	__dev_printf(8, (dev) , format , ## arg)
 
 #define __pr_printk(level, format, args...) \
 	({	\
@@ -65,5 +70,6 @@ int dev_printf(int level, const struct device_d *dev, const char *format, ...)
 #define pr_info(fmt, arg...) __pr_printk(6, pr_fmt(fmt), ##arg)
 #define pr_debug(fmt, arg...) __pr_printk(7, pr_fmt(fmt), ##arg)
 #define debug(fmt, arg...) __pr_printk(7, pr_fmt(fmt), ##arg)
+#define pr_vdebug(fmt, arg...) __pr_printk(8, pr_fmt(fmt), ##arg)
 
 #endif
--
cgit v1.2.3

From 36d837d791c52f28565325d30494118de65986ce Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Thu, 10 Jul 2014 08:53:21 +0200
Subject: include: Add DECLARE_BITMAP from kernel

Some code operating on bitmaps needs it.

Signed-off-by: Sascha Hauer
---
 include/linux/types.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/linux/types.h b/include/linux/types.h
index 14f8315410..c11e148c90 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -4,6 +4,9 @@
 #include
 #include
 
+#define DECLARE_BITMAP(name,bits) \
+	unsigned long name[BITS_TO_LONGS(bits)]
+
 #ifndef __KERNEL_STRICT_NAMES
 
 typedef __u32 __kernel_dev_t;
--
cgit v1.2.3

From 77bd64d1fd37704bbd9e78b10b3847381299889a Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Thu, 10 Jul 2014 09:12:02 +0200
Subject: include: Add round_up/round_down macros from kernel

Yes, we already have roundup/rounddown. Unlike these macros,
round_up/round_down are optimized for (and only work with) power-of-2
arguments. It's a poor name, but kernel code needs it, so add it to
barebox as well.

Signed-off-by: Sascha Hauer
---
 include/linux/kernel.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 4322f01580..b90d8f7e04 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -4,6 +4,19 @@
 #include
 #include
 
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ *
+ * NOTE these functions only round to power-of-2 arguments. Use
+ * roundup/rounddown for non power-of-2-arguments.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
 /*
  * min()/max()/clamp() macros that also do
  * strict type-checking.. See the
--
cgit v1.2.3

From 90ff524be3846acbc2a7f9d5d0c2a2199db3d0a5 Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Thu, 10 Jul 2014 09:15:08 +0200
Subject: include: Add foreign endianness add functions from kernel

Signed-off-by: Sascha Hauer
---
 include/linux/byteorder/generic.h | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 2d68d99fa4..e59ba455e3 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -175,5 +175,34 @@ extern unsigned short int htons(unsigned short int);
 
 #endif /* OPTIMIZE */
 
+static inline void le16_add_cpu(__le16 *var, u16 val)
+{
+	*var = cpu_to_le16(le16_to_cpu(*var) + val);
+}
+
+static inline void le32_add_cpu(__le32 *var, u32 val)
+{
+	*var = cpu_to_le32(le32_to_cpu(*var) + val);
+}
+
+static inline void le64_add_cpu(__le64 *var, u64 val)
+{
+	*var = cpu_to_le64(le64_to_cpu(*var) + val);
+}
+
+static inline void be16_add_cpu(__be16 *var, u16 val)
+{
+	*var = cpu_to_be16(be16_to_cpu(*var) + val);
+}
+
+static inline void be32_add_cpu(__be32 *var, u32 val)
+{
+	*var = cpu_to_be32(be32_to_cpu(*var) + val);
+}
+
+static inline void be64_add_cpu(__be64 *var, u64 val)
+{
+	*var = cpu_to_be64(be64_to_cpu(*var) + val);
+}
 
 #endif /* _LINUX_BYTEORDER_GENERIC_H */
--
cgit v1.2.3

From 92df1ff5f1443cec9c43f6826b99a43e110a67ed Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Thu, 10 Jul 2014 09:21:50 +0200
Subject: include: update bitop functions from kernel

Updates the bitop functions from v3.16-rc4

Signed-off-by: Sascha Hauer
---
 include/asm-generic/bitops/hweight.h |  15 +++
 include/linux/bitops.h               | 214 ++++++++++++++++++++++++++++++++++-
 2 files changed, 228 insertions(+), 1 deletion(-)

diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
index af9770de4e..7268c8b9ab 100644
--- a/include/asm-generic/bitops/hweight.h
+++ b/include/asm-generic/bitops/hweight.h
@@ -32,4 +32,19 @@ static inline unsigned int hweight8(unsigned int w)
 	return (res & 0x0F) + ((res >> 4) & 0x0F);
 }
 
+static inline unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+	return hweight32((unsigned int)(w >> 32)) +
+	       hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+	__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+	res = res + (res >> 8);
+	res = res + (res >> 16);
+	return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+}
+
 #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 127c1619fe..be5fd38bd5 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,15 +1,227 @@
 #ifndef _LINUX_BITOPS_H
 #define _LINUX_BITOPS_H
+#include
 
-#ifdef __BAREBOX__
+#ifdef __KERNEL__
 #define BIT(nr)			(1UL << (nr))
+#define BIT_ULL(nr)		(1ULL << (nr))
 #define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
 #define BITS_PER_BYTE		8
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l)		(((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l)	(((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
+/*
+ * Include this here because some architectures need generic_ffs/fls in
+ * scope
+ */
 #include
+
+#define for_each_set_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size));		\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+	for ((bit) = find_first_zero_bit((addr), (size));	\
+	     (bit) < (size);					\
+	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
+	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+static __inline__ int get_bitmask_order(unsigned int count)
+{
+	int order;
+
+	order = fls(count);
+	return order;	/* We could be slightly more clever with -1 here... */
+}
+
+static __inline__ int get_count_order(unsigned int count)
+{
+	int order;
+
+	order = fls(count) - 1;
+	if (count & (count - 1))
+		order++;
+	return order;
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (64 - shift));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (32 - shift));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 ror32(__u32 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (32 - shift));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 rol16(__u16 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (16 - shift));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 ror16(__u16 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (16 - shift));
+}
+
+/**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 rol8(__u8 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (8 - shift));
+}
+
+/**
+ * ror8 - rotate an 8-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 ror8(__u8 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (8 - shift));
+}
+
+/**
+ * sign_extend32 - sign
extend a 32-bit value using specified bit as sign-bit + * @value: value to sign extend + * @index: 0 based bit index (0<=index<32) to sign bit + */ +static inline __s32 sign_extend32(__u32 value, int index) +{ + __u8 shift = 31 - index; + return (__s32)(value << shift) >> shift; +} + +static inline unsigned fls_long(unsigned long l) +{ + if (sizeof(l) == 4) + return fls(l); + return fls64(l); +} + +/** + * __ffs64 - find first set bit in a 64 bit word + * @word: The 64 bit word + * + * On 64 bit arches this is a synomyn for __ffs + * The result is not defined if no bits are set, so check that @word + * is non-zero before calling this. + */ +static inline unsigned long __ffs64(u64 word) +{ +#if BITS_PER_LONG == 32 + if (((u32)word) == 0UL) + return __ffs((u32)(word >> 32)) + 32; +#elif BITS_PER_LONG != 64 +#error BITS_PER_LONG not 32 or 64 +#endif + return __ffs((unsigned long)word); +} + +#ifdef __KERNEL__ + +#ifndef set_mask_bits +#define set_mask_bits(ptr, _mask, _bits) \ +({ \ + const typeof(*ptr) mask = (_mask), bits = (_bits); \ + typeof(*ptr) old, new; \ + \ + do { \ + old = ACCESS_ONCE(*ptr); \ + new = (old & ~mask) | bits; \ + } while (cmpxchg(ptr, old, new) != old); \ + \ + new; \ +}) +#endif + +#ifndef find_last_bit +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit, or size. + */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif +#endif /* __KERNEL__ */ #endif -- cgit v1.2.3 From 757377a9ab31689f4927466c58c63a37964573f0 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Thu, 10 Jul 2014 09:19:38 +0200 Subject: lib: Add bitmap functions from kernel Signed-off-by: Sascha Hauer --- arch/x86/Kconfig | 1 + include/linux/bitmap.h | 285 +++++++++++++++++ lib/Makefile | 1 + lib/bitmap.c | 839 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1126 insertions(+) create mode 100644 include/linux/bitmap.h create mode 100644 lib/bitmap.c (limited to 'include') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d8d7f0e651..346640dcda 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -20,6 +20,7 @@ config X86 select HAS_MODULES select HAVE_CONFIGURABLE_MEMORY_LAYOUT select HAVE_CONFIGURABLE_TEXT_BASE + select GENERIC_FIND_NEXT_BIT default y config X86_BOOTLOADER diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h new file mode 100644 index 0000000000..4b98521a83 --- /dev/null +++ b/include/linux/bitmap.h @@ -0,0 +1,285 @@ +#ifndef __LINUX_BITMAP_H +#define __LINUX_BITMAP_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * bitmaps provide bit arrays that consume one or more unsigned + * longs. The bitmap interface and available operations are listed + * here, in bitmap.h + * + * Function implementations generic to all architectures are in + * lib/bitmap.c. Functions implementations that are architecture + * specific are in various include/asm-/bitops.h headers + * and other arch/ specific files. + * + * See lib/bitmap.c for more details. + */ + +/* + * The available bitmap operations and their rough meaning in the + * case that the bitmap is a single unsigned long are thus: + * + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. 
+ * + * bitmap_zero(dst, nbits) *dst = 0UL + * bitmap_fill(dst, nbits) *dst = ~0UL + * bitmap_copy(dst, src, nbits) *dst = *src + * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 + * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 + * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 + * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) + * bitmap_complement(dst, src, nbits) *dst = ~(*src) + * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? + * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? + * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? + * bitmap_empty(src, nbits) Are all bits zero in *src? + * bitmap_full(src, nbits) Are all bits set in *src? + * bitmap_weight(src, nbits) Hamming Weight: number set bits + * bitmap_set(dst, pos, nbits) Set specified bit area + * bitmap_clear(dst, pos, nbits) Clear specified bit area + * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n + * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n + * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) + * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) + * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap + * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz + * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region + * bitmap_release_region(bitmap, pos, order) Free specified bit region + * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + */ + +/* + * Also the following operations in asm/bitops.h apply to bitmaps. + * + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + * change_bit(bit, addr) *addr ^= bit + * test_bit(bit, addr) Is bit set in *addr? + * test_and_set_bit(bit, addr) Set bit and return old value + * test_and_clear_bit(bit, addr) Clear bit and return old value + * test_and_change_bit(bit, addr) Change bit and return old value + * find_first_zero_bit(addr, nbits) Position first zero bit in *addr + * find_first_bit(addr, nbits) Position first set bit in *addr + * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + */ + +/* + * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used + * to declare an array named 'name' of just enough unsigned longs to + * contain all bit positions from 0 to 'bits' - 1. 
+ */ + +/* + * lib/bitmap.c provides these functions: + */ + +extern int __bitmap_empty(const unsigned long *bitmap, int bits); +extern int __bitmap_full(const unsigned long *bitmap, int bits); +extern int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, + int bits); +extern void __bitmap_shift_right(unsigned long *dst, + const unsigned long *src, int shift, int bits); +extern void __bitmap_shift_left(unsigned long *dst, + const unsigned long *src, int shift, int bits); +extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); +extern int __bitmap_weight(const unsigned long *bitmap, int bits); + +extern void bitmap_set(unsigned long *map, int i, int len); +extern void bitmap_clear(unsigned long *map, int start, int nr); +extern unsigned long bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask); + +extern void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, int bits); +extern int bitmap_bitremap(int oldbit, + const unsigned long *old, const unsigned long *new, int bits); +extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, int bits); +extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, + int sz, int bits); +extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); +extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); +extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); +extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); +extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); + +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) +#define BITMAP_LAST_WORD_MASK(nbits) \ +( \ + ((nbits) % BITS_PER_LONG) ? 
\ + (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ +) + +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + +static inline void bitmap_zero(unsigned long *dst, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} + +static inline void bitmap_fill(unsigned long *dst, int nbits) +{ + size_t nlongs = BITS_TO_LONGS(nbits); + if (!small_const_nbits(nbits)) { + int len = (nlongs - 1) * sizeof(unsigned long); + memset(dst, 0xff, len); + } + dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); +} + +static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, + int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memcpy(dst, src, len); + } +} + +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & *src2) != 0; + return __bitmap_and(dst, src1, src2, nbits); +} + +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 | *src2; + else + __bitmap_or(dst, src1, src2, nbits); +} + +static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 ^ *src2; + else + __bitmap_xor(dst, src1, src2, nbits); +} + +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return (*dst = *src1 & ~(*src2)) != 0; + return __bitmap_andnot(dst, src1, src2, nbits); +} + +static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, + int nbits) +{ + if (small_const_nbits(nbits)) + *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); + else + __bitmap_complement(dst, src, nbits); +} + +static inline int bitmap_equal(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_equal(src1, src2, nbits); +} + +static inline int bitmap_intersects(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; + else + return __bitmap_intersects(src1, src2, nbits); +} + +static inline int bitmap_subset(const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_subset(src1, src2, nbits); +} + +static inline int bitmap_empty(const unsigned long *src, int nbits) +{ + if (small_const_nbits(nbits)) + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_empty(src, nbits); +} + +static inline int bitmap_full(const unsigned long *src, int nbits) +{ + if (small_const_nbits(nbits)) + return ! 
(~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_full(src, nbits); +} + +static inline int bitmap_weight(const unsigned long *src, int nbits) +{ + if (small_const_nbits(nbits)) + return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); + return __bitmap_weight(src, nbits); +} + +static inline void bitmap_shift_right(unsigned long *dst, + const unsigned long *src, int n, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src >> n; + else + __bitmap_shift_right(dst, src, n, nbits); +} + +static inline void bitmap_shift_left(unsigned long *dst, + const unsigned long *src, int n, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); + else + __bitmap_shift_left(dst, src, n, nbits); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __LINUX_BITMAP_H */ diff --git a/lib/Makefile b/lib/Makefile index e8769a9be2..78f724a095 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -44,3 +44,4 @@ obj-y += gui/ obj-$(CONFIG_XYMODEM) += xymodem.o obj-y += unlink-recursive.o obj-$(CONFIG_STMP_DEVICE) += stmp-device.o +obj-y += bitmap.o diff --git a/lib/bitmap.c b/lib/bitmap.c new file mode 100644 index 0000000000..5be6651941 --- /dev/null +++ b/lib/bitmap.c @@ -0,0 +1,839 @@ +/* + * lib/bitmap.c + * Helper functions for bitmap.h. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ +#include +#include +#include +#include + +/* + * bitmaps provide an array of bits, implemented using an an + * array of unsigned longs. The number of valid bits in a + * given bitmap does _not_ need to be an exact multiple of + * BITS_PER_LONG. + * + * The possible unused bits in the last, partially used word + * of a bitmap are 'don't care'. The implementation makes + * no particular effort to keep them zero. It ensures that + * their value will not affect the results of any operation. + * The bitmap operations that return Boolean (bitmap_empty, + * for example) or scalar (bitmap_weight, for example) results + * carefully filter out these unused bits from impacting their + * results. + * + * These operations actually hold to a slightly stronger rule: + * if you don't input any bitmaps to these ops that have some + * unused bits set, then they won't output any set unused bits + * in output bitmaps. + * + * The byte ordering of bitmaps is more natural on little + * endian architectures. See the big-endian headers + * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h + * for the best explanations of this ordering. 
+ */ + +int __bitmap_empty(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} +EXPORT_SYMBOL(__bitmap_empty); + +int __bitmap_full(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (~bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} +EXPORT_SYMBOL(__bitmap_full); + +int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap1[k] != bitmap2[k]) + return 0; + + if (bits % BITS_PER_LONG) + if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} +EXPORT_SYMBOL(__bitmap_equal); + +void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + dst[k] = ~src[k]; + + if (bits % BITS_PER_LONG) + dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); +} +EXPORT_SYMBOL(__bitmap_complement); + +/** + * __bitmap_shift_right - logical right shift of the bits in a bitmap + * @dst : destination bitmap + * @src : source bitmap + * @shift : shift by this many bits + * @bits : bitmap size, in bits + * + * Shifting right (dividing) means moving bits in the MS -> LS bit + * direction. Zeros are fed into the vacated MS positions and the + * LS bits shifted off the bottom are lost. + */ +void __bitmap_shift_right(unsigned long *dst, + const unsigned long *src, int shift, int bits) +{ + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; + int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; + unsigned long mask = (1UL << left) - 1; + for (k = 0; off + k < lim; ++k) { + unsigned long upper, lower; + + /* + * If shift is not word aligned, take lower rem bits of + * word above and make them the top rem bits of result. + */ + if (!rem || off + k + 1 >= lim) + upper = 0; + else { + upper = src[off + k + 1]; + if (off + k + 1 == lim - 1 && left) + upper &= mask; + } + lower = src[off + k]; + if (left && off + k == lim - 1) + lower &= mask; + dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; + if (left && k == lim - 1) + dst[k] &= mask; + } + if (off) + memset(&dst[lim - off], 0, off*sizeof(unsigned long)); +} +EXPORT_SYMBOL(__bitmap_shift_right); + + +/** + * __bitmap_shift_left - logical left shift of the bits in a bitmap + * @dst : destination bitmap + * @src : source bitmap + * @shift : shift by this many bits + * @bits : bitmap size, in bits + * + * Shifting left (multiplying) means moving bits in the LS -> MS + * direction. Zeros are fed into the vacated LS bit positions + * and those MS bits shifted off the top are lost. + */ + +void __bitmap_shift_left(unsigned long *dst, + const unsigned long *src, int shift, int bits) +{ + int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; + int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; + for (k = lim - off - 1; k >= 0; --k) { + unsigned long upper, lower; + + /* + * If shift is not word aligned, take upper rem bits of + * word below and make them the bottom rem bits of result. 
+ */ + if (rem && k > 0) + lower = src[k - 1]; + else + lower = 0; + upper = src[k]; + if (left && k == lim - 1) + upper &= (1UL << left) - 1; + dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; + if (left && k + off == lim - 1) + dst[k + off] &= (1UL << left) - 1; + } + if (off) + memset(dst, 0, off*sizeof(unsigned long)); +} +EXPORT_SYMBOL(__bitmap_shift_left); + +int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + unsigned long result = 0; + + for (k = 0; k < nr; k++) + result |= (dst[k] = bitmap1[k] & bitmap2[k]); + return result != 0; +} +EXPORT_SYMBOL(__bitmap_and); + +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] | bitmap2[k]; +} +EXPORT_SYMBOL(__bitmap_or); + +void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] ^ bitmap2[k]; +} +EXPORT_SYMBOL(__bitmap_xor); + +int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + unsigned long result = 0; + + for (k = 0; k < nr; k++) + result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); + return result != 0; +} +EXPORT_SYMBOL(__bitmap_andnot); + +int __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap1[k] & bitmap2[k]) + return 1; + + if (bits % BITS_PER_LONG) + if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) + return 1; + return 0; +} +EXPORT_SYMBOL(__bitmap_intersects); + +int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap1[k] & ~bitmap2[k]) + return 0; + + if (bits % BITS_PER_LONG) + if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) + return 0; + return 1; +} +EXPORT_SYMBOL(__bitmap_subset); + +int __bitmap_weight(const unsigned long *bitmap, int bits) +{ + int k, w = 0, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; k++) + w += hweight_long(bitmap[k]); + + if (bits % BITS_PER_LONG) + w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + + return w; +} +EXPORT_SYMBOL(__bitmap_weight); + +void bitmap_set(unsigned long *map, int start, int nr) +{ + unsigned long *p = map + BIT_WORD(start); + const int size = start + nr; + int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + + while (nr - bits_to_set >= 0) { + *p |= mask_to_set; + nr -= bits_to_set; + bits_to_set = BITS_PER_LONG; + mask_to_set = ~0UL; + p++; + } + if (nr) { + mask_to_set &= BITMAP_LAST_WORD_MASK(size); + *p |= mask_to_set; + } +} +EXPORT_SYMBOL(bitmap_set); + +void bitmap_clear(unsigned long *map, int start, int nr) +{ + unsigned long *p = map + BIT_WORD(start); + const int size = start + nr; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + + while (nr - bits_to_clear >= 0) { + *p &= ~mask_to_clear; + nr -= bits_to_clear; + bits_to_clear = BITS_PER_LONG; + mask_to_clear = ~0UL; + p++; + } + if (nr) { + mask_to_clear &= BITMAP_LAST_WORD_MASK(size); + 
*p &= ~mask_to_clear; + } +} +EXPORT_SYMBOL(bitmap_clear); + +/* + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @align_mask: Alignment mask for zero area + * + * The @align_mask should be one less than a power of 2; the effect is that + * the bit offset of all zero areas this function finds is multiples of that + * power of 2. A @align_mask of 0 means no alignment is required. + */ +unsigned long bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask) +{ + unsigned long index, end, i; +again: + index = find_next_zero_bit(map, size, start); + + /* Align allocation */ + index = __ALIGN_MASK(index, align_mask); + + end = index + nr; + if (end > size) + return end; + i = find_next_bit(map, end, index); + if (i < end) { + start = i + 1; + goto again; + } + return index; +} +EXPORT_SYMBOL(bitmap_find_next_zero_area); + +/* + * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers, + * second version by Paul Jackson, third by Joe Korty. + */ + +#define CHUNKSZ 32 +#define nbits_to_hold_value(val) fls(val) +#define BASEDEC 10 /* fancier cpuset lists input in decimal */ + +/** + * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap + * @buf: pointer to a bitmap + * @pos: a bit position in @buf (0 <= @pos < @bits) + * @bits: number of valid bit positions in @buf + * + * Map the bit at position @pos in @buf (of length @bits) to the + * ordinal of which set bit it is. If it is not set or if @pos + * is not a valid bit position, map to -1. + * + * If for example, just bits 4 through 7 are set in @buf, then @pos + * values 4 through 7 will get mapped to 0 through 3, respectively, + * and other @pos values will get mapped to 0. When @pos value 7 + * gets mapped to (returns) @ord value 3 in this example, that means + * that bit 7 is the 3rd (starting with 0th) set bit in @buf. + * + * The bit positions 0 through @bits are valid positions in @buf. + */ +static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) +{ + int i, ord; + + if (pos < 0 || pos >= bits || !test_bit(pos, buf)) + return -1; + + i = find_first_bit(buf, bits); + ord = 0; + while (i < pos) { + i = find_next_bit(buf, bits, i + 1); + ord++; + } + BUG_ON(i != pos); + + return ord; +} + +/** + * bitmap_ord_to_pos - find position of n-th set bit in bitmap + * @buf: pointer to bitmap + * @ord: ordinal bit position (n-th set bit, n >= 0) + * @bits: number of valid bit positions in @buf + * + * Map the ordinal offset of bit @ord in @buf to its position in @buf. + * Value of @ord should be in range 0 <= @ord < weight(buf), else + * results are undefined. + * + * If for example, just bits 4 through 7 are set in @buf, then @ord + * values 0 through 3 will get mapped to 4 through 7, respectively, + * and all other @ord values return undefined values. When @ord value 3 + * gets mapped to (returns) @pos value 7 in this example, that means + * that the 3rd set bit (starting with 0th) is at position 7 in @buf. + * + * The bit positions 0 through @bits are valid positions in @buf. 
+ */ +int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) +{ + int pos = 0; + + if (ord >= 0 && ord < bits) { + int i; + + for (i = find_first_bit(buf, bits); + i < bits && ord > 0; + i = find_next_bit(buf, bits, i + 1)) + ord--; + if (i < bits && ord == 0) + pos = i; + } + + return pos; +} + +/** + * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap + * @dst: remapped result + * @src: subset to be remapped + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * If either of the @old and @new bitmaps are empty, or if @src and + * @dst point to the same location, then this routine copies @src + * to @dst. + * + * The positions of unset bits in @old are mapped to themselves + * (the identify map). + * + * Apply the above specified mapping to @src, placing the result in + * @dst, clearing any bits previously set in @dst. + * + * For example, lets say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set. This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions unchanged. So if say @src comes into this routine + * with bits 1, 5 and 7 set, then @dst should leave with bits 1, + * 13 and 15 set. + */ +void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, + int bits) +{ + int oldbit, w; + + if (dst == src) /* following doesn't handle inplace remaps */ + return; + bitmap_zero(dst, bits); + + w = bitmap_weight(new, bits); + for_each_set_bit(oldbit, src, bits) { + int n = bitmap_pos_to_ord(old, oldbit, bits); + + if (n < 0 || w == 0) + set_bit(oldbit, dst); /* identity map */ + else + set_bit(bitmap_ord_to_pos(new, n % w, bits), dst); + } +} +EXPORT_SYMBOL(bitmap_remap); + +/** + * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit + * @oldbit: bit position to be mapped + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * The positions of unset bits in @old are mapped to themselves + * (the identify map). + * + * Apply the above specified mapping to bit position @oldbit, returning + * the new bit position. + * + * For example, lets say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set. This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions unchanged. So if say @oldbit is 5, then this routine + * returns 13. 
+ */ +int bitmap_bitremap(int oldbit, const unsigned long *old, + const unsigned long *new, int bits) +{ + int w = bitmap_weight(new, bits); + int n = bitmap_pos_to_ord(old, oldbit, bits); + if (n < 0 || w == 0) + return oldbit; + else + return bitmap_ord_to_pos(new, n % w, bits); +} +EXPORT_SYMBOL(bitmap_bitremap); + +/** + * bitmap_onto - translate one bitmap relative to another + * @dst: resulting translated bitmap + * @orig: original untranslated bitmap + * @relmap: bitmap relative to which translated + * @bits: number of bits in each of these bitmaps + * + * Set the n-th bit of @dst iff there exists some m such that the + * n-th bit of @relmap is set, the m-th bit of @orig is set, and + * the n-th bit of @relmap is also the m-th _set_ bit of @relmap. + * (If you understood the previous sentence the first time your + * read it, you're overqualified for your current job.) + * + * In other words, @orig is mapped onto (surjectively) @dst, + * using the the map { | the n-th bit of @relmap is the + * m-th set bit of @relmap }. + * + * Any set bits in @orig above bit number W, where W is the + * weight of (number of set bits in) @relmap are mapped nowhere. + * In particular, if for all bits m set in @orig, m >= W, then + * @dst will end up empty. In situations where the possibility + * of such an empty result is not desired, one way to avoid it is + * to use the bitmap_fold() operator, below, to first fold the + * @orig bitmap over itself so that all its set bits x are in the + * range 0 <= x < W. The bitmap_fold() operator does this by + * setting the bit (m % W) in @dst, for each bit (m) set in @orig. + * + * Example [1] for bitmap_onto(): + * Let's say @relmap has bits 30-39 set, and @orig has bits + * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine, + * @dst will have bits 31, 33, 35, 37 and 39 set. + * + * When bit 0 is set in @orig, it means turn on the bit in + * @dst corresponding to whatever is the first bit (if any) + * that is turned on in @relmap. Since bit 0 was off in the + * above example, we leave off that bit (bit 30) in @dst. + * + * When bit 1 is set in @orig (as in the above example), it + * means turn on the bit in @dst corresponding to whatever + * is the second bit that is turned on in @relmap. The second + * bit in @relmap that was turned on in the above example was + * bit 31, so we turned on bit 31 in @dst. + * + * Similarly, we turned on bits 33, 35, 37 and 39 in @dst, + * because they were the 4th, 6th, 8th and 10th set bits + * set in @relmap, and the 4th, 6th, 8th and 10th bits of + * @orig (i.e. bits 3, 5, 7 and 9) were also set. + * + * When bit 11 is set in @orig, it means turn on the bit in + * @dst corresponding to whatever is the twelfth bit that is + * turned on in @relmap. In the above example, there were + * only ten bits turned on in @relmap (30..39), so that bit + * 11 was set in @orig had no affect on @dst. + * + * Example [2] for bitmap_fold() + bitmap_onto(): + * Let's say @relmap has these ten bits set: + * 40 41 42 43 45 48 53 61 74 95 + * (for the curious, that's 40 plus the first ten terms of the + * Fibonacci sequence.) 
+ * + * Further lets say we use the following code, invoking + * bitmap_fold() then bitmap_onto, as suggested above to + * avoid the possitility of an empty @dst result: + * + * unsigned long *tmp; // a temporary bitmap's bits + * + * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); + * bitmap_onto(dst, tmp, relmap, bits); + * + * Then this table shows what various values of @dst would be, for + * various @orig's. I list the zero-based positions of each set bit. + * The tmp column shows the intermediate result, as computed by + * using bitmap_fold() to fold the @orig bitmap modulo ten + * (the weight of @relmap). + * + * @orig tmp @dst + * 0 0 40 + * 1 1 41 + * 9 9 95 + * 10 0 40 (*) + * 1 3 5 7 1 3 5 7 41 43 48 61 + * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 + * 0 9 18 27 0 9 8 7 40 61 74 95 + * 0 10 20 30 0 40 + * 0 11 22 33 0 1 2 3 40 41 42 43 + * 0 12 24 36 0 2 4 6 40 42 45 53 + * 78 102 211 1 2 8 41 42 74 (*) + * + * (*) For these marked lines, if we hadn't first done bitmap_fold() + * into tmp, then the @dst result would have been empty. + * + * If either of @orig or @relmap is empty (no set bits), then @dst + * will be returned empty. + * + * If (as explained above) the only set bits in @orig are in positions + * m where m >= W, (where W is the weight of @relmap) then @dst will + * once again be returned empty. + * + * All bits in @dst not set by the above rule are cleared. + */ +void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, int bits) +{ + int n, m; /* same meaning as in above comment */ + + if (dst == orig) /* following doesn't handle inplace mappings */ + return; + bitmap_zero(dst, bits); + + /* + * The following code is a more efficient, but less + * obvious, equivalent to the loop: + * for (m = 0; m < bitmap_weight(relmap, bits); m++) { + * n = bitmap_ord_to_pos(orig, m, bits); + * if (test_bit(m, orig)) + * set_bit(n, dst); + * } + */ + + m = 0; + for_each_set_bit(n, relmap, bits) { + /* m == bitmap_pos_to_ord(relmap, n, bits) */ + if (test_bit(m, orig)) + set_bit(n, dst); + m++; + } +} +EXPORT_SYMBOL(bitmap_onto); + +/** + * bitmap_fold - fold larger bitmap into smaller, modulo specified size + * @dst: resulting smaller bitmap + * @orig: original larger bitmap + * @sz: specified size + * @bits: number of bits in each of these bitmaps + * + * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. + * Clear all other bits in @dst. See further the comment and + * Example [2] for bitmap_onto() for why and how to use this. + */ +void bitmap_fold(unsigned long *dst, const unsigned long *orig, + int sz, int bits) +{ + int oldbit; + + if (dst == orig) /* following doesn't handle inplace mappings */ + return; + bitmap_zero(dst, bits); + + for_each_set_bit(oldbit, orig, bits) + set_bit(oldbit % sz, dst); +} +EXPORT_SYMBOL(bitmap_fold); + +/* + * Common code for bitmap_*_region() routines. + * bitmap: array of unsigned longs corresponding to the bitmap + * pos: the beginning of the region + * order: region size (log base 2 of number of bits) + * reg_op: operation(s) to perform on that region of bitmap + * + * Can set, verify and/or release a region of bits in a bitmap, + * depending on which combination of REG_OP_* flag bits is set. + * + * A region of a bitmap is a sequence of bits in the bitmap, of + * some size '1 << order' (a power of two), aligned to that same + * '1 << order' power of two. + * + * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits). + * Returns 0 in all other cases and reg_ops. 
+ */ + +enum { + REG_OP_ISFREE, /* true if region is all zero bits */ + REG_OP_ALLOC, /* set all bits in region */ + REG_OP_RELEASE, /* clear all bits in region */ +}; + +static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op) +{ + int nbits_reg; /* number of bits in region */ + int index; /* index first long of region in bitmap */ + int offset; /* bit offset region in bitmap[index] */ + int nlongs_reg; /* num longs spanned by region in bitmap */ + int nbitsinlong; /* num bits of region in each spanned long */ + unsigned long mask; /* bitmask for one long of region */ + int i; /* scans bitmap by longs */ + int ret = 0; /* return value */ + + /* + * Either nlongs_reg == 1 (for small orders that fit in one long) + * or (offset == 0 && mask == ~0UL) (for larger multiword orders.) + */ + nbits_reg = 1 << order; + index = pos / BITS_PER_LONG; + offset = pos - (index * BITS_PER_LONG); + nlongs_reg = BITS_TO_LONGS(nbits_reg); + nbitsinlong = min(nbits_reg, BITS_PER_LONG); + + /* + * Can't do "mask = (1UL << nbitsinlong) - 1", as that + * overflows if nbitsinlong == BITS_PER_LONG. + */ + mask = (1UL << (nbitsinlong - 1)); + mask += mask - 1; + mask <<= offset; + + switch (reg_op) { + case REG_OP_ISFREE: + for (i = 0; i < nlongs_reg; i++) { + if (bitmap[index + i] & mask) + goto done; + } + ret = 1; /* all bits in region free (zero) */ + break; + + case REG_OP_ALLOC: + for (i = 0; i < nlongs_reg; i++) + bitmap[index + i] |= mask; + break; + + case REG_OP_RELEASE: + for (i = 0; i < nlongs_reg; i++) + bitmap[index + i] &= ~mask; + break; + } +done: + return ret; +} + +/** + * bitmap_find_free_region - find a contiguous aligned mem region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @bits: number of bits in the bitmap + * @order: region size (log base 2 of number of bits) to find + * + * Find a region of free (zero) bits in a @bitmap of @bits bits and + * allocate them (set them to one). Only consider regions of length + * a power (@order) of two, aligned to that power of two, which + * makes the search algorithm much faster. + * + * Return the bit offset in bitmap of the allocated region, + * or -errno on failure. + */ +int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) +{ + int pos, end; /* scans bitmap by regions of size order */ + + for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) { + if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + continue; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); + return pos; + } + return -ENOMEM; +} +EXPORT_SYMBOL(bitmap_find_free_region); + +/** + * bitmap_release_region - release allocated bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to release + * @order: region size (log base 2 of number of bits) to release + * + * This is the complement to __bitmap_find_free_region() and releases + * the found region (by clearing it in the bitmap). + * + * No return value. + */ +void bitmap_release_region(unsigned long *bitmap, int pos, int order) +{ + __reg_op(bitmap, pos, order, REG_OP_RELEASE); +} +EXPORT_SYMBOL(bitmap_release_region); + +/** + * bitmap_allocate_region - allocate bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to allocate + * @order: region size (log base 2 of number of bits) to allocate + * + * Allocate (set bits in) a specified region of a bitmap. + * + * Return 0 on success, or %-EBUSY if specified region wasn't + * free (not all bits were zero). 
+ */ +int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) +{ + if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + return -EBUSY; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); + return 0; +} +EXPORT_SYMBOL(bitmap_allocate_region); + +/** + * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. + * @dst: destination buffer + * @src: bitmap to copy + * @nbits: number of bits in the bitmap + * + * Require nbits % BITS_PER_LONG == 0. + */ +void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) +{ + unsigned long *d = dst; + int i; + + for (i = 0; i < nbits/BITS_PER_LONG; i++) { + if (BITS_PER_LONG == 64) + d[i] = cpu_to_le64(src[i]); + else + d[i] = cpu_to_le32(src[i]); + } +} +EXPORT_SYMBOL(bitmap_copy_le); -- cgit v1.2.3
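
Usage sketch (illustrative, not part of the series): taken together, the
patches above port DECLARE_BITMAP(), round_up()/round_down(), the
le*/be*_add_cpu() accessors and the kernel bitmap/bitops API into barebox.
The fragment below shows how ported kernel code would typically combine
them. It assumes a barebox translation unit where the headers touched
above are in the include path and where printk.h (extended by the
verbose-debug patch) provides the pr_* macros; the function name
bitmap_helpers_demo is made up for the example.

#include <linux/types.h>	/* DECLARE_BITMAP() */
#include <linux/kernel.h>	/* round_up()/round_down() */
#include <linux/bitops.h>	/* BIT(), GENMASK, for_each_set_bit() */
#include <linux/bitmap.h>	/* bitmap_zero(), bitmap_set(), bitmap_weight() */

static void bitmap_helpers_demo(void)
{
	DECLARE_BITMAP(map, 64);	/* unsigned long map[BITS_TO_LONGS(64)] */
	unsigned int bit;

	bitmap_zero(map, 64);
	bitmap_set(map, 4, 4);		/* set bits 4..7, i.e. GENMASK(7, 4) */

	for_each_set_bit(bit, map, 64)	/* visits 4, 5, 6, 7 */
		pr_vdebug("bit %u is set\n", bit);

	pr_debug("weight: %d\n", bitmap_weight(map, 64));	/* prints 4 */

	/* round_up()/round_down() require a power-of-2 second argument */
	pr_debug("%lu %lu\n", round_up(13UL, 8), round_down(13UL, 8));	/* 16 8 */
}

Note that when CONFIG_COMPILE_LOGLEVEL is below 8 and neither DEBUG nor
VERBOSE_DEBUG is defined, the pr_vdebug() line compiles away entirely,
which is the point of the new MSG_VDEBUG level.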