From 60a7a88dbb9fc9adcca78a10a3ecf36966b5a45c Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Wed, 3 May 2017 14:53:51 -0700
Subject: mm/sparse: refine usemap_size() a little

The current implementation calculates usemap_size() in two steps:

  * calculate the number of bytes needed to cover these bits
  * calculate the number of "unsigned long"s needed to cover these bytes

It is clearer to:

  * calculate the number of "unsigned long"s needed to cover these bits
  * multiply it by sizeof(unsigned long)

This patch refines usemap_size() a little to make it easier to understand.

Link: http://lkml.kernel.org/r/20170310043713.96871-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang
Cc: Tejun Heo
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/sparse.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/mm/sparse.c b/mm/sparse.c
index db6bf3c97ea2c..6903c8fc30850 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -248,10 +248,7 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
 
 unsigned long usemap_size(void)
 {
-	unsigned long size_bytes;
-	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
-	size_bytes = roundup(size_bytes, sizeof(unsigned long));
-	return size_bytes;
+	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
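
For reference, a minimal userspace sketch (not part of the patch) that checks the
two forms are equivalent.  The ROUNDUP and BITS_TO_LONGS macros below are local
stand-ins that mirror the kernel's roundup() and BITS_TO_LONGS() definitions; the
loop asserts that the old two-step byte calculation and the new one-liner return
the same size for a range of bit counts.

/* Standalone sketch, not kernel code. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the kernel's roundup(): round x up to a multiple of y. */
#define ROUNDUP(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
#define BITS_PER_LONG_SZ	(8 * sizeof(unsigned long))
/* Mirrors the kernel's BITS_TO_LONGS(): ceiling division by bits-per-long. */
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG_SZ - 1) / BITS_PER_LONG_SZ)

/* Old two-step calculation: bits -> bytes, then round bytes up to longs. */
static size_t usemap_size_old(size_t bits)
{
	size_t size_bytes;

	size_bytes = ROUNDUP(bits, 8) / 8;
	size_bytes = ROUNDUP(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

/* New one-step calculation: bits -> longs, then longs -> bytes. */
static size_t usemap_size_new(size_t bits)
{
	return BITS_TO_LONGS(bits) * sizeof(unsigned long);
}

int main(void)
{
	size_t bits;

	for (bits = 1; bits <= 4096; bits++)
		assert(usemap_size_old(bits) == usemap_size_new(bits));

	printf("old and new usemap_size() agree for 1..4096 bits\n");
	return 0;
}

The equivalence holds because rounding bits up to whole bytes and then bytes up to
whole longs is the same as rounding bits up to whole longs directly.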