author    Ingo Molnar <mingo@kernel.org>    2017-02-03 11:03:31 +0100
committer Ingo Molnar <mingo@kernel.org>    2017-03-02 08:42:25 +0100
commit    314ff7851fc8ea66cbf48eaa93d8ebfb5ca084a9 (patch)
tree      16567343faf3f9e94a56bbf431c1e54414cdbb6d
parent    780de9dd2720debc14c501dab4dc80d1f75ad50e (diff)
download  linux-314ff7851fc8ea66cbf48eaa93d8ebfb5ca084a9.tar.gz
          linux-314ff7851fc8ea66cbf48eaa93d8ebfb5ca084a9.tar.xz
mm/vmacache, sched/headers: Introduce 'struct vmacache' and move it from <linux/sched.h> to <linux/mm_types.h>
The <linux/sched.h> header includes various vmacache related defines,
which are arguably misplaced.

Move them to mm_types.h and minimize the sched.h impact by putting
all task vmacache state into a new 'struct vmacache' structure.

No change in functionality.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
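For illustration only, a minimal userspace sketch of the before/after layout, with simplified stand-ins for the kernel types (the field names mirror the patch; the harness and everything else are assumptions for the example):

#include <stdint.h>
#include <stdio.h>

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)

struct vm_area_struct;			/* opaque stand-in */

/* After the patch: all per-task VMA-cache state is grouped in one struct. */
struct vmacache {
	uint32_t seqnum;
	struct vm_area_struct *vmas[VMACACHE_SIZE];
};

/* Simplified stand-in for task_struct, showing only the affected field. */
struct task_struct {
#if 0
	/* Before: two loose fields directly in task_struct. */
	uint32_t vmacache_seqnum;
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
#endif
	/* After: one embedded structure. */
	struct vmacache vmacache;
};

int main(void)
{
	struct task_struct t = { .vmacache = { .seqnum = 0 } };

	/* Call sites change from current->vmacache[i] to current->vmacache.vmas[i]. */
	for (unsigned int i = 0; i < VMACACHE_SIZE; i++)
		printf("slot %u: %p\n", i, (void *)t.vmacache.vmas[i]);

	return 0;
}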
-rw-r--r--  include/linux/mm_types.h   | 12
-rw-r--r--  include/linux/sched.h      | 11
-rw-r--r--  include/linux/vmacache.h   |  2
-rw-r--r--  kernel/debug/debug_core.c  |  4
-rw-r--r--  mm/nommu.c                 |  2
-rw-r--r--  mm/vmacache.c              | 10
6 files changed, 25 insertions(+), 16 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4f6d440ad785..137797cd7b50 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -360,6 +360,18 @@ struct vm_area_struct {
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
+/*
+ * The per task VMA cache array:
+ */
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
+struct vmacache {
+ u32 seqnum;
+ struct vm_area_struct *vmas[VMACACHE_SIZE];
+};
+
struct core_thread {
struct task_struct *task;
struct core_thread *next;
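The cache defined above is direct-mapped: VMACACHE_BITS of 2 gives four slots, and VMACACHE_MASK selects a slot from an address's page number, which is the shape of the VMACACHE_HASH() lookup used in mm/vmacache.c further down. A minimal userspace sketch of the slot arithmetic, assuming 4K pages and standalone copies of the constants:

#include <stdio.h>

#define PAGE_SHIFT     12		/* assumed 4K pages, for illustration */
#define VMACACHE_BITS  2
#define VMACACHE_SIZE  (1U << VMACACHE_BITS)
#define VMACACHE_MASK  (VMACACHE_SIZE - 1)

/* Slot selection: page number of the address, masked to the cache size. */
#define VMACACHE_HASH(addr) (((addr) >> PAGE_SHIFT) & VMACACHE_MASK)

int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x2000, 0x5000, 0x7fff0000 };

	for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("addr 0x%lx -> slot %lu\n", addrs[i], VMACACHE_HASH(addrs[i]));

	return 0;
}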
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3f61baac928b..e87c97e1a947 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -134,10 +134,6 @@ struct blk_plug;
struct filename;
struct nameidata;
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
/*
* These are the constant used to fake the fixed-point load-average
* counting. Some notes:
@@ -1550,9 +1546,10 @@ struct task_struct {
#endif
struct mm_struct *mm, *active_mm;
- /* per-thread vma caching */
- u32 vmacache_seqnum;
- struct vm_area_struct *vmacache[VMACACHE_SIZE];
+
+ /* Per-thread vma caching: */
+ struct vmacache vmacache;
+
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
#endif
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd43949..1081db987391 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -12,7 +12,7 @@
static inline void vmacache_flush(struct task_struct *tsk)
{
- memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+ memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
}
extern void vmacache_flush_all(struct mm_struct *mm);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 79517e5549f1..a603ef28f70c 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -232,9 +232,9 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
int i;
for (i = 0; i < VMACACHE_SIZE; i++) {
- if (!current->vmacache[i])
+ if (!current->vmacache.vmas[i])
continue;
- flush_cache_range(current->vmacache[i],
+ flush_cache_range(current->vmacache.vmas[i],
addr, addr + BREAK_INSTR_SIZE);
}
}
diff --git a/mm/nommu.c b/mm/nommu.c
index fe9f4fa4a7a7..aae06e854552 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -757,7 +757,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
mm->map_count--;
for (i = 0; i < VMACACHE_SIZE; i++) {
/* if the vma is cached, invalidate the entire cache */
- if (curr->vmacache[i] == vma) {
+ if (curr->vmacache.vmas[i] == vma) {
vmacache_invalidate(mm);
break;
}
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 035fdeb35b43..7c233f8e20ee 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -60,7 +60,7 @@ static inline bool vmacache_valid_mm(struct mm_struct *mm)
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
if (vmacache_valid_mm(newvma->vm_mm))
- current->vmacache[VMACACHE_HASH(addr)] = newvma;
+ current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}
static bool vmacache_valid(struct mm_struct *mm)
@@ -71,12 +71,12 @@ static bool vmacache_valid(struct mm_struct *mm)
return false;
curr = current;
- if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
+ if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
/*
* First attempt will always be invalid, initialize
* the new cache for this task here.
*/
- curr->vmacache_seqnum = mm->vmacache_seqnum;
+ curr->vmacache.seqnum = mm->vmacache_seqnum;
vmacache_flush(curr);
return false;
}
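The check above is sequence-number based invalidation: the mm bumps mm->vmacache_seqnum when its VMAs change, and a task whose vmacache.seqnum lags behind treats its cache as stale, resyncs the seqnum and flushes the array. A minimal userspace sketch of that scheme; the helper names mirror the kernel functions shown in this diff, but the bodies and the main() harness are simplified assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VMACACHE_SIZE 4

struct vm_area_struct;			/* opaque stand-in */

struct mm_struct { uint32_t vmacache_seqnum; };

struct vmacache {
	uint32_t seqnum;
	struct vm_area_struct *vmas[VMACACHE_SIZE];
};

struct task_struct { struct vmacache vmacache; };

/* Invalidate every task's cache for this mm by bumping the mm's seqnum. */
static void vmacache_invalidate(struct mm_struct *mm)
{
	mm->vmacache_seqnum++;
}

/* A task's cache is only usable while its seqnum matches the mm's. */
static bool vmacache_valid(struct task_struct *curr, struct mm_struct *mm)
{
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/* Stale: resync the seqnum and drop the cached pointers. */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		memset(curr->vmacache.vmas, 0, sizeof(curr->vmacache.vmas));
		return false;
	}
	return true;
}

int main(void)
{
	struct mm_struct mm = { 0 };
	struct task_struct t = { 0 };

	printf("valid before invalidate: %d\n", vmacache_valid(&t, &mm)); /* 1 */
	vmacache_invalidate(&mm);
	printf("valid after invalidate:  %d\n", vmacache_valid(&t, &mm)); /* 0 */
	printf("valid on next lookup:    %d\n", vmacache_valid(&t, &mm)); /* 1 */

	return 0;
}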
@@ -93,7 +93,7 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
return NULL;
for (i = 0; i < VMACACHE_SIZE; i++) {
- struct vm_area_struct *vma = current->vmacache[i];
+ struct vm_area_struct *vma = current->vmacache.vmas[i];
if (!vma)
continue;
@@ -121,7 +121,7 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
return NULL;
for (i = 0; i < VMACACHE_SIZE; i++) {
- struct vm_area_struct *vma = current->vmacache[i];
+ struct vm_area_struct *vma = current->vmacache.vmas[i];
if (vma && vma->vm_start == start && vma->vm_end == end) {
count_vm_vmacache_event(VMACACHE_FIND_HITS);