author     Shaohua Li <shaohua.li@intel.com>  2010-03-05 08:59:32 +0800
committer  H. Peter Anvin <hpa@zytor.com>     2010-03-30 11:46:02 -0700
commit     8ae06d223f8203c72104e5c0c4ee49a000aedb42 (patch)
tree       42bf4c176833a42e21009d8bd0a61e653d4c3418 /arch/x86/power
parent     eed63519e3e74d515d2007ecd895338d0ba2a85c (diff)
download   linux-8ae06d223f8203c72104e5c0c4ee49a000aedb42.tar.gz
           linux-8ae06d223f8203c72104e5c0c4ee49a000aedb42.tar.xz
x86-32, resume: do a global tlb flush in S4 resume
Colin King reported a strange oops in the S4 resume code path (see below).
The test system has an i5/i7 CPU. The kernel doesn't enable PAE, so 4M page
tables are used. The oops always happens at virtual address 0xc03ff000, which
is mapped to the last 4k of the first 4M of memory. Doing a global TLB flush
fixes the issue.

EIP: 0060:[<c0493a01>] EFLAGS: 00010086 CPU: 0
EIP is at copy_loop+0xe/0x15
EAX: 36aeb000 EBX: 00000000 ECX: 00000400 EDX: f55ad46c
ESI: 0f800000 EDI: c03ff000 EBP: f67fbec4 ESP: f67fbea8
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
...
...
CR2: 00000000c03ff000

Tested-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
LKML-Reference: <20100305005932.GA22675@sli10-desk.sh.intel.com>
Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Cc: <stable@kernel.org>
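The fix relies on the fact that reloading CR3 alone does not invalidate TLB
entries marked global (PGE); a truly global flush is done by toggling CR4.PGE.
Below is a minimal C sketch of that technique, for illustration only; it is
not code from this patch, and read_cr3()/write_cr3()/read_cr4()/write_cr4()
and X86_CR4_PGE are assumed to be the usual x86 kernel accessors and constant.

    /*
     * Illustration only (not from this patch): flush the whole TLB,
     * including global entries, by clearing and restoring CR4.PGE.
     * The cr3/cr4 accessors are assumed kernel helpers (see above).
     */
    static inline void global_tlb_flush_sketch(void)
    {
            unsigned long cr4 = read_cr4();

            if (cr4 & X86_CR4_PGE) {
                    /* Clearing PGE invalidates all TLB entries, global ones included. */
                    write_cr4(cr4 & ~X86_CR4_PGE);
                    write_cr4(cr4);                 /* turn PGE back on */
            } else {
                    /* No PGE: a CR3 reload already flushes everything. */
                    write_cr3(read_cr3());
            }
    }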
Diffstat (limited to 'arch/x86/power')
-rw-r--r--  arch/x86/power/hibernate_asm_32.S | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index b641388d8286..ad47daeafa4e 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
ret
ENTRY(restore_image)
+ movl mmu_cr4_features, %ecx
movl resume_pg_dir, %eax
subl $__PAGE_OFFSET, %eax
movl %eax, %cr3
+ jecxz 1f # cr4 Pentium and higher, skip if zero
+ andl $~(X86_CR4_PGE), %ecx
+ movl %ecx, %cr4; # turn off PGE
+ movl %cr3, %eax; # flush TLB
+ movl %eax, %cr3
+1:
movl restore_pblist, %edx
.p2align 4,,7
@@ -54,16 +61,8 @@ done:
movl $swapper_pg_dir, %eax
subl $__PAGE_OFFSET, %eax
movl %eax, %cr3
- /* Flush TLB, including "global" things (vmalloc) */
movl mmu_cr4_features, %ecx
jecxz 1f # cr4 Pentium and higher, skip if zero
- movl %ecx, %edx
- andl $~(X86_CR4_PGE), %edx
- movl %edx, %cr4; # turn off PGE
-1:
- movl %cr3, %eax; # flush TLB
- movl %eax, %cr3
- jecxz 1f # cr4 Pentium and higher, skip if zero
movl %ecx, %cr4; # turn PGE back on
1:
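Taken together, the two hunks move the flush earlier: the restore_image
prologue now clears CR4.PGE and reloads CR3 right after resume_pg_dir is
installed, so no stale global translation can survive into copy_loop, and the
code after done: only has to turn PGE back on once swapper_pg_dir is active
again. (This reading is inferred from the diff above; the changelog itself
only notes that a global TLB flush fixes the oops.)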