path: root/arch/powerpc/kvm/book3s_32_sr.S
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRS = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

#define XCHG_SR(n)	lwz	r9, (SVCPU_SR+(n*4))(r3);  \
			mtsr	n, r9
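
	/* Despite the "XCHG" name, this macro only loads: it fetches the
	 * guest's value for segment register n from the shadow vcpu and
	 * installs it with mtsr, using r9 as scratch. */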

	XCHG_SR(0)
	XCHG_SR(1)
	XCHG_SR(2)
	XCHG_SR(3)
	XCHG_SR(4)
	XCHG_SR(5)
	XCHG_SR(6)
	XCHG_SR(7)
	XCHG_SR(8)
	XCHG_SR(9)
	XCHG_SR(10)
	XCHG_SR(11)
	XCHG_SR(12)
	XCHG_SR(13)
	XCHG_SR(14)
	XCHG_SR(15)

	/* Clear BATs. */

#define KVM_KILL_BAT(n, reg)		\
	mtspr	SPRN_IBAT##n##U,reg;	\
	mtspr	SPRN_IBAT##n##L,reg;	\
	mtspr	SPRN_DBAT##n##U,reg;	\
	mtspr	SPRN_DBAT##n##L,reg

	li	r9, 0
	KVM_KILL_BAT(0, r9)
	KVM_KILL_BAT(1, r9)
	KVM_KILL_BAT(2, r9)
	KVM_KILL_BAT(3, r9)
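
	/* A zero upper BAT word clears the Vs/Vp valid bits; since BAT
	 * hits take priority over segment translation, no host BAT
	 * mapping can leak into the guest. */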

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore BATs */

	/* The entry path zeroed both the upper and lower halves of every
	   BAT, so restore both halves from the host's saved values. */
#define KVM_LOAD_BAT(n, reg, RA, RB)	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB

	lis     r9, BATS@ha
	addi    r9, r9, BATS@l
	tophys(r9, r9)
	KVM_LOAD_BAT(0, r9, r10, r11)
	KVM_LOAD_BAT(1, r9, r10, r11)
	KVM_LOAD_BAT(2, r9, r10, r11)
	KVM_LOAD_BAT(3, r9, r10, r11)
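
	/* BATS is the kernel's saved copy of the host BAT values; the
	 * tophys() above is needed because MSR[IR/DR] are still clear,
	 * so r9 must hold a physical address. */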

	/* Restore Segment Registers */

	/* 0xc - 0xf */

	li	r0, 4
	mtctr	r0
	LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
	lis	r4, 0xc000
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
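
	/* Each pass advances the VSID by 0x111 and r4 by 0x1000 << 16 =
	 * 0x10000000 (one 256MB segment); 0x20000000 sets Kp. This
	 * mirrors the kernel's own early segment-register setup. */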

	/* 0x0 - 0xb */

	/* 'current->mm' needs to be in r4 */
	tophys(r4, r2)
	lwz	r4, MM(r4)
	tophys(r4, r4)
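	/* switch_mmu_context() reloads the user segment registers
	 * (0x0 - 0xb) to match the host mm now in r4. */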
	/* This only clobbers r0, r3, r4 and r5 */
	bl	switch_mmu_context

.endm