
Commit 9a32a7e

npiggin authored and mpe committed
powerpc/64s: flush L1D after user accesses
IBM Power9 processors can speculatively operate on data in the L1 cache before it has been completely validated, via a way-prediction mechanism. It is not possible for an attacker to determine the contents of impermissible memory using this method, since these systems implement a combination of hardware and software security measures to prevent scenarios where protected data could be leaked.

However these measures don't address the scenario where an attacker induces the operating system to speculatively execute instructions using data that the attacker controls. This can be used for example to speculatively bypass "kernel user access prevention" techniques, as discovered by Anthony Steinhauser of Google's Safeside Project. This is not an attack by itself, but there is a possibility it could be used in conjunction with side-channels or other weaknesses in the privileged code to construct an attack.

This issue can be mitigated by flushing the L1 cache between privilege boundaries of concern. This patch flushes the L1 cache after user accesses.

This is part of the fix for CVE-2020-4788.

Signed-off-by: Nicholas Piggin <[email protected]>
Signed-off-by: Daniel Axtens <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
1 parent f796437 commit 9a32a7e

File tree

13 files changed: +233 -90 lines changed

Documentation/admin-guide/kernel-parameters.txt (+4)

@@ -2859,6 +2859,7 @@
 			tsx_async_abort=off [X86]
 			kvm.nx_huge_pages=off [X86]
 			no_entry_flush [PPC]
+			no_uaccess_flush [PPC]
 
 			Exceptions:
 			This does not have any effect on
@@ -3238,6 +3239,9 @@
 	nospec_store_bypass_disable
 			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
 
+	no_uaccess_flush
+			[PPC] Don't flush the L1-D cache after accessing user data.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
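For illustration: the new flag is passed like any other boot option, appended to the kernel command line. The surrounding arguments below are hypothetical; only no_uaccess_flush comes from this patch:

    root=/dev/sda2 console=hvc0 no_uaccess_flush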

arch/powerpc/include/asm/book3s/64/kup-radix.h (+42 -24)

@@ -61,6 +61,8 @@
 
 #else /* !__ASSEMBLY__ */
 
+DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+
 #ifdef CONFIG_PPC_KUAP
 
 #include <asm/mmu.h>
@@ -103,8 +105,16 @@ static inline void kuap_check_amr(void)
 
 static inline unsigned long get_kuap(void)
 {
+	/*
+	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
+	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
+	 * cause restore_user_access to do a flush.
+	 *
+	 * This has no effect in terms of actually blocking things on hash,
+	 * so it doesn't break anything.
+	 */
 	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
-		return 0;
+		return AMR_KUAP_BLOCKED;
 
 	return mfspr(SPRN_AMR);
 }
@@ -123,6 +133,31 @@ static inline void set_kuap(unsigned long value)
 	isync();
 }
 
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
+}
+#else /* CONFIG_PPC_KUAP */
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+	return 0UL;
+}
+
+static inline void kuap_check_amr(void) { }
+
+static inline unsigned long get_kuap(void)
+{
+	return AMR_KUAP_BLOCKED;
+}
+
+static inline void set_kuap(unsigned long value) { }
+#endif /* !CONFIG_PPC_KUAP */
+
 static __always_inline void allow_user_access(void __user *to, const void __user *from,
 					      unsigned long size, unsigned long dir)
 {
@@ -142,44 +177,27 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
 				       unsigned long size, unsigned long dir)
 {
 	set_kuap(AMR_KUAP_BLOCKED);
+	if (static_branch_unlikely(&uaccess_flush_key))
+		do_uaccess_flush();
 }
 
 static inline unsigned long prevent_user_access_return(void)
 {
 	unsigned long flags = get_kuap();
 
 	set_kuap(AMR_KUAP_BLOCKED);
+	if (static_branch_unlikely(&uaccess_flush_key))
+		do_uaccess_flush();
 
 	return flags;
 }
 
 static inline void restore_user_access(unsigned long flags)
 {
 	set_kuap(flags);
+	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
+		do_uaccess_flush();
 }
-
-static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
-{
-	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
-		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
-		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
-}
-#else /* CONFIG_PPC_KUAP */
-static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
-{
-}
-
-static inline void kuap_check_amr(void)
-{
-}
-
-static inline unsigned long kuap_get_and_check_amr(void)
-{
-	return 0;
-}
-#endif /* CONFIG_PPC_KUAP */
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
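The flush is gated by uaccess_flush_key so that, with the mitigation disabled, each call site costs only a patched-out branch. A minimal sketch of this jump-label pattern, assuming only the generic <linux/jump_label.h> API; the file that actually defines the key and the setup code that enables it are part of this commit but outside this excerpt, and the example_* names are made up:

    #include <linux/jump_label.h>

    void do_uaccess_flush(void);	/* prototype added to asm/exception-64s.h below */

    /* Defined once somewhere in this series; declared in kup-radix.h above. */
    DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);

    static inline void example_prevent_user_access(void)
    {
    	/* Until the key is enabled, this branch is patched to a nop. */
    	if (static_branch_unlikely(&uaccess_flush_key))
    		do_uaccess_flush();
    }

    static void example_setup(bool enable)
    {
    	/* Boot code flips the key once, rewriting every branch site. */
    	if (enable)
    		static_branch_enable(&uaccess_flush_key);
    	else
    		static_branch_disable(&uaccess_flush_key);
    }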

arch/powerpc/include/asm/exception-64s.h (+3)

@@ -144,6 +144,9 @@
 	RFSCV;								\
 	b	rfscv_flush_fallback
 
+#else /* __ASSEMBLY__ */
+/* Prototype for function defined in exceptions-64s.S */
+void do_uaccess_flush(void);
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_EXCEPTION_H */

arch/powerpc/include/asm/feature-fixups.h (+9)

@@ -205,6 +205,14 @@ label##3:					       	\
 	FTR_ENTRY_OFFSET 955b-956b;			\
 	.popsection;
 
+#define UACCESS_FLUSH_FIXUP_SECTION			\
+959:							\
+	.pushsection __uaccess_flush_fixup,"a";		\
+	.align 2;					\
+960:							\
+	FTR_ENTRY_OFFSET 959b-960b;			\
+	.popsection;
+
 #define ENTRY_FLUSH_FIXUP_SECTION			\
 957:							\
 	.pushsection __entry_flush_fixup,"a";		\
@@ -248,6 +256,7 @@ extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
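Each use of UACCESS_FLUSH_FIXUP_SECTION emits one word into the __uaccess_flush_fixup section recording where the patchable instructions sit. Conceptually (a sketch of the layout, not the kernel's literal fixup-walking code; the helper name is made up):

    /* Declared above; the start/stop bounds are filled in by the linker. */
    extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;

    static unsigned int *uaccess_fixup_patch_site(long *entry)
    {
    	/* FTR_ENTRY_OFFSET 959b-960b stores (patch site - entry address),
    	 * so adding it back to the entry's own address yields the first
    	 * patchable instruction. */
    	return (unsigned int *)((unsigned long)entry + *entry);
    }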

arch/powerpc/include/asm/kup.h (+14 -5)

@@ -53,17 +53,26 @@ static inline void setup_kuep(bool disabled) { }
 void setup_kuap(bool disabled);
 #else
 static inline void setup_kuap(bool disabled) { }
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+	return false;
+}
+
+/*
+ * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
+ * the L1D cache after user accesses. Only include the empty stubs for other
+ * platforms.
+ */
+#ifndef CONFIG_PPC64
 static inline void allow_user_access(void __user *to, const void __user *from,
 				     unsigned long size, unsigned long dir) { }
 static inline void prevent_user_access(void __user *to, const void __user *from,
 				       unsigned long size, unsigned long dir) { }
 static inline unsigned long prevent_user_access_return(void) { return 0UL; }
 static inline void restore_user_access(unsigned long flags) { }
-static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
-{
-	return false;
-}
+#endif /* CONFIG_PPC64 */
 #endif /* CONFIG_PPC_KUAP */
 
 static inline void allow_read_from_user(const void __user *from, unsigned long size)

arch/powerpc/include/asm/security_features.h (+3)

@@ -89,13 +89,16 @@ static inline bool security_ftr_enabled(u64 feature)
 // The L1-D cache should be flushed when entering the kernel
 #define SEC_FTR_L1D_FLUSH_ENTRY		0x0000000000004000ull
 
+// The L1-D cache should be flushed after user accesses from the kernel
+#define SEC_FTR_L1D_FLUSH_UACCESS	0x0000000000008000ull
 
 // Features enabled by default
 #define SEC_FTR_DEFAULT \
 	(SEC_FTR_L1D_FLUSH_HV | \
 	 SEC_FTR_L1D_FLUSH_PR | \
 	 SEC_FTR_BNDS_CHK_SPEC_BAR | \
 	 SEC_FTR_L1D_FLUSH_ENTRY | \
+	 SEC_FTR_L1D_FLUSH_UACCESS | \
 	 SEC_FTR_FAVOUR_SECURITY)
 
 #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
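Boot-time setup can then gate the mitigation on this bit alongside the global favour-security switch. A sketch of the check, modeled on the existing entry/RFI flush setup; the actual setup code in this series lives in setup_64.c outside this excerpt, and uaccess_flush_wanted is a made-up helper name:

    #include <asm/security_features.h>

    static bool uaccess_flush_wanted(void)
    {
    	/* Both bits are in SEC_FTR_DEFAULT, so this defaults to true
    	 * unless firmware or the command line clears one of them. */
    	return security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
    	       security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
    }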

arch/powerpc/include/asm/setup.h (+1)

@@ -60,6 +60,7 @@ void setup_barrier_nospec(void);
 #else
 static inline void setup_barrier_nospec(void) { };
 #endif
+void do_uaccess_flush_fixups(enum l1d_flush_type types);
 void do_entry_flush_fixups(enum l1d_flush_type types);
 void do_barrier_nospec_fixups(bool enable);
 extern bool barrier_nospec_enabled;
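A hypothetical call site for the new prototype, mirroring how do_entry_flush_fixups() is driven; enabled_flush_types is assumed to hold the flush method detected at boot, and L1D_FLUSH_NONE is the existing enum l1d_flush_type value that patches the flush out entirely:

    if (enable)
    	do_uaccess_flush_fixups(enabled_flush_types);
    else
    	do_uaccess_flush_fixups(L1D_FLUSH_NONE);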

arch/powerpc/kernel/exceptions-64s.S (+26 -59)

@@ -2951,11 +2951,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
 	.endr
 	blr
 
-TRAMP_REAL_BEGIN(entry_flush_fallback)
-	std	r9,PACA_EXRFI+EX_R9(r13)
-	std	r10,PACA_EXRFI+EX_R10(r13)
-	std	r11,PACA_EXRFI+EX_R11(r13)
-	mfctr	r9
+/* Clobbers r10, r11, ctr */
+.macro L1D_DISPLACEMENT_FLUSH
 	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
 	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
 	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
@@ -2981,7 +2978,14 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
 	ld	r11,(0x80 + 8)*7(r10)
 	addi	r10,r10,0x80*8
 	bdnz	1b
+.endm
 
+TRAMP_REAL_BEGIN(entry_flush_fallback)
+	std	r9,PACA_EXRFI+EX_R9(r13)
+	std	r10,PACA_EXRFI+EX_R10(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	mfctr	r9
+	L1D_DISPLACEMENT_FLUSH
 	mtctr	r9
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -2997,32 +3001,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
 	std	r10,PACA_EXRFI+EX_R10(r13)
 	std	r11,PACA_EXRFI+EX_R11(r13)
 	mfctr	r9
-	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
-	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
-	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
-	mtctr	r11
-	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
-	/* order ld/st prior to dcbt stop all streams with flushing */
-	sync
-
-	/*
-	 * The load adresses are at staggered offsets within cachelines,
-	 * which suits some pipelines better (on others it should not
-	 * hurt).
-	 */
-1:
-	ld	r11,(0x80 + 8)*0(r10)
-	ld	r11,(0x80 + 8)*1(r10)
-	ld	r11,(0x80 + 8)*2(r10)
-	ld	r11,(0x80 + 8)*3(r10)
-	ld	r11,(0x80 + 8)*4(r10)
-	ld	r11,(0x80 + 8)*5(r10)
-	ld	r11,(0x80 + 8)*6(r10)
-	ld	r11,(0x80 + 8)*7(r10)
-	addi	r10,r10,0x80*8
-	bdnz	1b
-
+	L1D_DISPLACEMENT_FLUSH
 	mtctr	r9
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -3040,32 +3019,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
 	std	r10,PACA_EXRFI+EX_R10(r13)
 	std	r11,PACA_EXRFI+EX_R11(r13)
 	mfctr	r9
-	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
-	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
-	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
-	mtctr	r11
-	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
-	/* order ld/st prior to dcbt stop all streams with flushing */
-	sync
-
-	/*
-	 * The load adresses are at staggered offsets within cachelines,
-	 * which suits some pipelines better (on others it should not
-	 * hurt).
-	 */
-1:
-	ld	r11,(0x80 + 8)*0(r10)
-	ld	r11,(0x80 + 8)*1(r10)
-	ld	r11,(0x80 + 8)*2(r10)
-	ld	r11,(0x80 + 8)*3(r10)
-	ld	r11,(0x80 + 8)*4(r10)
-	ld	r11,(0x80 + 8)*5(r10)
-	ld	r11,(0x80 + 8)*6(r10)
-	ld	r11,(0x80 + 8)*7(r10)
-	addi	r10,r10,0x80*8
-	bdnz	1b
-
+	L1D_DISPLACEMENT_FLUSH
 	mtctr	r9
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -3116,8 +3070,21 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
 	RFSCV
 
 USE_TEXT_SECTION()
-	MASKED_INTERRUPT
-	MASKED_INTERRUPT hsrr=1
+
+_GLOBAL(do_uaccess_flush)
+UACCESS_FLUSH_FIXUP_SECTION
+	nop
+	nop
+	nop
+	blr
+	L1D_DISPLACEMENT_FLUSH
+	blr
+_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
+EXPORT_SYMBOL(do_uaccess_flush)
+
+
+	MASKED_INTERRUPT
+	MASKED_INTERRUPT hsrr=1
 
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 kvmppc_skip_interrupt:
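do_uaccess_flush is built to be patched at boot: by default the three nops and the first blr make it a no-op return, and the UACCESS_FLUSH_FIXUP_SECTION entry records the site so those four instruction slots can be rewritten once the flush type is known. A simplified C sketch of the patcher's shape, modeled on the existing do_entry_flush_fixups(); the real do_uaccess_flush_fixups() lives in lib/feature-fixups.c (part of this commit but outside this excerpt), the handling of the ori/mttrig flush types and the actual instruction-patching call are elided here:

    #include <asm/setup.h>		/* enum l1d_flush_type */
    #include <asm/feature-fixups.h>	/* __start/__stop___uaccess_flush_fixup */

    void do_uaccess_flush_fixups(enum l1d_flush_type types)
    {
    	unsigned int instrs[4], *dest;
    	long *start = &__start___uaccess_flush_fixup;
    	long *end = &__stop___uaccess_flush_fixup;

    	/* Default: nop; nop; nop; blr, i.e. return without flushing. */
    	instrs[0] = instrs[1] = instrs[2] = 0x60000000;	/* nop */
    	instrs[3] = 0x4e800020;				/* blr */

    	if (types == L1D_FLUSH_FALLBACK)
    		/* Turn the blr into a nop so execution falls through to
    		 * the inline L1D_DISPLACEMENT_FLUSH and the final blr. */
    		instrs[3] = 0x60000000;

    	/* (For the ori/mttrig flush types, the nops would instead be
    	 * replaced by the corresponding flush instructions; elided.) */

    	for (; start < end; start++) {
    		/* Each entry stores (patch site - entry address). */
    		dest = (void *)start + *start;
    		/* ...patch instrs[0..3] into place at dest... */
    	}
    }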
