Changes in 4.9.310
    arm64: errata: Provide macro for major and minor cpu revisions
    arm64: Remove useless UAO IPI and describe how this gets enabled
    arm64: Add MIDR encoding for Arm Cortex-A55 and Cortex-A35
    arm64: capabilities: Update prototype for enable call back
    arm64: capabilities: Move errata work around check on boot CPU
    arm64: capabilities: Move errata processing code
    arm64: capabilities: Prepare for fine grained capabilities
    arm64: capabilities: Add flags to handle the conflicts on late CPU
    arm64: capabilities: Clean up midr range helpers
    arm64: Add helpers for checking CPU MIDR against a range
    arm64: capabilities: Add support for checks based on a list of MIDRs
    clocksource/drivers/arm_arch_timer: Remove fsl-a008585 parameter
    clocksource/drivers/arm_arch_timer: Introduce generic errata handling infrastructure
    arm64: arch_timer: Add infrastructure for multiple erratum detection methods
    arm64: arch_timer: Add erratum handler for CPU-specific capability
    arm64: arch_timer: Add workaround for ARM erratum 1188873
    arm64: arch_timer: avoid unused function warning
    arm64: Add silicon-errata.txt entry for ARM erratum 1188873
    arm64: Make ARM64_ERRATUM_1188873 depend on COMPAT
    arm64: Add part number for Neoverse N1
    arm64: Add part number for Arm Cortex-A77
    arm64: Add Neoverse-N2, Cortex-A710 CPU part definition
    arm64: Add Cortex-X2 CPU part definition
    arm64: Add helper to decode register from instruction
    arm64: entry.S: Add ventry overflow sanity checks
    arm64: entry: Make the trampoline cleanup optional
    arm64: entry: Free up another register on kpti's tramp_exit path
    arm64: entry: Move the trampoline data page before the text page
    arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
    arm64: entry: Don't assume tramp_vectors is the start of the vectors
    arm64: entry: Move trampoline macros out of ifdef'd section
    arm64: entry: Make the kpti trampoline's kpti sequence optional
    arm64: entry: Allow the trampoline text to occupy multiple pages
    arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
    arm64: Move arm64_update_smccc_conduit() out of SSBD ifdef
    arm64: entry: Add vectors that have the bhb mitigation sequences
    arm64: entry: Add macro for reading symbol addresses from the trampoline
    arm64: Add percpu vectors for EL1
    KVM: arm64: Add templates for BHB mitigation sequences
    arm64: Mitigate spectre style branch history side channels
    KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
    arm64: add ID_AA64ISAR2_EL1 sys register
    arm64: Use the clearbhb instruction in mitigations
    Linux 4.9.310
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I689d7634aebe9d9ffba8d72d1d76bb237ca228a4
131 lines
2.9 KiB
ArmAsm
/*
 * Contains CPU specific branch predictor invalidation sequences
 *
 * Copyright (C) 2018 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
|
|
|
|
#include <linux/linkage.h>
|
|
#include <linux/arm-smccc.h>
|
|
|
|
.macro ventry target
|
|
.rept 31
|
|
nop
|
|
.endr
|
|
b \target
|
|
.endm
|
|
|
|
.macro vectors target
|
|
ventry \target + 0x000
|
|
ventry \target + 0x080
|
|
ventry \target + 0x100
|
|
ventry \target + 0x180
|
|
|
|
ventry \target + 0x200
|
|
ventry \target + 0x280
|
|
ventry \target + 0x300
|
|
ventry \target + 0x380
|
|
|
|
ventry \target + 0x400
|
|
ventry \target + 0x480
|
|
ventry \target + 0x500
|
|
ventry \target + 0x580
|
|
|
|
ventry \target + 0x600
|
|
ventry \target + 0x680
|
|
ventry \target + 0x700
|
|
ventry \target + 0x780
|
|
.endm
|
|
|
|
.align 11
|
|
ENTRY(__bp_harden_hyp_vecs_start)
|
|
.rept 4
|
|
vectors __kvm_hyp_vector
|
|
.endr
|
|
ENTRY(__bp_harden_hyp_vecs_end)
|
|
|
|
.macro smccc_workaround_1 inst
|
|
sub sp, sp, #(8 * 4)
|
|
stp x2, x3, [sp, #(8 * 0)]
|
|
stp x0, x1, [sp, #(8 * 2)]
|
|
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
|
|
\inst #0
|
|
ldp x2, x3, [sp, #(8 * 0)]
|
|
ldp x0, x1, [sp, #(8 * 2)]
|
|
add sp, sp, #(8 * 4)
|
|
.endm
|
|
|
|
ENTRY(__smccc_workaround_1_smc_start)
|
|
smccc_workaround_1 smc
|
|
ENTRY(__smccc_workaround_1_smc_end)
|
|
|
|
ENTRY(__smccc_workaround_1_hvc_start)
|
|
smccc_workaround_1 hvc
|
|
ENTRY(__smccc_workaround_1_hvc_end)
|
|
|
|
ENTRY(__smccc_workaround_3_smc_start)
|
|
sub sp, sp, #(8 * 4)
|
|
stp x2, x3, [sp, #(8 * 0)]
|
|
stp x0, x1, [sp, #(8 * 2)]
|
|
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
|
|
smc #0
|
|
ldp x2, x3, [sp, #(8 * 0)]
|
|
ldp x0, x1, [sp, #(8 * 2)]
|
|
add sp, sp, #(8 * 4)
|
|
ENTRY(__smccc_workaround_3_smc_end)
|
|
|
|
ENTRY(__spectre_bhb_loop_k8_start)
|
|
sub sp, sp, #(8 * 2)
|
|
stp x0, x1, [sp, #(8 * 0)]
|
|
mov x0, #8
|
|
2: b . + 4
|
|
subs x0, x0, #1
|
|
b.ne 2b
|
|
dsb nsh
|
|
isb
|
|
ldp x0, x1, [sp, #(8 * 0)]
|
|
add sp, sp, #(8 * 2)
|
|
ENTRY(__spectre_bhb_loop_k8_end)
|
|
|
|
ENTRY(__spectre_bhb_loop_k24_start)
|
|
sub sp, sp, #(8 * 2)
|
|
stp x0, x1, [sp, #(8 * 0)]
|
|
mov x0, #24
|
|
2: b . + 4
|
|
subs x0, x0, #1
|
|
b.ne 2b
|
|
dsb nsh
|
|
isb
|
|
ldp x0, x1, [sp, #(8 * 0)]
|
|
add sp, sp, #(8 * 2)
|
|
ENTRY(__spectre_bhb_loop_k24_end)
|
|
|
|
ENTRY(__spectre_bhb_loop_k32_start)
|
|
sub sp, sp, #(8 * 2)
|
|
stp x0, x1, [sp, #(8 * 0)]
|
|
mov x0, #32
|
|
2: b . + 4
|
|
subs x0, x0, #1
|
|
b.ne 2b
|
|
dsb nsh
|
|
isb
|
|
ldp x0, x1, [sp, #(8 * 0)]
|
|
add sp, sp, #(8 * 2)
|
|
ENTRY(__spectre_bhb_loop_k32_end)
|
|
|
|
ENTRY(__spectre_bhb_clearbhb_start)
|
|
hint #22 /* aka clearbhb */
|
|
isb
|
|
ENTRY(__spectre_bhb_clearbhb_end)
|