kernel-49/arch/powerpc/kvm/e500_emulate.c
Greg Kroah-Hartman 7515a820d6 Merge 4.9.169 into android-4.9
Changes in 4.9.169
	x86/power: Fix some ordering bugs in __restore_processor_context()
	x86/power/64: Use struct desc_ptr for the IDT in struct saved_context
	x86/power/32: Move SYSENTER MSR restoration to fix_processor_context()
	x86/power: Make restore_processor_context() sane
	powerpc/tm: Limit TM code inside PPC_TRANSACTIONAL_MEM
	kbuild: clang: choose GCC_TOOLCHAIN_DIR not on LD
	x86: vdso: Use $LD instead of $CC to link
	x86/vdso: Drop implicit common-page-size linker flag
	lib/string.c: implement a basic bcmp
	powerpc: Fix invalid use of register expressions
	powerpc/64s: Add barrier_nospec
	powerpc/64s: Add support for ori barrier_nospec patching
	powerpc: Avoid code patching freed init sections
	powerpc/64s: Patch barrier_nospec in modules
	powerpc/64s: Enable barrier_nospec based on firmware settings
	powerpc: Use barrier_nospec in copy_from_user()
	powerpc/64: Use barrier_nospec in syscall entry
	powerpc/64s: Enhance the information in cpu_show_spectre_v1()
	powerpc64s: Show ori31 availability in spectre_v1 sysfs file not v2
	powerpc/64: Disable the speculation barrier from the command line
	powerpc/64: Make stf barrier PPC_BOOK3S_64 specific.
	powerpc/64: Add CONFIG_PPC_BARRIER_NOSPEC
	powerpc/64: Call setup_barrier_nospec() from setup_arch()
	powerpc/64: Make meltdown reporting Book3S 64 specific
	powerpc/fsl: Add barrier_nospec implementation for NXP PowerPC Book3E
	powerpc/fsl: Sanitize the syscall table for NXP PowerPC 32 bit platforms
	powerpc/asm: Add a patch_site macro & helpers for patching instructions
	powerpc/64s: Add new security feature flags for count cache flush
	powerpc/64s: Add support for software count cache flush
	powerpc/pseries: Query hypervisor for count cache flush settings
	powerpc/powernv: Query firmware for count cache flush settings
	powerpc/fsl: Add infrastructure to fixup branch predictor flush
	powerpc/fsl: Add macro to flush the branch predictor
	powerpc/fsl: Fix spectre_v2 mitigations reporting
	powerpc/fsl: Emulate SPRN_BUCSR register
	powerpc/fsl: Add nospectre_v2 command line argument
	powerpc/fsl: Flush the branch predictor at each kernel entry (64bit)
	powerpc/fsl: Flush the branch predictor at each kernel entry (32 bit)
	powerpc/fsl: Flush branch predictor when entering KVM
	powerpc/fsl: Enable runtime patching if nospectre_v2 boot arg is used
	powerpc/fsl: Update Spectre v2 reporting
	powerpc/fsl: Fixed warning: orphan section `__btb_flush_fixup'
	powerpc/fsl: Fix the flush of branch predictor.
	powerpc/security: Fix spectre_v2 reporting
	arm64: kaslr: Reserve size of ARM64_MEMSTART_ALIGN in linear region
	tty: mark Siemens R3964 line discipline as BROKEN
	tty: ldisc: add sysctl to prevent autoloading of ldiscs
	ipv6: Fix dangling pointer when ipv6 fragment
	ipv6: sit: reset ip header pointer in ipip6_rcv
	kcm: switch order of device registration to fix a crash
	net: rds: force to destroy connection if t_sock is NULL in rds_tcp_kill_sock().
	openvswitch: fix flow actions reallocation
	qmi_wwan: add Olicard 600
	sctp: initialize _pad of sockaddr_in before copying to user memory
	tcp: Ensure DCTCP reacts to losses
	vrf: check accept_source_route on the original netdevice
	bnxt_en: Reset device on RX buffer errors.
	bnxt_en: Improve RX consumer index validity check.
	net/mlx5e: Add a lock on tir list
	netns: provide pure entropy for net_hash_mix()
	net: ethtool: not call vzalloc for zero sized memory request
	ip6_tunnel: Match to ARPHRD_TUNNEL6 for dev type
	ALSA: seq: Fix OOB-reads from strlcpy
	parisc: Detect QEMU earlier in boot process
	include/linux/bitrev.h: fix constant bitrev
	ASoC: fsl_esai: fix channel swap issue when stream starts
	Btrfs: do not allow trimming when a fs is mounted with the nologreplay option
	block: do not leak memory in bio_copy_user_iov()
	genirq: Respect IRQCHIP_SKIP_SET_WAKE in irq_chip_set_wake_parent()
	virtio: Honour 'may_reduce_num' in vring_create_virtqueue
	ARM: dts: at91: Fix typo in ISC_D0 on PC9
	arm64: futex: Fix FUTEX_WAKE_OP atomic ops with non-zero result value
	parisc: Use cr16 interval timers unconditionally on qemu
	xen: Prevent buffer overflow in privcmd ioctl
	sched/fair: Do not re-read ->h_load_next during hierarchical load calculation
	xtensa: fix return_address
	PCI: Add function 1 DMA alias quirk for Marvell 9170 SATA controller
	Linux 4.9.169

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2019-04-18 02:14:10 +03:00


/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

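/*
 * Summary comment (added for readability; not in the upstream file):
 * this file emulates the e500-family privileged primary opcode 31
 * instructions (dcbtls, msgsnd/msgclr, TLB management, mftmr, ehpriv)
 * and the mtspr/mfspr accesses that the generic Book E emulator in
 * booke.c does not handle itself.
 */
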
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
#include <asm/reg_booke.h>

#include "booke.h"
#include "e500.h"

#define XOP_DCBTLS  166
#define XOP_MSGSND  206
#define XOP_MSGCLR  238
#define XOP_MFTMR   366
#define XOP_TLBIVAX 786
#define XOP_TLBSX   914
#define XOP_TLBRE   946
#define XOP_TLBWE   978
#define XOP_TLBILX  18
#define XOP_EHPRIV  270
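/*
 * Extended-opcode values (as returned by get_xop()) for the primary
 * opcode 31 instructions handled in kvmppc_core_emulate_op_e500() below.
 */
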
#ifdef CONFIG_KVM_E500MC
static int dbell2prio(ulong param)
{
	int msg = param & PPC_DBELL_TYPE_MASK;
	int prio = -1;

	switch (msg) {
	case PPC_DBELL_TYPE(PPC_DBELL):
		prio = BOOKE_IRQPRIO_DBELL;
		break;
	case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
		prio = BOOKE_IRQPRIO_DBELL_CRIT;
		break;
	default:
		break;
	}

	return prio;
}

static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.gpr[rb];
	int prio = dbell2prio(param);

	if (prio < 0)
		return EMULATE_FAIL;

	clear_bit(prio, &vcpu->arch.pending_exceptions);

	return EMULATE_DONE;
}

static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.gpr[rb];
	int prio = dbell2prio(rb);
	int pir = param & PPC_DBELL_PIR_MASK;
	int i;
	struct kvm_vcpu *cvcpu;

	if (prio < 0)
		return EMULATE_FAIL;

	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
		int cpir = cvcpu->arch.shared->pir;
		if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
			set_bit(prio, &cvcpu->arch.pending_exceptions);
			kvm_vcpu_kick(cvcpu);
		}
	}

	return EMULATE_DONE;
}
#endif
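
/*
 * ehpriv with OC == EHPRIV_OC_DEBUG is forwarded to userspace as a
 * KVM_EXIT_DEBUG exit; *advance = 0 leaves the guest PC on the trapping
 * instruction.
 */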
static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;

	switch (get_oc(inst)) {
	case EHPRIV_OC_DEBUG:
		run->exit_reason = KVM_EXIT_DEBUG;
		run->debug.arch.address = vcpu->arch.pc;
		run->debug.arch.status = 0;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		emulated = EMULATE_EXIT_USER;
		*advance = 0;
		break;
	default:
		emulated = EMULATE_FAIL;
	}
	return emulated;
}

static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Always fail to lock the cache */
	vcpu_e500->l1csr0 |= L1CSR0_CUL;
	return EMULATE_DONE;
}

static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
				  int rt)
{
	/* Expose one thread per vcpu */
	if (get_tmrn(inst) == TMRN_TMCFG0) {
		kvmppc_set_gpr(vcpu, rt,
			       1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}
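
/*
 * Top-level instruction emulation: dispatch on the extended opcode of
 * primary opcode 31 instructions listed in the XOP_* table above; any
 * instruction this file does not recognise falls back to the generic
 * Book E emulator, kvmppc_booke_emulate_op().
 */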
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_DCBTLS:
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

		case XOP_TLBILX: {
			int type = rt & 0x3;
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
							   advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
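
/*
 * mtspr emulation: update the vcpu's shadow copies of the e500 SPRs
 * (PID*, MAS*, L1CSR*, HID*, IVORs, ...); SPRs not handled here are
 * passed on to kvmppc_booke_emulate_mtspr().
 */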
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;
	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;
	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;
	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
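
/*
 * mfspr emulation: return the shadow values maintained above; SPRs not
 * handled here fall through to kvmppc_booke_emulate_mfspr().
 */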
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;
	case SPRN_MMUCSR0:
		*spr_val = 0;
		break;
	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;
	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}