Support 1T segments on ppc

Traditionally, the "segments" used for the two-stage translation on
powerpc MMUs were 256MB in size.  This was the only option on all hash
page table based 32-bit powerpc CPUs, and on the earlier 64-bit hash page
table based CPUs.  However, newer 64-bit CPUs also permit 1TB segments.

This patch adds support for 1TB segment translation to QEMU.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Author:    David Gibson, 2011-04-01 15:15:18 +11:00
Committer: Alexander Graf
parent 256cebe5d1
commit cdaee00633
2 changed files with 45 additions and 12 deletions
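
For orientation before the diff, here is a minimal standalone sketch
(illustration only, not part of the patch) of how an effective address
splits into segment selector (ESID) and in-segment offset under each
segment size; the 28- and 40-bit boundaries correspond to the
SEGMENT_SHIFT_256M and SEGMENT_SHIFT_1T constants the patch introduces
below, and the sample address is arbitrary:

/* Illustration only: effective-address split for 256M vs 1T segments */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t eaddr = 0x0000123456789abcULL;   /* arbitrary sample address */

    /* 256MB segments: bits above bit 27 select the segment (ESID) */
    printf("256M: ESID %#" PRIx64 " offset %#" PRIx64 "\n",
           eaddr >> 28, eaddr & ((1ULL << 28) - 1));
    /* 1TB segments: bits above bit 39 select the segment */
    printf("1T:   ESID %#" PRIx64 " offset %#" PRIx64 "\n",
           eaddr >> 40, eaddr & ((1ULL << 40) - 1));
    return 0;
}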

target-ppc/cpu.h

@@ -114,6 +114,7 @@ enum powerpc_mmu_t {
     POWERPC_MMU_601        = 0x0000000A,
 #if defined(TARGET_PPC64)
 #define POWERPC_MMU_64       0x00010000
+#define POWERPC_MMU_1TSEG    0x00020000
     /* 64 bits PowerPC MMU */
     POWERPC_MMU_64B        = POWERPC_MMU_64 | 0x00000001,
     /* 620 variant (no segment exceptions) */
@@ -382,9 +383,11 @@ struct ppc_slb_t {
 /* Bits in the SLB VSID word */
 #define SLB_VSID_SHIFT         12
+#define SLB_VSID_SHIFT_1T      24
 #define SLB_VSID_SSIZE_SHIFT   62
 #define SLB_VSID_B             0xc000000000000000ULL
 #define SLB_VSID_B_256M        0x0000000000000000ULL
+#define SLB_VSID_B_1T          0x4000000000000000ULL
 #define SLB_VSID_VSID          0x3FFFFFFFFFFFF000ULL
 #define SLB_VSID_PTEM          (SLB_VSID_B | SLB_VSID_VSID)
 #define SLB_VSID_KS            0x0000000000000800ULL
@@ -398,6 +401,10 @@ struct ppc_slb_t {
 #define SEGMENT_SHIFT_256M     28
 #define SEGMENT_MASK_256M      (~((1ULL << SEGMENT_SHIFT_256M) - 1))
 
+#define SEGMENT_SHIFT_1T       40
+#define SEGMENT_MASK_1T        (~((1ULL << SEGMENT_SHIFT_1T) - 1))
+
 /*****************************************************************************/
 /* Machine state register bits definition */
 #define MSR_SF       63 /* Sixty-four-bit mode                        hflags */
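
A quick sanity check on the new constants (standalone sketch, not part
of the patch): the masks select exactly the segment-choosing bits of an
effective address, and the B field values sit in the top two bits of
the VSID word:

#include <assert.h>
#include <stdint.h>

#define SEGMENT_SHIFT_256M 28
#define SEGMENT_SHIFT_1T   40
#define SEGMENT_MASK_256M  (~((1ULL << SEGMENT_SHIFT_256M) - 1))
#define SEGMENT_MASK_1T    (~((1ULL << SEGMENT_SHIFT_1T) - 1))

int main(void)
{
    /* The masks keep only the ESID bits of an effective address */
    assert(SEGMENT_MASK_256M == 0xFFFFFFFFF0000000ULL);
    assert(SEGMENT_MASK_1T   == 0xFFFFFF0000000000ULL);
    /* SLB_VSID_B (0xc000...) covers the top two VSID-word bits;
     * B = 0b01 (SLB_VSID_B_1T, 0x4000...) marks a 1T entry,
     * B = 0b00 (SLB_VSID_B_256M) a 256M one */
    return 0;
}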

target-ppc/helper.c

@@ -675,19 +675,26 @@ static inline int find_pte(CPUState *env, mmu_ctx_t *ctx, int h, int rw,
 #if defined(TARGET_PPC64)
 static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
 {
-    uint64_t esid;
+    uint64_t esid_256M, esid_1T;
     int n;
 
     LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
 
-    esid = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
+    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
+    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
 
     for (n = 0; n < env->slb_nr; n++) {
         ppc_slb_t *slb = &env->slb[n];
 
         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                 PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
 
-        if (slb->esid == esid) {
+        /* We check for 1T matches on all MMUs here - if the MMU
+         * doesn't have 1T segment support, we will have prevented 1T
+         * entries from being inserted in the slbmte code. */
+        if (((slb->esid == esid_256M) &&
+             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
+            || ((slb->esid == esid_1T) &&
+                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
             return slb;
         }
     }
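
The point of computing both esid_256M and esid_1T up front: an SLB
entry's ESID field covers a different number of address bits depending
on that entry's own B field, so each entry must be compared at the
width its B field implies. A simplified standalone restatement (a
sketch; the SLB_ESID_V bit position is taken from QEMU's cpu.h and is
an assumption here, as that define is not shown in this hunk):

#include <stdbool.h>
#include <stdint.h>

#define SLB_ESID_V      0x0000000008000000ULL /* valid bit (per cpu.h) */
#define SLB_VSID_B      0xc000000000000000ULL
#define SLB_VSID_B_256M 0x0000000000000000ULL
#define SLB_VSID_B_1T   0x4000000000000000ULL

struct slb_entry { uint64_t esid, vsid; };

/* Match one entry against an address at the width its B field implies */
static bool slb_match(const struct slb_entry *slb, uint64_t eaddr)
{
    uint64_t esid_256M = (eaddr & ~((1ULL << 28) - 1)) | SLB_ESID_V;
    uint64_t esid_1T   = (eaddr & ~((1ULL << 40) - 1)) | SLB_ESID_V;

    return ((slb->esid == esid_256M) &&
            ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
        || ((slb->esid == esid_1T) &&
            ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T));
}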
@@ -740,14 +747,20 @@ void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0)
 int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs)
 {
     int slot = rb & 0xfff;
-    uint64_t esid = rb & ~0xfff;
     ppc_slb_t *slb = &env->slb[slot];
 
-    if (slot >= env->slb_nr) {
-        return -1;
+    if (rb & (0x1000 - env->slb_nr)) {
+        return -1; /* Reserved bits set or slot too high */
+    }
+    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+        return -1; /* Bad segment size */
+    }
+    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
+        return -1; /* 1T segment on MMU that doesn't support it */
     }
 
-    slb->esid = esid;
+    /* Mask out the slot number as we store the entry */
+    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
     slb->vsid = rs;
 
     LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
@@ -799,6 +812,7 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx,
     if (env->mmu_model & POWERPC_MMU_64) {
         ppc_slb_t *slb;
         target_ulong pageaddr;
+        int segment_bits;
 
         LOG_MMU("Check SLBs\n");
         slb = slb_lookup(env, eaddr);
@@ -806,7 +820,14 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx,
             return -5;
         }
 
-        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
+        if (slb->vsid & SLB_VSID_B) {
+            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
+            segment_bits = 40;
+        } else {
+            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
+            segment_bits = 28;
+        }
+
         target_page_bits = (slb->vsid & SLB_VSID_L)
             ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
         ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP)
@@ -814,11 +835,16 @@ static inline int get_segment(CPUState *env, mmu_ctx_t *ctx,
         ds = 0;
         ctx->nx = !!(slb->vsid & SLB_VSID_N);
 
-        pageaddr = eaddr & ((1ULL << 28) - (1ULL << target_page_bits));
-        /* XXX: this is false for 1 TB segments */
-        hash = vsid ^ (pageaddr >> target_page_bits);
+        pageaddr = eaddr & ((1ULL << segment_bits)
+                            - (1ULL << target_page_bits));
+        if (slb->vsid & SLB_VSID_B) {
+            hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits);
+        } else {
+            hash = vsid ^ (pageaddr >> target_page_bits);
+        }
         /* Only 5 bits of the page index are used in the AVPN */
-        ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | ((pageaddr >> 16) & 0x0F80);
+        ctx->ptem = (slb->vsid & SLB_VSID_PTEM) |
+            ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80));
     } else
 #endif /* defined(TARGET_PPC64) */
     {
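
For reference, the primary hash the new code computes, pulled out as a
standalone function (a sketch mirroring the logic above, not QEMU's
actual helper): for 1T segments the VSID is folded into itself with a
25-bit shift before the page index is xored in, which is what the old
"XXX: this is false for 1 TB segments" comment was flagging; 256M
segments keep the plain xor.

#include <stdint.h>

static uint64_t pteg_hash(uint64_t vsid, uint64_t eaddr,
                          int segment_bits, int page_bits)
{
    /* Page index within the segment */
    uint64_t pageaddr = eaddr & ((1ULL << segment_bits)
                                 - (1ULL << page_bits));

    if (segment_bits == 40) {               /* 1T segment */
        return vsid ^ (vsid << 25) ^ (pageaddr >> page_bits);
    }
    return vsid ^ (pageaddr >> page_bits);  /* 256M segment */
}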