cputlb: Hoist tlb portions in tlb_mmu_resize_locked
No functional change, but the smaller expressions make the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
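In miniature, the refactor replaces repeated env_tlb(env)->d[mmu_idx] / env_tlb(env)->f[mmu_idx] expressions with two pointers hoisted at the call site. A minimal sketch of the pattern, using illustrative stand-in types and names (Desc, Fast, TlbState, resize_hoisted and friends are hypothetical, not the QEMU definitions):

#include <stddef.h>

/* Illustrative stand-ins only; the real CPUTLBDesc/CPUTLBDescFast live in QEMU. */
typedef struct { void *iotlb; } Desc;
typedef struct { void *table; size_t mask; } Fast;
typedef struct { Desc d[16]; Fast f[16]; } TlbState;

/* Before: every access spells out the full indexed path. */
static void resize_unhoisted(TlbState *tlb, int mmu_idx, size_t new_size)
{
    tlb->f[mmu_idx].mask = new_size - 1;
    /* ...each further use repeats tlb->f[mmu_idx]. or tlb->d[mmu_idx]. ... */
}

/* After: the caller hoists the two sub-structures into pointers once,
 * so the body only needs short fast-> / desc-> expressions. */
static void resize_hoisted(Desc *desc, Fast *fast, size_t new_size)
{
    (void)desc;                  /* desc would be used for the iotlb side */
    fast->mask = new_size - 1;
}

static void flush_one(TlbState *tlb, int mmu_idx, size_t new_size)
{
    /* The hoist happens at the call site, as in the patch below. */
    resize_hoisted(&tlb->d[mmu_idx], &tlb->f[mmu_idx], new_size);
}

Taking the two portions as separate parameters also keeps the function independent of CPUArchState and mmu_idx: it can only touch the one TLB it was handed.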
parent 722a1c1e97
commit 71ccd47ba5
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -115,8 +115,8 @@ static void tlb_dyn_init(CPUArchState *env)
 
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
- * @env: CPU that owns the TLB
- * @mmu_idx: MMU index of the TLB
+ * @desc: The CPUTLBDesc portion of the TLB
+ * @fast: The CPUTLBDescFast portion of the same TLB
  *
  * Called with tlb_lock_held.
  *
@@ -153,10 +153,9 @@ static void tlb_dyn_init(CPUArchState *env)
  * high), since otherwise we are likely to have a significant amount of
  * conflict misses.
  */
-static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
 {
-    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
-    size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
+    size_t old_size = tlb_n_entries(fast);
     size_t rate;
     size_t new_size = old_size;
     int64_t now = get_clock_realtime();
@@ -198,14 +197,15 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
         return;
     }
 
-    g_free(env_tlb(env)->f[mmu_idx].table);
-    g_free(env_tlb(env)->d[mmu_idx].iotlb);
+    g_free(fast->table);
+    g_free(desc->iotlb);
 
     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
-    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
-    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
-    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+    fast->table = g_try_new(CPUTLBEntry, new_size);
+    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
 
     /*
      * If the allocations fail, try smaller sizes. We just freed some
      * memory, so going back to half of new_size has a good chance of working.
@@ -213,25 +213,24 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
      * allocations to fail though, so we progressively reduce the allocation
      * size, aborting if we cannot even allocate the smallest TLB we support.
      */
-    while (env_tlb(env)->f[mmu_idx].table == NULL ||
-           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
+    while (fast->table == NULL || desc->iotlb == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
         }
         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
-        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 
-        g_free(env_tlb(env)->f[mmu_idx].table);
-        g_free(env_tlb(env)->d[mmu_idx].iotlb);
-        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
-        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+        g_free(fast->table);
+        g_free(desc->iotlb);
+        fast->table = g_try_new(CPUTLBEntry, new_size);
+        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
     }
 }
 
 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
 {
-    tlb_mmu_resize_locked(env, mmu_idx);
+    tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
     env_tlb(env)->d[mmu_idx].n_used_entries = 0;
     env_tlb(env)->d[mmu_idx].large_page_addr = -1;
     env_tlb(env)->d[mmu_idx].large_page_mask = -1;
 