mirror of https://github.com/xemu-project/xemu.git
Remove another limit to NB_MMU_MODES.
Fix compilation using uclibc.
Fix defaulting of -accel parameters.
Tidy cputlb basic routines.
Adjust git.orderfile for decodetree.

-----BEGIN PGP SIGNATURE-----

iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAl4ntwIdHHJpY2hhcmQu
aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV97BAgAjIaYqJZIXOeQldgw
fBZT0u8awQD7ml8wubbYoA+8H5wmZYppVsBibkuHj2IWKKjAwZF5erdp9OaxuEMK
siFyqVsdZvi3+gSEYoB+iEpe4u1XLFVDw/dWP7nYYMY7/Safq4u1oZhvJzAvuL14
A6tArGWG8whYsB6LkXdWyngEoFqCBvWpGj5TClVXsdI2Ekf4frJe/uYxIsC13RvE
RlpGo4pUimC+ZnX4jP1J0zJH3oDodUOn2mNquxeQavtt1V1A4/1t7Vww5v3KumrS
RjSuKhUsPLGi+y2enH3QEsnpj0yPmYi80DypG8aXlT619DZwEpFzPrKAq9/lutKO
8dfjjg==
=KOTc
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20200121' into staging

Remove another limit to NB_MMU_MODES.
Fix compilation using uclibc.
Fix defaulting of -accel parameters.
Tidy cputlb basic routines.
Adjust git.orderfile for decodetree.

# gpg: Signature made Wed 22 Jan 2020 02:44:18 GMT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20200121:
  scripts/git.orderfile: Display decodetree before C source
  cputlb: Hoist timestamp outside of loops over tlbs
  cputlb: Initialize tlbs as flushed
  cputlb: Partially merge tlb_dyn_init into tlb_init
  cputlb: Split out tlb_mmu_flush_locked
  cputlb: Hoist tlb portions in tlb_flush_one_mmuidx_locked
  cputlb: Hoist tlb portions in tlb_mmu_resize_locked
  cputlb: Pass CPUTLBDescFast to tlb_n_entries and sizeof_tlb
  cputlb: Make tlb_n_entries private to cputlb.c
  cputlb: Merge tlb_table_flush_by_mmuidx into tlb_flush_one_mmuidx_locked
  vl: Only choose enabled accelerators in configure_accelerators
  vl: Remove useless test in configure_accelerators
  vl: Reduce scope of variables in configure_accelerators
  vl: Remove unused variable in configure_accelerators
  util/cacheinfo: fix crash when compiling with uClibc
  cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit be9612e8cb
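The key idea behind "Remove another limit to NB_MMU_MODES" is visible in the new tlb_flush_page_by_mmuidx helpers in the diff below: a page-flush request carries its address and its mmu-index bitmap in a single pointer-sized value only when the bitmap fits in the low TARGET_PAGE_BITS of the page-aligned address; otherwise a small TLBFlushPageByMMUIdxData structure is heap-allocated and handed to the worker. A minimal standalone sketch of that encode/decode step, using illustrative constants rather than QEMU's per-target definitions:

/* Illustration only, not QEMU code; the constants below are assumptions. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))

int main(void)
{
    uint64_t addr = 0x7f1234560000ULL;   /* already page aligned */
    uint16_t idxmap = 0x0005;            /* flush mmu_idx 0 and 2 */

    /* The compact encoding only works while the bitmap fits in the
     * page offset; otherwise the caller must allocate a structure. */
    assert(idxmap < TARGET_PAGE_SIZE);
    uint64_t encoded = addr | idxmap;

    /* Decode on the destination CPU, as the async helper does. */
    uint64_t dec_addr = encoded & TARGET_PAGE_MASK;
    uint16_t dec_idxmap = encoded & ~TARGET_PAGE_MASK;

    printf("addr %#llx, idxmap %#x\n",
           (unsigned long long)dec_addr, dec_idxmap);
    return 0;
}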
accel/tcg/cputlb.c

@@ -80,9 +80,14 @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
 
-static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
+static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
 {
-    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
+    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
+}
+
+static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
+{
+    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
 }
 
 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
@@ -92,26 +97,10 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
     desc->window_max_entries = max_entries;
 }
 
-static void tlb_dyn_init(CPUArchState *env)
-{
-    int i;
-
-    for (i = 0; i < NB_MMU_MODES; i++) {
-        CPUTLBDesc *desc = &env_tlb(env)->d[i];
-        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
-
-        tlb_window_reset(desc, get_clock_realtime(), 0);
-        desc->n_used_entries = 0;
-        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
-        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
-        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
-    }
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
- * @env: CPU that owns the TLB
- * @mmu_idx: MMU index of the TLB
+ * @desc: The CPUTLBDesc portion of the TLB
+ * @fast: The CPUTLBDescFast portion of the same TLB
  *
  * Called with tlb_lock_held.
  *
@@ -148,13 +137,12 @@ static void tlb_dyn_init(CPUArchState *env)
  * high), since otherwise we are likely to have a significant amount of
  * conflict misses.
  */
-static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
+                                  int64_t now)
 {
-    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
-    size_t old_size = tlb_n_entries(env, mmu_idx);
+    size_t old_size = tlb_n_entries(fast);
     size_t rate;
     size_t new_size = old_size;
-    int64_t now = get_clock_realtime();
     int64_t window_len_ms = 100;
     int64_t window_len_ns = window_len_ms * 1000 * 1000;
     bool window_expired = now > desc->window_begin_ns + window_len_ns;
@@ -193,14 +181,15 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
         return;
     }
 
-    g_free(env_tlb(env)->f[mmu_idx].table);
-    g_free(env_tlb(env)->d[mmu_idx].iotlb);
+    g_free(fast->table);
+    g_free(desc->iotlb);
 
     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
-    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
-    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
-    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+    fast->table = g_try_new(CPUTLBEntry, new_size);
+    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+
     /*
      * If the allocations fail, try smaller sizes. We just freed some
      * memory, so going back to half of new_size has a good chance of working.
@@ -208,27 +197,51 @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
      * allocations to fail though, so we progressively reduce the allocation
      * size, aborting if we cannot even allocate the smallest TLB we support.
      */
-    while (env_tlb(env)->f[mmu_idx].table == NULL ||
-           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
+    while (fast->table == NULL || desc->iotlb == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
         }
         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
-        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 
-        g_free(env_tlb(env)->f[mmu_idx].table);
-        g_free(env_tlb(env)->d[mmu_idx].iotlb);
-        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
-        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+        g_free(fast->table);
+        g_free(desc->iotlb);
+        fast->table = g_try_new(CPUTLBEntry, new_size);
+        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
     }
 }
 
-static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
 {
-    tlb_mmu_resize_locked(env, mmu_idx);
-    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
-    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
+    desc->n_used_entries = 0;
+    desc->large_page_addr = -1;
+    desc->large_page_mask = -1;
+    desc->vindex = 0;
+    memset(fast->table, -1, sizeof_tlb(fast));
+    memset(desc->vtable, -1, sizeof(desc->vtable));
+}
+
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+                                        int64_t now)
+{
+    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
+    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+
+    tlb_mmu_resize_locked(desc, fast, now);
+    tlb_mmu_flush_locked(desc, fast);
+}
+
+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
+{
+    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
+
+    tlb_window_reset(desc, now, 0);
+    desc->n_used_entries = 0;
+    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
+    fast->table = g_new(CPUTLBEntry, n_entries);
+    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+    tlb_mmu_flush_locked(desc, fast);
 }
 
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
@@ -244,13 +257,17 @@ static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
 void tlb_init(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;
+    int64_t now = get_clock_realtime();
+    int i;
 
     qemu_spin_init(&env_tlb(env)->c.lock);
 
-    /* Ensure that cpu_reset performs a full flush. */
-    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
+    /* All tlbs are initialized flushed. */
+    env_tlb(env)->c.dirty = 0;
 
-    tlb_dyn_init(env);
+    for (i = 0; i < NB_MMU_MODES; i++) {
+        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+    }
 }
 
 /* flush_all_helper: run fn across all cpus
@@ -289,21 +306,12 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
     *pelide = elide;
 }
 
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
-{
-    tlb_table_flush_by_mmuidx(env, mmu_idx);
-    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
-    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
-    env_tlb(env)->d[mmu_idx].vindex = 0;
-    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
-           sizeof(env_tlb(env)->d[0].vtable));
-}
-
 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUArchState *env = cpu->env_ptr;
     uint16_t asked = data.host_int;
     uint16_t all_dirty, work, to_clean;
+    int64_t now = get_clock_realtime();
 
     assert_cpu_is_self(cpu);
 
@@ -318,7 +326,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 
     for (work = to_clean; work != 0; work &= work - 1) {
         int mmu_idx = ctz32(work);
-        tlb_flush_one_mmuidx_locked(env, mmu_idx);
+        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
     }
 
     qemu_spin_unlock(&env_tlb(env)->c.lock);
@@ -440,7 +448,7 @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
         tlb_debug("forcing full flush midx %d ("
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   midx, lp_addr, lp_mask);
-        tlb_flush_one_mmuidx_locked(env, midx);
+        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
     } else {
         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
@@ -449,28 +457,29 @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
     }
 }
 
-/* As we are going to hijack the bottom bits of the page address for a
- * mmuidx bit mask we need to fail to build if we can't do that
+/**
+ * tlb_flush_page_by_mmuidx_async_0:
+ * @cpu: cpu on which to flush
+ * @addr: page of virtual address to flush
+ * @idxmap: set of mmu_idx to flush
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
+ * at @addr from the tlbs indicated by @idxmap from @cpu.
  */
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
-
-static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
-                                                run_on_cpu_data data)
+static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
+                                             target_ulong addr,
+                                             uint16_t idxmap)
 {
     CPUArchState *env = cpu->env_ptr;
-    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
-    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
-    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
     int mmu_idx;
 
     assert_cpu_is_self(cpu);
 
-    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
-              addr, mmu_idx_bitmap);
+    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
 
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
+        if ((idxmap >> mmu_idx) & 1) {
             tlb_flush_page_locked(env, mmu_idx, addr);
         }
     }
@@ -479,22 +488,75 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     tb_flush_jmp_cache(cpu, addr);
 }
 
+/**
+ * tlb_flush_page_by_mmuidx_async_1:
+ * @cpu: cpu on which to flush
+ * @data: encoded addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu. The idxmap parameter is encoded in the page
+ * offset of the target_ptr field. This limits the set of mmu_idx
+ * that can be passed via this method.
+ */
+static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
+                                             run_on_cpu_data data)
+{
+    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
+    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
+    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+
+    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+}
+
+typedef struct {
+    target_ulong addr;
+    uint16_t idxmap;
+} TLBFlushPageByMMUIdxData;
+
+/**
+ * tlb_flush_page_by_mmuidx_async_2:
+ * @cpu: cpu on which to flush
+ * @data: allocated addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu. The addr+idxmap parameters are stored in a
+ * TLBFlushPageByMMUIdxData structure that has been allocated
+ * specifically for this helper. Free the structure when done.
+ */
+static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
+                                             run_on_cpu_data data)
+{
+    TLBFlushPageByMMUIdxData *d = data.host_ptr;
+
+    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
+    g_free(d);
+}
+
 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
 {
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
 
     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;
 
-    if (!qemu_cpu_is_self(cpu)) {
-        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
-                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    if (qemu_cpu_is_self(cpu)) {
+        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+    } else if (idxmap < TARGET_PAGE_SIZE) {
+        /*
+         * Most targets have only a few mmu_idx. In the case where
+         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
+         * allocating memory for this operation.
+         */
+        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
     } else {
-        tlb_flush_page_by_mmuidx_async_work(
-            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
+
+        /* Otherwise allocate a structure, freed by the worker. */
+        d->addr = addr;
+        d->idxmap = idxmap;
+        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
+                         RUN_ON_CPU_HOST_PTR(d));
     }
 }
 
@@ -506,17 +568,36 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                        uint16_t idxmap)
 {
-    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
 
     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;
 
-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    /*
+     * Allocate memory to hold addr+idxmap only when needed.
+     * See tlb_flush_page_by_mmuidx for details.
+     */
+    if (idxmap < TARGET_PAGE_SIZE) {
+        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+    } else {
+        CPUState *dst_cpu;
+
+        /* Allocate a separate data block for each destination cpu. */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                TLBFlushPageByMMUIdxData *d
+                    = g_new(TLBFlushPageByMMUIdxData, 1);
+
+                d->addr = addr;
+                d->idxmap = idxmap;
+                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(d));
+            }
+        }
+    }
+
+    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
 }
 
 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
@@ -528,17 +609,41 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               uint16_t idxmap)
 {
-    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
 
     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;
 
-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    /*
+     * Allocate memory to hold addr+idxmap only when needed.
+     * See tlb_flush_page_by_mmuidx for details.
+     */
+    if (idxmap < TARGET_PAGE_SIZE) {
+        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+    } else {
+        CPUState *dst_cpu;
+        TLBFlushPageByMMUIdxData *d;
+
+        /* Allocate a separate data block for each destination cpu. */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                d = g_new(TLBFlushPageByMMUIdxData, 1);
+                d->addr = addr;
+                d->idxmap = idxmap;
+                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(d));
+            }
+        }
+
+        d = g_new(TLBFlushPageByMMUIdxData, 1);
+        d->addr = addr;
+        d->idxmap = idxmap;
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
+                              RUN_ON_CPU_HOST_PTR(d));
+    }
 }
 
 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
@@ -622,7 +727,7 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         unsigned int i;
-        unsigned int n = tlb_n_entries(env, mmu_idx);
+        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
 
         for (i = 0; i < n; i++) {
             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
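The refactored helpers above rely on the invariant that CPUTLBDescFast stores the table size as a byte mask: the entry count is (mask >> CPU_TLB_ENTRY_BITS) + 1 and the table's byte size is mask + (1 << CPU_TLB_ENTRY_BITS). A small self-contained check of that arithmetic, with an assumed entry-size shift rather than QEMU's real CPU_TLB_ENTRY_BITS:

/* Illustration only: a made-up CPU_TLB_ENTRY_BITS, not QEMU's value. */
#include <assert.h>
#include <stddef.h>

#define CPU_TLB_ENTRY_BITS 5   /* pretend each TLB entry is 32 bytes */

static size_t tlb_n_entries(size_t mask)
{
    return (mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static size_t sizeof_tlb(size_t mask)
{
    return mask + (1 << CPU_TLB_ENTRY_BITS);
}

int main(void)
{
    for (size_t n = 1; n <= 4096; n <<= 1) {
        size_t mask = (n - 1) << CPU_TLB_ENTRY_BITS;
        assert(tlb_n_entries(mask) == n);
        assert(sizeof_tlb(mask) == n << CPU_TLB_ENTRY_BITS);
    }
    return 0;
}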
include/exec/cpu_ldst.h

@@ -234,11 +234,6 @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
     return (addr >> TARGET_PAGE_BITS) & size_mask;
 }
 
-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
-{
-    return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
-}
-
 /* Find the TLB entry corresponding to the mmu_idx + address pair. */
 static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                      target_ulong addr)
scripts/git.orderfile

@@ -25,5 +25,8 @@ qga/*.json
 # headers
 *.h
 
+# decoding tree specification
+*.decode
+
 # code
 *.c
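The orderfile change is cosmetic for reviewers: it makes *.decode specifications appear before the C sources that implement them in generated diffs. It only takes effect once git is told to use the file, for example:

git config diff.orderFile scripts/git.orderfile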
util/cacheinfo.c

@@ -93,10 +93,16 @@ static void sys_cache_info(int *isize, int *dsize)
 static void sys_cache_info(int *isize, int *dsize)
 {
 # ifdef _SC_LEVEL1_ICACHE_LINESIZE
-    *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+    int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+    if (tmp_isize > 0) {
+        *isize = tmp_isize;
+    }
 # endif
 # ifdef _SC_LEVEL1_DCACHE_LINESIZE
-    *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+    int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+    if (tmp_dsize > 0) {
+        *dsize = tmp_dsize;
+    }
 # endif
 }
 #endif /* sys_cache_info */
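The uClibc problem is that sysconf() can report the cache line size as -1 (or 0) when the value is unknown, and the old code stored that result unconditionally, poisoning the detected line size and leading to the crash the commit title mentions. A minimal sketch of the guarded pattern outside of QEMU, with an assumed 64-byte fallback:

/* Illustration only: the 64-byte fallback is an assumption, not QEMU's. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int dsize = 64;   /* keep a sane default */
#ifdef _SC_LEVEL1_DCACHE_LINESIZE
    long v = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    if (v > 0) {      /* sysconf reports -1 or 0 when it does not know */
        dsize = (int) v;
    }
#endif
    printf("D-cache line size: %d bytes\n", dsize);
    return 0;
}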
vl.c

@@ -2755,8 +2755,6 @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp)
 static void configure_accelerators(const char *progname)
 {
     const char *accel;
-    char **accel_list, **tmp;
-    bool accel_initialised = false;
     bool init_failed = false;
 
     qemu_opts_foreach(qemu_find_opts("icount"),
@@ -2764,26 +2762,33 @@ static void configure_accelerators(const char *progname)
 
     accel = qemu_opt_get(qemu_get_machine_opts(), "accel");
     if (QTAILQ_EMPTY(&qemu_accel_opts.head)) {
+        char **accel_list, **tmp;
+
         if (accel == NULL) {
             /* Select the default accelerator */
-            if (!accel_find("tcg") && !accel_find("kvm")) {
-                error_report("No accelerator selected and"
-                             " no default accelerator available");
-                exit(1);
-            } else {
-                int pnlen = strlen(progname);
-                if (pnlen >= 3 && g_str_equal(&progname[pnlen - 3], "kvm")) {
+            bool have_tcg = accel_find("tcg");
+            bool have_kvm = accel_find("kvm");
+            if (have_tcg && have_kvm) {
+                if (g_str_has_suffix(progname, "kvm")) {
                     /* If the program name ends with "kvm", we prefer KVM */
                     accel = "kvm:tcg";
                 } else {
                     accel = "tcg:kvm";
                 }
+            } else if (have_kvm) {
+                accel = "kvm";
+            } else if (have_tcg) {
+                accel = "tcg";
+            } else {
+                error_report("No accelerator selected and"
+                             " no default accelerator available");
+                exit(1);
             }
         }
 
         accel_list = g_strsplit(accel, ":", 0);
 
-        for (tmp = accel_list; !accel_initialised && tmp && *tmp; tmp++) {
+        for (tmp = accel_list; *tmp; tmp++) {
             /*
              * Filter invalid accelerators here, to prevent obscenities
              * such as "-machine accel=tcg,,thread=single".
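The new default-accelerator logic only ever picks accelerators that are actually built in, and keeps the old convention that a binary whose name ends in "kvm" prefers KVM. A rough standalone sketch of that selection order, in plain C without glib and with the availability flags hard-coded rather than queried through accel_find():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ends_with(const char *s, const char *suffix)
{
    size_t ls = strlen(s), lt = strlen(suffix);
    return ls >= lt && strcmp(s + ls - lt, suffix) == 0;
}

int main(int argc, char **argv)
{
    /* Assumed availability; QEMU queries this via accel_find(). */
    bool have_tcg = true, have_kvm = true;
    const char *progname = argc > 0 ? argv[0] : "qemu-system-x86_64";
    const char *accel;

    if (have_tcg && have_kvm) {
        accel = ends_with(progname, "kvm") ? "kvm:tcg" : "tcg:kvm";
    } else if (have_kvm) {
        accel = "kvm";
    } else if (have_tcg) {
        accel = "tcg";
    } else {
        fprintf(stderr, "no accelerator available\n");
        return 1;
    }
    printf("default accel list: %s\n", accel);
    return 0;
}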