mirror of https://github.com/xemu-project/xemu.git
tcg/cputlb: remove other-cpu capability from TLB flushing
Some TLB flush operations can flush other CPUs. The problem with this is they used non-synced variants of flushes (i.e., that return before the destination has completed the flush). Since all TLB flush users need the _synced variants, and the last user (ppc) of the non-synced flush was buggy, this is a footgun waiting to go off. There do not seem to be any callers that flush other CPUs, so remove the capability.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
commit 30933c4fb4
parent 99cd12ced1
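Callers that need to invalidate translations on other vCPUs are expected to go through the *_all_cpus_synced flush API instead of pointing these functions at a foreign CPU. A minimal sketch of that pattern, assuming the tlb_flush_by_mmuidx_all_cpus_synced() prototype from QEMU's cputlb headers (the helper name below is illustrative only):

    /* Sketch: broadcast a flush of mmu_idx 0 and 1 to all vCPUs.  The
     * "_synced" variant guarantees the flush has completed everywhere
     * before the source vCPU resumes execution, which is the property
     * the commit message says all flush users need. */
    static void example_broadcast_flush(CPUState *cs)
    {
        uint16_t idxmap = (1u << 0) | (1u << 1);

        tlb_flush_by_mmuidx_all_cpus_synced(cs, idxmap);
    }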
@@ -418,12 +418,9 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
 {
     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
 
-    if (cpu->created && !qemu_cpu_is_self(cpu)) {
-        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
-                         RUN_ON_CPU_HOST_INT(idxmap));
-    } else {
-        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
-    }
+    assert_cpu_is_self(cpu);
+
+    tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
 }
 
 void tlb_flush(CPUState *cpu)
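For reference, assert_cpu_is_self() is the debug-only check already used by the other flush paths in cputlb.c; it is defined roughly as below (compiled out unless DEBUG_TLB_GATE is set), so release builds rely on callers honouring the new contract rather than on a runtime check:

    /* Approximate shape of the existing macro in accel/tcg/cputlb.c. */
    #define assert_cpu_is_self(cpu)                                   \
        do {                                                          \
            if (DEBUG_TLB_GATE) {                                     \
                g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
            }                                                         \
        } while (0)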
@@ -612,28 +609,12 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
 {
     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
 
+    assert_cpu_is_self(cpu);
+
     /* This should already be page aligned */
     addr &= TARGET_PAGE_MASK;
 
-    if (qemu_cpu_is_self(cpu)) {
-        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
-    } else if (idxmap < TARGET_PAGE_SIZE) {
-        /*
-         * Most targets have only a few mmu_idx. In the case where
-         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
-         * allocating memory for this operation.
-         */
-        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
-                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
-    } else {
-        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
-
-        /* Otherwise allocate a structure, freed by the worker. */
-        d->addr = addr;
-        d->idxmap = idxmap;
-        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
-                         RUN_ON_CPU_HOST_PTR(d));
-    }
+    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
 }
 
 void tlb_flush_page(CPUState *cpu, vaddr addr)
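The dropped "else if" branch packed addr and idxmap into one pointer-sized value to avoid an allocation for the cross-CPU case; the matching decode lived in tlb_flush_page_by_mmuidx_async_1 (now without callers, so presumably dropped elsewhere in this patch) and looked roughly like this. The trick only works because idxmap < TARGET_PAGE_SIZE keeps the map inside the page-offset bits:

    /* Approximate shape of the old worker: addr occupies the page-aligned
     * bits, idxmap the low TARGET_PAGE_BITS, so one target_ptr carries both. */
    static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                                 run_on_cpu_data data)
    {
        vaddr addr_and_idxmap = data.target_ptr;
        vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
        uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    }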
@@ -796,6 +777,8 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
 {
     TLBFlushRangeData d;
 
+    assert_cpu_is_self(cpu);
+
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
@@ -816,14 +799,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
     d.idxmap = idxmap;
     d.bits = bits;
 
-    if (qemu_cpu_is_self(cpu)) {
-        tlb_flush_range_by_mmuidx_async_0(cpu, d);
-    } else {
-        /* Otherwise allocate a structure, freed by the worker. */
-        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
-        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
-                         RUN_ON_CPU_HOST_PTR(p));
-    }
+    tlb_flush_range_by_mmuidx_async_0(cpu, d);
 }
 
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
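With the other-CPU capability gone, code that really does want a different vCPU's TLB flushed has to make the flush run on that vCPU (or use the *_all_cpus_synced API noted above). A sketch under that assumption, with illustrative names (remote_flush_page and its worker are not QEMU API):

    /* Run the flush on the owning vCPU's thread so the assert_cpu_is_self()
     * contract holds there.  async_run_on_cpu() returns before the work has
     * run, so on its own this gives no completion guarantee -- the same
     * "non-synced" pitfall the commit message warns about. */
    static void remote_flush_page_worker(CPUState *cpu, run_on_cpu_data data)
    {
        tlb_flush_page(cpu, data.target_ptr);
    }

    static void remote_flush_page(CPUState *other_cpu, vaddr addr)
    {
        async_run_on_cpu(other_cpu, remote_flush_page_worker,
                         RUN_ON_CPU_TARGET_PTR(addr));
    }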