mirror of https://github.com/xemu-project/xemu.git
ppc patch queue 2018-02-16
Highlights of this batch:

 * Conversion to TranslatorOps (Emilio Cota)
 * Further bugfixes and cleanups to vcpu id allocation for pseries (Greg Kurz)
 * Another bugfix for HPT resizing (Daniel Henrique-Barboza)
 * Macintosh CUDA cleanups (Mark Cave-Ayland)
 * Further tweaks to Spectre/Meltdown mitigations (Suraj Singh)

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAlqGq6IACgkQbDjKyiDZ
s5IW5Q//a2217YE+XsCaL2wJkDVGFwg56HIoD7BAgsygbiplxy5QTXSk8GO/H85A
ybi4TFnYTt2kc4fYspXPLUDAB39Juv/pDwvHL0TjJCyxnT56YwSuLN+V8U3c1uKr
H1cwxlDHjB+NFx94JDf7Ze3iUvShr/NAzlS4+N/7xENc3RewU25gl8z7+W8UGMlb
uHTgISxV2F/WkMzFlAyqQDtkurgmtvW/XRp6l804wGecPDg1GeF3EIcKTDrJ9WtS
yleQ7hTRdc3ML+66O6pWGz6fVt6IGk7rS0iJTjqmeXqv1zglbFiW5pbX6p/4OyWo
S3wsac0tAI2Vvymkh4TcfqtfmEYwC1+fCtEmBbf2QetCchcYrIDsnBEasvOFnBbL
utDliSbEQlKKMcG5/8gnIZeXQCvDWaIWUxgM6pcPYG/OU3RP2O5/+QNfpHy2pgYs
YnrNmuaiVG4qJeXYK2Y/BqBxrIjQVsJIIZumywpdY/tgmJ2A3zg2Zv83b3LBHmrE
d4k+qZmkZTBhKUYyskMDreqMEfR82VCQHjXsvblP0YGJ0M1v1MZVKiQR7goj7mfe
TIYqVxmuFwHb5dYe2wgLHKRtlZ3Z34+24Pe+lIAo+DCEAxtEHVYi/za9uNqtbf/i
jHQ5WFmaUdgLLAdomFhxSD/hg/bhGTpiJDB/yk4MUrLM76aAb78=
=2DYH
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.12-20180216' into staging

ppc patch queue 2018-02-16

Highlights of this batch:

 * Conversion to TranslatorOps (Emilio Cota)
 * Further bugfixes and cleanups to vcpu id allocation for pseries (Greg Kurz)
 * Another bugfix for HPT resizing (Daniel Henrique-Barboza)
 * Macintosh CUDA cleanups (Mark Cave-Ayland)
 * Further tweaks to Spectre/Meltdown mitigations (Suraj Singh)

# gpg: Signature made Fri 16 Feb 2018 10:00:02 GMT
# gpg:                using RSA key 6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>"
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.12-20180216:
  ppc4xx: Add device models found in PPC440 core SoCs
  ppc/spapr-caps: Disallow setting workaround for spapr-cap-ibs
  target/ppc: convert to TranslatorOps
  target/ppc: convert to DisasContextBase
  spapr: consolidate the VCPU id numbering logic in a single place
  spapr: rename spapr_vcpu_id() to spapr_get_vcpu_id()
  spapr: move VCPU calculation to core machine code
  spapr: use spapr->vsmt to compute VCPU ids
  ppc/spapr-caps: Change migration macro to take full spapr-cap name
  hw/char: remove legacy interface escc_init()
  hw/ppc/spapr_hcall: set htab_shift after kvmppc_resize_hpt_commit
  cuda: convert to trace-events
  ppc: move CUDAState and other CUDA-related definitions into separate cuda.h file
  cuda: convert to use the shared mos6522 device

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in: commit d9c92ae335
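One change in this queue worth calling out before the per-file diffs is the VCPU id numbering rework visible in the hw/ppc/spapr.c hunk further down, which computes ids as (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads. The sketch below merely restates that arithmetic outside of QEMU so it can be tried in isolation; the helper name example_vcpu_id() and the smp_threads/vsmt values are illustrative assumptions, not code from the patch.

/*
 * Illustration only (not part of the commit): reproduce the VCPU id
 * mapping used by the spapr.c hunk below. smp_threads = 4 and vsmt = 8
 * are made-up example inputs.
 */
#include <stdio.h>

static int example_vcpu_id(int cpu_index, int smp_threads, int vsmt)
{
    return (cpu_index / smp_threads) * vsmt + cpu_index % smp_threads;
}

int main(void)
{
    int smp_threads = 4, vsmt = 8;
    int cpu_index;

    for (cpu_index = 0; cpu_index < 8; cpu_index++) {
        /* e.g. cpu_index 5 -> (5 / 4) * 8 + 5 % 4 = 9 */
        printf("cpu_index %d -> vcpu id %d\n",
               cpu_index, example_vcpu_id(cpu_index, smp_threads, vsmt));
    }
    return 0;
}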
@@ -133,6 +133,7 @@ trace-events-subdirs += hw/net
 trace-events-subdirs += hw/virtio
 trace-events-subdirs += hw/audio
 trace-events-subdirs += hw/misc
+trace-events-subdirs += hw/misc/macio
 trace-events-subdirs += hw/usb
 trace-events-subdirs += hw/scsi
 trace-events-subdirs += hw/nvram
hw/char/escc.c (209 changed lines)
|
@ -26,10 +26,7 @@
|
|||
#include "hw/hw.h"
|
||||
#include "hw/sysbus.h"
|
||||
#include "hw/char/escc.h"
|
||||
#include "chardev/char-fe.h"
|
||||
#include "chardev/char-serial.h"
|
||||
#include "ui/console.h"
|
||||
#include "ui/input.h"
|
||||
#include "trace.h"
|
||||
|
||||
/*
|
||||
|
@ -64,53 +61,7 @@
|
|||
* 2010-May-23 Artyom Tarasenko: Reworked IUS logic
|
||||
*/
|
||||
|
||||
typedef enum {
|
||||
chn_a, chn_b,
|
||||
} ChnID;
|
||||
|
||||
#define CHN_C(s) ((s)->chn == chn_b? 'b' : 'a')
|
||||
|
||||
typedef enum {
|
||||
ser, kbd, mouse,
|
||||
} ChnType;
|
||||
|
||||
#define SERIO_QUEUE_SIZE 256
|
||||
|
||||
typedef struct {
|
||||
uint8_t data[SERIO_QUEUE_SIZE];
|
||||
int rptr, wptr, count;
|
||||
} SERIOQueue;
|
||||
|
||||
#define SERIAL_REGS 16
|
||||
typedef struct ChannelState {
|
||||
qemu_irq irq;
|
||||
uint32_t rxint, txint, rxint_under_svc, txint_under_svc;
|
||||
struct ChannelState *otherchn;
|
||||
uint32_t reg;
|
||||
uint8_t wregs[SERIAL_REGS], rregs[SERIAL_REGS];
|
||||
SERIOQueue queue;
|
||||
CharBackend chr;
|
||||
int e0_mode, led_mode, caps_lock_mode, num_lock_mode;
|
||||
int disabled;
|
||||
int clock;
|
||||
uint32_t vmstate_dummy;
|
||||
ChnID chn; // this channel, A (base+4) or B (base+0)
|
||||
ChnType type;
|
||||
uint8_t rx, tx;
|
||||
QemuInputHandlerState *hs;
|
||||
} ChannelState;
|
||||
|
||||
#define ESCC(obj) OBJECT_CHECK(ESCCState, (obj), TYPE_ESCC)
|
||||
|
||||
typedef struct ESCCState {
|
||||
SysBusDevice parent_obj;
|
||||
|
||||
struct ChannelState chn[2];
|
||||
uint32_t it_shift;
|
||||
MemoryRegion mmio;
|
||||
uint32_t disabled;
|
||||
uint32_t frequency;
|
||||
} ESCCState;
|
||||
#define CHN_C(s) ((s)->chn == escc_chn_b ? 'b' : 'a')
|
||||
|
||||
#define SERIAL_CTRL 0
|
||||
#define SERIAL_DATA 1
|
||||
|
@ -214,44 +165,47 @@ typedef struct ESCCState {
|
|||
#define R_MISC1I 14
|
||||
#define R_EXTINT 15
|
||||
|
||||
static void handle_kbd_command(ChannelState *s, int val);
|
||||
static void handle_kbd_command(ESCCChannelState *s, int val);
|
||||
static int serial_can_receive(void *opaque);
|
||||
static void serial_receive_byte(ChannelState *s, int ch);
|
||||
static void serial_receive_byte(ESCCChannelState *s, int ch);
|
||||
|
||||
static void clear_queue(void *opaque)
|
||||
{
|
||||
ChannelState *s = opaque;
|
||||
SERIOQueue *q = &s->queue;
|
||||
ESCCChannelState *s = opaque;
|
||||
ESCCSERIOQueue *q = &s->queue;
|
||||
q->rptr = q->wptr = q->count = 0;
|
||||
}
|
||||
|
||||
static void put_queue(void *opaque, int b)
|
||||
{
|
||||
ChannelState *s = opaque;
|
||||
SERIOQueue *q = &s->queue;
|
||||
ESCCChannelState *s = opaque;
|
||||
ESCCSERIOQueue *q = &s->queue;
|
||||
|
||||
trace_escc_put_queue(CHN_C(s), b);
|
||||
if (q->count >= SERIO_QUEUE_SIZE)
|
||||
if (q->count >= ESCC_SERIO_QUEUE_SIZE) {
|
||||
return;
|
||||
}
|
||||
q->data[q->wptr] = b;
|
||||
if (++q->wptr == SERIO_QUEUE_SIZE)
|
||||
if (++q->wptr == ESCC_SERIO_QUEUE_SIZE) {
|
||||
q->wptr = 0;
|
||||
}
|
||||
q->count++;
|
||||
serial_receive_byte(s, 0);
|
||||
}
|
||||
|
||||
static uint32_t get_queue(void *opaque)
|
||||
{
|
||||
ChannelState *s = opaque;
|
||||
SERIOQueue *q = &s->queue;
|
||||
ESCCChannelState *s = opaque;
|
||||
ESCCSERIOQueue *q = &s->queue;
|
||||
int val;
|
||||
|
||||
if (q->count == 0) {
|
||||
return 0;
|
||||
} else {
|
||||
val = q->data[q->rptr];
|
||||
if (++q->rptr == SERIO_QUEUE_SIZE)
|
||||
if (++q->rptr == ESCC_SERIO_QUEUE_SIZE) {
|
||||
q->rptr = 0;
|
||||
}
|
||||
q->count--;
|
||||
}
|
||||
trace_escc_get_queue(CHN_C(s), val);
|
||||
|
@ -260,7 +214,7 @@ static uint32_t get_queue(void *opaque)
|
|||
return val;
|
||||
}
|
||||
|
||||
static int escc_update_irq_chn(ChannelState *s)
|
||||
static int escc_update_irq_chn(ESCCChannelState *s)
|
||||
{
|
||||
if ((((s->wregs[W_INTR] & INTR_TXINT) && (s->txint == 1)) ||
|
||||
// tx ints enabled, pending
|
||||
|
@ -274,7 +228,7 @@ static int escc_update_irq_chn(ChannelState *s)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void escc_update_irq(ChannelState *s)
|
||||
static void escc_update_irq(ESCCChannelState *s)
|
||||
{
|
||||
int irq;
|
||||
|
||||
|
@ -285,12 +239,12 @@ static void escc_update_irq(ChannelState *s)
|
|||
qemu_set_irq(s->irq, irq);
|
||||
}
|
||||
|
||||
static void escc_reset_chn(ChannelState *s)
|
||||
static void escc_reset_chn(ESCCChannelState *s)
|
||||
{
|
||||
int i;
|
||||
|
||||
s->reg = 0;
|
||||
for (i = 0; i < SERIAL_REGS; i++) {
|
||||
for (i = 0; i < ESCC_SERIAL_REGS; i++) {
|
||||
s->rregs[i] = 0;
|
||||
s->wregs[i] = 0;
|
||||
}
|
||||
|
@ -322,13 +276,13 @@ static void escc_reset(DeviceState *d)
|
|||
escc_reset_chn(&s->chn[1]);
|
||||
}
|
||||
|
||||
static inline void set_rxint(ChannelState *s)
|
||||
static inline void set_rxint(ESCCChannelState *s)
|
||||
{
|
||||
s->rxint = 1;
|
||||
/* XXX: missing daisy chainnig: chn_b rx should have a lower priority
|
||||
/* XXX: missing daisy chainnig: escc_chn_b rx should have a lower priority
|
||||
than chn_a rx/tx/special_condition service*/
|
||||
s->rxint_under_svc = 1;
|
||||
if (s->chn == chn_a) {
|
||||
if (s->chn == escc_chn_a) {
|
||||
s->rregs[R_INTR] |= INTR_RXINTA;
|
||||
if (s->wregs[W_MINTR] & MINTR_STATUSHI)
|
||||
s->otherchn->rregs[R_IVEC] = IVEC_HIRXINTA;
|
||||
|
@ -344,12 +298,12 @@ static inline void set_rxint(ChannelState *s)
|
|||
escc_update_irq(s);
|
||||
}
|
||||
|
||||
static inline void set_txint(ChannelState *s)
|
||||
static inline void set_txint(ESCCChannelState *s)
|
||||
{
|
||||
s->txint = 1;
|
||||
if (!s->rxint_under_svc) {
|
||||
s->txint_under_svc = 1;
|
||||
if (s->chn == chn_a) {
|
||||
if (s->chn == escc_chn_a) {
|
||||
if (s->wregs[W_INTR] & INTR_TXINT) {
|
||||
s->rregs[R_INTR] |= INTR_TXINTA;
|
||||
}
|
||||
|
@ -367,11 +321,11 @@ static inline void set_txint(ChannelState *s)
|
|||
}
|
||||
}
|
||||
|
||||
static inline void clr_rxint(ChannelState *s)
|
||||
static inline void clr_rxint(ESCCChannelState *s)
|
||||
{
|
||||
s->rxint = 0;
|
||||
s->rxint_under_svc = 0;
|
||||
if (s->chn == chn_a) {
|
||||
if (s->chn == escc_chn_a) {
|
||||
if (s->wregs[W_MINTR] & MINTR_STATUSHI)
|
||||
s->otherchn->rregs[R_IVEC] = IVEC_HINOINT;
|
||||
else
|
||||
|
@ -389,11 +343,11 @@ static inline void clr_rxint(ChannelState *s)
|
|||
escc_update_irq(s);
|
||||
}
|
||||
|
||||
static inline void clr_txint(ChannelState *s)
|
||||
static inline void clr_txint(ESCCChannelState *s)
|
||||
{
|
||||
s->txint = 0;
|
||||
s->txint_under_svc = 0;
|
||||
if (s->chn == chn_a) {
|
||||
if (s->chn == escc_chn_a) {
|
||||
if (s->wregs[W_MINTR] & MINTR_STATUSHI)
|
||||
s->otherchn->rregs[R_IVEC] = IVEC_HINOINT;
|
||||
else
|
||||
|
@ -412,12 +366,12 @@ static inline void clr_txint(ChannelState *s)
|
|||
escc_update_irq(s);
|
||||
}
|
||||
|
||||
static void escc_update_parameters(ChannelState *s)
|
||||
static void escc_update_parameters(ESCCChannelState *s)
|
||||
{
|
||||
int speed, parity, data_bits, stop_bits;
|
||||
QEMUSerialSetParams ssp;
|
||||
|
||||
if (!qemu_chr_fe_backend_connected(&s->chr) || s->type != ser)
|
||||
if (!qemu_chr_fe_backend_connected(&s->chr) || s->type != escc_serial)
|
||||
return;
|
||||
|
||||
if (s->wregs[W_TXCTRL1] & TXCTRL1_PAREN) {
|
||||
|
@ -474,7 +428,7 @@ static void escc_mem_write(void *opaque, hwaddr addr,
|
|||
uint64_t val, unsigned size)
|
||||
{
|
||||
ESCCState *serial = opaque;
|
||||
ChannelState *s;
|
||||
ESCCChannelState *s;
|
||||
uint32_t saddr;
|
||||
int newreg, channel;
|
||||
|
||||
|
@ -561,7 +515,7 @@ static void escc_mem_write(void *opaque, hwaddr addr,
|
|||
/* XXX this blocks entire thread. Rewrite to use
|
||||
* qemu_chr_fe_write and background I/O callbacks */
|
||||
qemu_chr_fe_write_all(&s->chr, &s->tx, 1);
|
||||
} else if (s->type == kbd && !s->disabled) {
|
||||
} else if (s->type == escc_kbd && !s->disabled) {
|
||||
handle_kbd_command(s, val);
|
||||
}
|
||||
}
|
||||
|
@ -578,7 +532,7 @@ static uint64_t escc_mem_read(void *opaque, hwaddr addr,
|
|||
unsigned size)
|
||||
{
|
||||
ESCCState *serial = opaque;
|
||||
ChannelState *s;
|
||||
ESCCChannelState *s;
|
||||
uint32_t saddr;
|
||||
uint32_t ret;
|
||||
int channel;
|
||||
|
@ -595,10 +549,11 @@ static uint64_t escc_mem_read(void *opaque, hwaddr addr,
|
|||
case SERIAL_DATA:
|
||||
s->rregs[R_STATUS] &= ~STATUS_RXAV;
|
||||
clr_rxint(s);
|
||||
if (s->type == kbd || s->type == mouse)
|
||||
if (s->type == escc_kbd || s->type == escc_mouse) {
|
||||
ret = get_queue(s);
|
||||
else
|
||||
} else {
|
||||
ret = s->rx;
|
||||
}
|
||||
trace_escc_mem_readb_data(CHN_C(s), ret);
|
||||
qemu_chr_fe_accept_input(&s->chr);
|
||||
return ret;
|
||||
|
@ -620,7 +575,7 @@ static const MemoryRegionOps escc_mem_ops = {
|
|||
|
||||
static int serial_can_receive(void *opaque)
|
||||
{
|
||||
ChannelState *s = opaque;
|
||||
ESCCChannelState *s = opaque;
|
||||
int ret;
|
||||
|
||||
if (((s->wregs[W_RXCTRL] & RXCTRL_RXEN) == 0) // Rx not enabled
|
||||
|
@ -632,7 +587,7 @@ static int serial_can_receive(void *opaque)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void serial_receive_byte(ChannelState *s, int ch)
|
||||
static void serial_receive_byte(ESCCChannelState *s, int ch)
|
||||
{
|
||||
trace_escc_serial_receive_byte(CHN_C(s), ch);
|
||||
s->rregs[R_STATUS] |= STATUS_RXAV;
|
||||
|
@ -640,7 +595,7 @@ static void serial_receive_byte(ChannelState *s, int ch)
|
|||
set_rxint(s);
|
||||
}
|
||||
|
||||
static void serial_receive_break(ChannelState *s)
|
||||
static void serial_receive_break(ESCCChannelState *s)
|
||||
{
|
||||
s->rregs[R_STATUS] |= STATUS_BRK;
|
||||
escc_update_irq(s);
|
||||
|
@ -648,13 +603,13 @@ static void serial_receive_break(ChannelState *s)
|
|||
|
||||
static void serial_receive1(void *opaque, const uint8_t *buf, int size)
|
||||
{
|
||||
ChannelState *s = opaque;
|
||||
ESCCChannelState *s = opaque;
|
||||
serial_receive_byte(s, buf[0]);
|
||||
}
|
||||
|
||||
static void serial_event(void *opaque, int event)
|
||||
{
|
||||
ChannelState *s = opaque;
|
||||
ESCCChannelState *s = opaque;
|
||||
if (event == CHR_EVENT_BREAK)
|
||||
serial_receive_break(s);
|
||||
}
|
||||
|
@ -664,16 +619,16 @@ static const VMStateDescription vmstate_escc_chn = {
|
|||
.version_id = 2,
|
||||
.minimum_version_id = 1,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT32(vmstate_dummy, ChannelState),
|
||||
VMSTATE_UINT32(reg, ChannelState),
|
||||
VMSTATE_UINT32(rxint, ChannelState),
|
||||
VMSTATE_UINT32(txint, ChannelState),
|
||||
VMSTATE_UINT32(rxint_under_svc, ChannelState),
|
||||
VMSTATE_UINT32(txint_under_svc, ChannelState),
|
||||
VMSTATE_UINT8(rx, ChannelState),
|
||||
VMSTATE_UINT8(tx, ChannelState),
|
||||
VMSTATE_BUFFER(wregs, ChannelState),
|
||||
VMSTATE_BUFFER(rregs, ChannelState),
|
||||
VMSTATE_UINT32(vmstate_dummy, ESCCChannelState),
|
||||
VMSTATE_UINT32(reg, ESCCChannelState),
|
||||
VMSTATE_UINT32(rxint, ESCCChannelState),
|
||||
VMSTATE_UINT32(txint, ESCCChannelState),
|
||||
VMSTATE_UINT32(rxint_under_svc, ESCCChannelState),
|
||||
VMSTATE_UINT32(txint_under_svc, ESCCChannelState),
|
||||
VMSTATE_UINT8(rx, ESCCChannelState),
|
||||
VMSTATE_UINT8(tx, ESCCChannelState),
|
||||
VMSTATE_BUFFER(wregs, ESCCChannelState),
|
||||
VMSTATE_BUFFER(rregs, ESCCChannelState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
@ -684,44 +639,15 @@ static const VMStateDescription vmstate_escc = {
|
|||
.minimum_version_id = 1,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_STRUCT_ARRAY(chn, ESCCState, 2, 2, vmstate_escc_chn,
|
||||
ChannelState),
|
||||
ESCCChannelState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
MemoryRegion *escc_init(hwaddr base, qemu_irq irqA, qemu_irq irqB,
|
||||
Chardev *chrA, Chardev *chrB,
|
||||
int clock, int it_shift)
|
||||
{
|
||||
DeviceState *dev;
|
||||
SysBusDevice *s;
|
||||
ESCCState *d;
|
||||
|
||||
dev = qdev_create(NULL, TYPE_ESCC);
|
||||
qdev_prop_set_uint32(dev, "disabled", 0);
|
||||
qdev_prop_set_uint32(dev, "frequency", clock);
|
||||
qdev_prop_set_uint32(dev, "it_shift", it_shift);
|
||||
qdev_prop_set_chr(dev, "chrB", chrB);
|
||||
qdev_prop_set_chr(dev, "chrA", chrA);
|
||||
qdev_prop_set_uint32(dev, "chnBtype", ser);
|
||||
qdev_prop_set_uint32(dev, "chnAtype", ser);
|
||||
qdev_init_nofail(dev);
|
||||
s = SYS_BUS_DEVICE(dev);
|
||||
sysbus_connect_irq(s, 0, irqB);
|
||||
sysbus_connect_irq(s, 1, irqA);
|
||||
if (base) {
|
||||
sysbus_mmio_map(s, 0, base);
|
||||
}
|
||||
|
||||
d = ESCC(s);
|
||||
return &d->mmio;
|
||||
}
|
||||
|
||||
|
||||
static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src,
|
||||
InputEvent *evt)
|
||||
{
|
||||
ChannelState *s = (ChannelState *)dev;
|
||||
ESCCChannelState *s = (ESCCChannelState *)dev;
|
||||
int qcode, keycode;
|
||||
InputKeyEvent *key;
|
||||
|
||||
|
@ -777,7 +703,7 @@ static QemuInputHandler sunkbd_handler = {
|
|||
.event = sunkbd_handle_event,
|
||||
};
|
||||
|
||||
static void handle_kbd_command(ChannelState *s, int val)
|
||||
static void handle_kbd_command(ESCCChannelState *s, int val)
|
||||
{
|
||||
trace_escc_kbd_command(val);
|
||||
if (s->led_mode) { // Ignore led byte
|
||||
|
@ -808,7 +734,7 @@ static void handle_kbd_command(ChannelState *s, int val)
|
|||
static void sunmouse_event(void *opaque,
|
||||
int dx, int dy, int dz, int buttons_state)
|
||||
{
|
||||
ChannelState *s = opaque;
|
||||
ESCCChannelState *s = opaque;
|
||||
int ch;
|
||||
|
||||
trace_escc_sunmouse_event(dx, dy, buttons_state);
|
||||
|
@ -847,27 +773,6 @@ static void sunmouse_event(void *opaque,
|
|||
put_queue(s, 0);
|
||||
}
|
||||
|
||||
void slavio_serial_ms_kbd_init(hwaddr base, qemu_irq irq,
|
||||
int disabled, int clock, int it_shift)
|
||||
{
|
||||
DeviceState *dev;
|
||||
SysBusDevice *s;
|
||||
|
||||
dev = qdev_create(NULL, TYPE_ESCC);
|
||||
qdev_prop_set_uint32(dev, "disabled", disabled);
|
||||
qdev_prop_set_uint32(dev, "frequency", clock);
|
||||
qdev_prop_set_uint32(dev, "it_shift", it_shift);
|
||||
qdev_prop_set_chr(dev, "chrB", NULL);
|
||||
qdev_prop_set_chr(dev, "chrA", NULL);
|
||||
qdev_prop_set_uint32(dev, "chnBtype", mouse);
|
||||
qdev_prop_set_uint32(dev, "chnAtype", kbd);
|
||||
qdev_init_nofail(dev);
|
||||
s = SYS_BUS_DEVICE(dev);
|
||||
sysbus_connect_irq(s, 0, irq);
|
||||
sysbus_connect_irq(s, 1, irq);
|
||||
sysbus_mmio_map(s, 0, base);
|
||||
}
|
||||
|
||||
static void escc_init1(Object *obj)
|
||||
{
|
||||
ESCCState *s = ESCC(obj);
|
||||
|
@ -904,11 +809,11 @@ static void escc_realize(DeviceState *dev, Error **errp)
|
|||
}
|
||||
}
|
||||
|
||||
if (s->chn[0].type == mouse) {
|
||||
if (s->chn[0].type == escc_mouse) {
|
||||
qemu_add_mouse_event_handler(sunmouse_event, &s->chn[0], 0,
|
||||
"QEMU Sun Mouse");
|
||||
}
|
||||
if (s->chn[1].type == kbd) {
|
||||
if (s->chn[1].type == escc_kbd) {
|
||||
s->chn[1].hs = qemu_input_handler_register((DeviceState *)(&s->chn[1]),
|
||||
&sunkbd_handler);
|
||||
}
|
||||
|
|
|
@ -26,447 +26,123 @@
|
|||
#include "hw/hw.h"
|
||||
#include "hw/ppc/mac.h"
|
||||
#include "hw/input/adb.h"
|
||||
#include "hw/misc/mos6522.h"
|
||||
#include "hw/misc/macio/cuda.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "qemu/log.h"
|
||||
|
||||
/* XXX: implement all timer modes */
|
||||
|
||||
/* debug CUDA */
|
||||
//#define DEBUG_CUDA
|
||||
|
||||
/* debug CUDA packets */
|
||||
//#define DEBUG_CUDA_PACKET
|
||||
|
||||
#ifdef DEBUG_CUDA
|
||||
#define CUDA_DPRINTF(fmt, ...) \
|
||||
do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
|
||||
#else
|
||||
#define CUDA_DPRINTF(fmt, ...)
|
||||
#endif
|
||||
#include "trace.h"
|
||||
|
||||
/* Bits in B data register: all active low */
|
||||
#define TREQ 0x08 /* Transfer request (input) */
|
||||
#define TACK 0x10 /* Transfer acknowledge (output) */
|
||||
#define TIP 0x20 /* Transfer in progress (output) */
|
||||
|
||||
/* Bits in ACR */
|
||||
#define SR_CTRL 0x1c /* Shift register control bits */
|
||||
#define SR_EXT 0x0c /* Shift on external clock */
|
||||
#define SR_OUT 0x10 /* Shift out if 1 */
|
||||
|
||||
/* Bits in IFR and IER */
|
||||
#define IER_SET 0x80 /* set bits in IER */
|
||||
#define IER_CLR 0 /* clear bits in IER */
|
||||
#define SR_INT 0x04 /* Shift register full/empty */
|
||||
#define SR_DATA_INT 0x08
|
||||
#define SR_CLOCK_INT 0x10
|
||||
#define T1_INT 0x40 /* Timer 1 interrupt */
|
||||
#define T2_INT 0x20 /* Timer 2 interrupt */
|
||||
|
||||
/* Bits in ACR */
|
||||
#define T1MODE 0xc0 /* Timer 1 mode */
|
||||
#define T1MODE_CONT 0x40 /* continuous interrupts */
|
||||
#define TREQ 0x08 /* Transfer request (input) */
|
||||
#define TACK 0x10 /* Transfer acknowledge (output) */
|
||||
#define TIP 0x20 /* Transfer in progress (output) */
|
||||
|
||||
/* commands (1st byte) */
|
||||
#define ADB_PACKET 0
|
||||
#define CUDA_PACKET 1
|
||||
#define ERROR_PACKET 2
|
||||
#define TIMER_PACKET 3
|
||||
#define POWER_PACKET 4
|
||||
#define MACIIC_PACKET 5
|
||||
#define PMU_PACKET 6
|
||||
|
||||
|
||||
/* CUDA commands (2nd byte) */
|
||||
#define CUDA_WARM_START 0x0
|
||||
#define CUDA_AUTOPOLL 0x1
|
||||
#define CUDA_GET_6805_ADDR 0x2
|
||||
#define CUDA_GET_TIME 0x3
|
||||
#define CUDA_GET_PRAM 0x7
|
||||
#define CUDA_SET_6805_ADDR 0x8
|
||||
#define CUDA_SET_TIME 0x9
|
||||
#define CUDA_POWERDOWN 0xa
|
||||
#define CUDA_POWERUP_TIME 0xb
|
||||
#define CUDA_SET_PRAM 0xc
|
||||
#define CUDA_MS_RESET 0xd
|
||||
#define CUDA_SEND_DFAC 0xe
|
||||
#define CUDA_BATTERY_SWAP_SENSE 0x10
|
||||
#define CUDA_RESET_SYSTEM 0x11
|
||||
#define CUDA_SET_IPL 0x12
|
||||
#define CUDA_FILE_SERVER_FLAG 0x13
|
||||
#define CUDA_SET_AUTO_RATE 0x14
|
||||
#define CUDA_GET_AUTO_RATE 0x16
|
||||
#define CUDA_SET_DEVICE_LIST 0x19
|
||||
#define CUDA_GET_DEVICE_LIST 0x1a
|
||||
#define CUDA_SET_ONE_SECOND_MODE 0x1b
|
||||
#define CUDA_SET_POWER_MESSAGES 0x21
|
||||
#define CUDA_GET_SET_IIC 0x22
|
||||
#define CUDA_WAKEUP 0x23
|
||||
#define CUDA_TIMER_TICKLE 0x24
|
||||
#define CUDA_COMBINED_FORMAT_IIC 0x25
|
||||
#define ADB_PACKET 0
|
||||
#define CUDA_PACKET 1
|
||||
#define ERROR_PACKET 2
|
||||
#define TIMER_PACKET 3
|
||||
#define POWER_PACKET 4
|
||||
#define MACIIC_PACKET 5
|
||||
#define PMU_PACKET 6
|
||||
|
||||
#define CUDA_TIMER_FREQ (4700000 / 6)
|
||||
|
||||
/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
|
||||
#define RTC_OFFSET 2082844800
|
||||
|
||||
/* CUDA registers */
|
||||
#define CUDA_REG_B 0x00
|
||||
#define CUDA_REG_A 0x01
|
||||
#define CUDA_REG_DIRB 0x02
|
||||
#define CUDA_REG_DIRA 0x03
|
||||
#define CUDA_REG_T1CL 0x04
|
||||
#define CUDA_REG_T1CH 0x05
|
||||
#define CUDA_REG_T1LL 0x06
|
||||
#define CUDA_REG_T1LH 0x07
|
||||
#define CUDA_REG_T2CL 0x08
|
||||
#define CUDA_REG_T2CH 0x09
|
||||
#define CUDA_REG_SR 0x0a
|
||||
#define CUDA_REG_ACR 0x0b
|
||||
#define CUDA_REG_PCR 0x0c
|
||||
#define CUDA_REG_IFR 0x0d
|
||||
#define CUDA_REG_IER 0x0e
|
||||
#define CUDA_REG_ANH 0x0f
|
||||
|
||||
static void cuda_update(CUDAState *s);
|
||||
static void cuda_receive_packet_from_host(CUDAState *s,
|
||||
const uint8_t *data, int len);
|
||||
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
|
||||
int64_t current_time);
|
||||
|
||||
static void cuda_update_irq(CUDAState *s)
|
||||
{
|
||||
if (s->ifr & s->ier & (SR_INT | T1_INT | T2_INT)) {
|
||||
qemu_irq_raise(s->irq);
|
||||
} else {
|
||||
qemu_irq_lower(s->irq);
|
||||
}
|
||||
}
|
||||
/* MacOS uses timer 1 for calibration on startup, so we use
|
||||
* the timebase frequency and cuda_get_counter_value() with
|
||||
* cuda_get_load_time() to steer MacOS to calculate calibrate its timers
|
||||
* correctly for both TCG and KVM (see commit b981289c49 "PPC: Cuda: Use cuda
|
||||
* timer to expose tbfreq to guest" for more information) */
|
||||
|
||||
static uint64_t get_counter_value(CUDAState *s, CUDATimer *ti)
|
||||
static uint64_t cuda_get_counter_value(MOS6522State *s, MOS6522Timer *ti)
|
||||
{
|
||||
MOS6522CUDAState *mcs = container_of(s, MOS6522CUDAState, parent_obj);
|
||||
CUDAState *cs = mcs->cuda;
|
||||
|
||||
/* Reverse of the tb calculation algorithm that Mac OS X uses on bootup */
|
||||
uint64_t tb_diff = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
|
||||
s->tb_frequency, NANOSECONDS_PER_SECOND) -
|
||||
cs->tb_frequency, NANOSECONDS_PER_SECOND) -
|
||||
ti->load_time;
|
||||
|
||||
return (tb_diff * 0xBF401675E5DULL) / (s->tb_frequency << 24);
|
||||
return (tb_diff * 0xBF401675E5DULL) / (cs->tb_frequency << 24);
|
||||
}
|
||||
|
||||
static uint64_t get_counter_load_time(CUDAState *s, CUDATimer *ti)
|
||||
static uint64_t cuda_get_load_time(MOS6522State *s, MOS6522Timer *ti)
|
||||
{
|
||||
MOS6522CUDAState *mcs = container_of(s, MOS6522CUDAState, parent_obj);
|
||||
CUDAState *cs = mcs->cuda;
|
||||
|
||||
uint64_t load_time = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
|
||||
s->tb_frequency, NANOSECONDS_PER_SECOND);
|
||||
cs->tb_frequency, NANOSECONDS_PER_SECOND);
|
||||
return load_time;
|
||||
}
|
||||
|
||||
static unsigned int get_counter(CUDAState *s, CUDATimer *ti)
|
||||
{
|
||||
int64_t d;
|
||||
unsigned int counter;
|
||||
|
||||
d = get_counter_value(s, ti);
|
||||
|
||||
if (ti->index == 0) {
|
||||
/* the timer goes down from latch to -1 (period of latch + 2) */
|
||||
if (d <= (ti->counter_value + 1)) {
|
||||
counter = (ti->counter_value - d) & 0xffff;
|
||||
} else {
|
||||
counter = (d - (ti->counter_value + 1)) % (ti->latch + 2);
|
||||
counter = (ti->latch - counter) & 0xffff;
|
||||
}
|
||||
} else {
|
||||
counter = (ti->counter_value - d) & 0xffff;
|
||||
}
|
||||
return counter;
|
||||
}
|
||||
|
||||
static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
|
||||
{
|
||||
CUDA_DPRINTF("T%d.counter=%d\n", 1 + ti->index, val);
|
||||
ti->load_time = get_counter_load_time(s, ti);
|
||||
ti->counter_value = val;
|
||||
cuda_timer_update(s, ti, ti->load_time);
|
||||
}
|
||||
|
||||
static int64_t get_next_irq_time(CUDATimer *ti, int64_t current_time)
|
||||
{
|
||||
int64_t d, next_time;
|
||||
unsigned int counter;
|
||||
|
||||
/* current counter value */
|
||||
d = muldiv64(current_time - ti->load_time,
|
||||
ti->frequency, NANOSECONDS_PER_SECOND);
|
||||
/* the timer goes down from latch to -1 (period of latch + 2) */
|
||||
if (d <= (ti->counter_value + 1)) {
|
||||
counter = (ti->counter_value - d) & 0xffff;
|
||||
} else {
|
||||
counter = (d - (ti->counter_value + 1)) % (ti->latch + 2);
|
||||
counter = (ti->latch - counter) & 0xffff;
|
||||
}
|
||||
|
||||
/* Note: we consider the irq is raised on 0 */
|
||||
if (counter == 0xffff) {
|
||||
next_time = d + ti->latch + 1;
|
||||
} else if (counter == 0) {
|
||||
next_time = d + ti->latch + 2;
|
||||
} else {
|
||||
next_time = d + counter;
|
||||
}
|
||||
CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
|
||||
ti->latch, d, next_time - d);
|
||||
next_time = muldiv64(next_time, NANOSECONDS_PER_SECOND, ti->frequency) +
|
||||
ti->load_time;
|
||||
if (next_time <= current_time) {
|
||||
next_time = current_time + 1;
|
||||
}
|
||||
return next_time;
|
||||
}
|
||||
|
||||
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
|
||||
int64_t current_time)
|
||||
{
|
||||
if (!ti->timer)
|
||||
return;
|
||||
if (ti->index == 0 && (s->acr & T1MODE) != T1MODE_CONT) {
|
||||
timer_del(ti->timer);
|
||||
} else {
|
||||
ti->next_irq_time = get_next_irq_time(ti, current_time);
|
||||
timer_mod(ti->timer, ti->next_irq_time);
|
||||
}
|
||||
}
|
||||
|
||||
static void cuda_timer1(void *opaque)
|
||||
{
|
||||
CUDAState *s = opaque;
|
||||
CUDATimer *ti = &s->timers[0];
|
||||
|
||||
cuda_timer_update(s, ti, ti->next_irq_time);
|
||||
s->ifr |= T1_INT;
|
||||
cuda_update_irq(s);
|
||||
}
|
||||
|
||||
static void cuda_timer2(void *opaque)
|
||||
{
|
||||
CUDAState *s = opaque;
|
||||
CUDATimer *ti = &s->timers[1];
|
||||
|
||||
cuda_timer_update(s, ti, ti->next_irq_time);
|
||||
s->ifr |= T2_INT;
|
||||
cuda_update_irq(s);
|
||||
}
|
||||
|
||||
static void cuda_set_sr_int(void *opaque)
|
||||
{
|
||||
CUDAState *s = opaque;
|
||||
MOS6522CUDAState *mcs = s->mos6522_cuda;
|
||||
MOS6522State *ms = MOS6522(mcs);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
|
||||
CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);
|
||||
s->ifr |= SR_INT;
|
||||
cuda_update_irq(s);
|
||||
mdc->set_sr_int(ms);
|
||||
}
|
||||
|
||||
static void cuda_delay_set_sr_int(CUDAState *s)
|
||||
{
|
||||
MOS6522CUDAState *mcs = s->mos6522_cuda;
|
||||
MOS6522State *ms = MOS6522(mcs);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
int64_t expire;
|
||||
|
||||
if (s->dirb == 0xff) {
|
||||
/* Not in Mac OS, fire the IRQ directly */
|
||||
cuda_set_sr_int(s);
|
||||
if (ms->dirb == 0xff || s->sr_delay_ns == 0) {
|
||||
/* Disabled or not in Mac OS, fire the IRQ directly */
|
||||
mdc->set_sr_int(ms);
|
||||
return;
|
||||
}
|
||||
|
||||
CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);
|
||||
trace_cuda_delay_set_sr_int();
|
||||
|
||||
expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 300 * SCALE_US;
|
||||
expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->sr_delay_ns;
|
||||
timer_mod(s->sr_delay_timer, expire);
|
||||
}
|
||||
|
||||
static uint64_t cuda_read(void *opaque, hwaddr addr, unsigned size)
|
||||
{
|
||||
CUDAState *s = opaque;
|
||||
uint32_t val;
|
||||
|
||||
addr = (addr >> 9) & 0xf;
|
||||
switch(addr) {
|
||||
case CUDA_REG_B:
|
||||
val = s->b;
|
||||
break;
|
||||
case CUDA_REG_A:
|
||||
val = s->a;
|
||||
break;
|
||||
case CUDA_REG_DIRB:
|
||||
val = s->dirb;
|
||||
break;
|
||||
case CUDA_REG_DIRA:
|
||||
val = s->dira;
|
||||
break;
|
||||
case CUDA_REG_T1CL:
|
||||
val = get_counter(s, &s->timers[0]) & 0xff;
|
||||
s->ifr &= ~T1_INT;
|
||||
cuda_update_irq(s);
|
||||
break;
|
||||
case CUDA_REG_T1CH:
|
||||
val = get_counter(s, &s->timers[0]) >> 8;
|
||||
cuda_update_irq(s);
|
||||
break;
|
||||
case CUDA_REG_T1LL:
|
||||
val = s->timers[0].latch & 0xff;
|
||||
break;
|
||||
case CUDA_REG_T1LH:
|
||||
/* XXX: check this */
|
||||
val = (s->timers[0].latch >> 8) & 0xff;
|
||||
break;
|
||||
case CUDA_REG_T2CL:
|
||||
val = get_counter(s, &s->timers[1]) & 0xff;
|
||||
s->ifr &= ~T2_INT;
|
||||
cuda_update_irq(s);
|
||||
break;
|
||||
case CUDA_REG_T2CH:
|
||||
val = get_counter(s, &s->timers[1]) >> 8;
|
||||
break;
|
||||
case CUDA_REG_SR:
|
||||
val = s->sr;
|
||||
s->ifr &= ~(SR_INT | SR_CLOCK_INT | SR_DATA_INT);
|
||||
cuda_update_irq(s);
|
||||
break;
|
||||
case CUDA_REG_ACR:
|
||||
val = s->acr;
|
||||
break;
|
||||
case CUDA_REG_PCR:
|
||||
val = s->pcr;
|
||||
break;
|
||||
case CUDA_REG_IFR:
|
||||
val = s->ifr;
|
||||
if (s->ifr & s->ier) {
|
||||
val |= 0x80;
|
||||
}
|
||||
break;
|
||||
case CUDA_REG_IER:
|
||||
val = s->ier | 0x80;
|
||||
break;
|
||||
default:
|
||||
case CUDA_REG_ANH:
|
||||
val = s->anh;
|
||||
break;
|
||||
}
|
||||
if (addr != CUDA_REG_IFR || val != 0) {
|
||||
CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static void cuda_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
|
||||
{
|
||||
CUDAState *s = opaque;
|
||||
|
||||
addr = (addr >> 9) & 0xf;
|
||||
CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);
|
||||
|
||||
switch(addr) {
|
||||
case CUDA_REG_B:
|
||||
s->b = (s->b & ~s->dirb) | (val & s->dirb);
|
||||
cuda_update(s);
|
||||
break;
|
||||
case CUDA_REG_A:
|
||||
s->a = (s->a & ~s->dira) | (val & s->dira);
|
||||
break;
|
||||
case CUDA_REG_DIRB:
|
||||
s->dirb = val;
|
||||
break;
|
||||
case CUDA_REG_DIRA:
|
||||
s->dira = val;
|
||||
break;
|
||||
case CUDA_REG_T1CL:
|
||||
s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
|
||||
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
|
||||
break;
|
||||
case CUDA_REG_T1CH:
|
||||
s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
|
||||
s->ifr &= ~T1_INT;
|
||||
set_counter(s, &s->timers[0], s->timers[0].latch);
|
||||
break;
|
||||
case CUDA_REG_T1LL:
|
||||
s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
|
||||
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
|
||||
break;
|
||||
case CUDA_REG_T1LH:
|
||||
s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
|
||||
s->ifr &= ~T1_INT;
|
||||
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
|
||||
break;
|
||||
case CUDA_REG_T2CL:
|
||||
s->timers[1].latch = (s->timers[1].latch & 0xff00) | val;
|
||||
break;
|
||||
case CUDA_REG_T2CH:
|
||||
/* To ensure T2 generates an interrupt on zero crossing with the
|
||||
common timer code, write the value directly from the latch to
|
||||
the counter */
|
||||
s->timers[1].latch = (s->timers[1].latch & 0xff) | (val << 8);
|
||||
s->ifr &= ~T2_INT;
|
||||
set_counter(s, &s->timers[1], s->timers[1].latch);
|
||||
break;
|
||||
case CUDA_REG_SR:
|
||||
s->sr = val;
|
||||
break;
|
||||
case CUDA_REG_ACR:
|
||||
s->acr = val;
|
||||
cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
|
||||
break;
|
||||
case CUDA_REG_PCR:
|
||||
s->pcr = val;
|
||||
break;
|
||||
case CUDA_REG_IFR:
|
||||
/* reset bits */
|
||||
s->ifr &= ~val;
|
||||
cuda_update_irq(s);
|
||||
break;
|
||||
case CUDA_REG_IER:
|
||||
if (val & IER_SET) {
|
||||
/* set bits */
|
||||
s->ier |= val & 0x7f;
|
||||
} else {
|
||||
/* reset bits */
|
||||
s->ier &= ~val;
|
||||
}
|
||||
cuda_update_irq(s);
|
||||
break;
|
||||
default:
|
||||
case CUDA_REG_ANH:
|
||||
s->anh = val;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* NOTE: TIP and TREQ are negated */
|
||||
static void cuda_update(CUDAState *s)
|
||||
{
|
||||
MOS6522CUDAState *mcs = s->mos6522_cuda;
|
||||
MOS6522State *ms = MOS6522(mcs);
|
||||
int packet_received, len;
|
||||
|
||||
packet_received = 0;
|
||||
if (!(s->b & TIP)) {
|
||||
if (!(ms->b & TIP)) {
|
||||
/* transfer requested from host */
|
||||
|
||||
if (s->acr & SR_OUT) {
|
||||
if (ms->acr & SR_OUT) {
|
||||
/* data output */
|
||||
if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
|
||||
if ((ms->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
|
||||
if (s->data_out_index < sizeof(s->data_out)) {
|
||||
CUDA_DPRINTF("send: %02x\n", s->sr);
|
||||
s->data_out[s->data_out_index++] = s->sr;
|
||||
trace_cuda_data_send(ms->sr);
|
||||
s->data_out[s->data_out_index++] = ms->sr;
|
||||
cuda_delay_set_sr_int(s);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (s->data_in_index < s->data_in_size) {
|
||||
/* data input */
|
||||
if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
|
||||
s->sr = s->data_in[s->data_in_index++];
|
||||
CUDA_DPRINTF("recv: %02x\n", s->sr);
|
||||
if ((ms->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
|
||||
ms->sr = s->data_in[s->data_in_index++];
|
||||
trace_cuda_data_recv(ms->sr);
|
||||
/* indicate end of transfer */
|
||||
if (s->data_in_index >= s->data_in_size) {
|
||||
s->b = (s->b | TREQ);
|
||||
ms->b = (ms->b | TREQ);
|
||||
}
|
||||
cuda_delay_set_sr_int(s);
|
||||
}
|
||||
|
@ -474,12 +150,13 @@ static void cuda_update(CUDAState *s)
|
|||
}
|
||||
} else {
|
||||
/* no transfer requested: handle sync case */
|
||||
if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
|
||||
if ((s->last_b & TIP) && (ms->b & TACK) != (s->last_b & TACK)) {
|
||||
/* update TREQ state each time TACK change state */
|
||||
if (s->b & TACK)
|
||||
s->b = (s->b | TREQ);
|
||||
else
|
||||
s->b = (s->b & ~TREQ);
|
||||
if (ms->b & TACK) {
|
||||
ms->b = (ms->b | TREQ);
|
||||
} else {
|
||||
ms->b = (ms->b & ~TREQ);
|
||||
}
|
||||
cuda_delay_set_sr_int(s);
|
||||
} else {
|
||||
if (!(s->last_b & TIP)) {
|
||||
|
@ -490,13 +167,13 @@ static void cuda_update(CUDAState *s)
|
|||
}
|
||||
/* signal if there is data to read */
|
||||
if (s->data_in_index < s->data_in_size) {
|
||||
s->b = (s->b & ~TREQ);
|
||||
ms->b = (ms->b & ~TREQ);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s->last_acr = s->acr;
|
||||
s->last_b = s->b;
|
||||
s->last_acr = ms->acr;
|
||||
s->last_b = ms->b;
|
||||
|
||||
/* NOTE: cuda_receive_packet_from_host() can call cuda_update()
|
||||
recursively */
|
||||
|
@ -510,15 +187,13 @@ static void cuda_update(CUDAState *s)
|
|||
static void cuda_send_packet_to_host(CUDAState *s,
|
||||
const uint8_t *data, int len)
|
||||
{
|
||||
#ifdef DEBUG_CUDA_PACKET
|
||||
{
|
||||
int i;
|
||||
printf("cuda_send_packet_to_host:\n");
|
||||
for(i = 0; i < len; i++)
|
||||
printf(" %02x", data[i]);
|
||||
printf("\n");
|
||||
int i;
|
||||
|
||||
trace_cuda_packet_send(len);
|
||||
for (i = 0; i < len; i++) {
|
||||
trace_cuda_packet_send_data(i, data[i]);
|
||||
}
|
||||
#endif
|
||||
|
||||
memcpy(s->data_in, data, len);
|
||||
s->data_in_size = len;
|
||||
s->data_in_index = 0;
|
||||
|
@ -538,9 +213,8 @@ static void cuda_adb_poll(void *opaque)
|
|||
obuf[1] = 0x40; /* polled data */
|
||||
cuda_send_packet_to_host(s, obuf, olen + 2);
|
||||
}
|
||||
timer_mod(s->adb_poll_timer,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||
(NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
|
||||
timer_mod(s->adb_poll_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||
(NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
|
||||
}
|
||||
|
||||
/* description of commands */
|
||||
|
@ -723,7 +397,7 @@ static void cuda_receive_packet(CUDAState *s,
|
|||
for (i = 0; i < ARRAY_SIZE(handlers); i++) {
|
||||
const CudaCommand *desc = &handlers[i];
|
||||
if (desc->command == data[0]) {
|
||||
CUDA_DPRINTF("handling command %s\n", desc->name);
|
||||
trace_cuda_receive_packet_cmd(desc->name);
|
||||
out_len = 0;
|
||||
if (desc->handler(s, data + 1, len - 1, obuf + 3, &out_len)) {
|
||||
cuda_send_packet_to_host(s, obuf, 3 + out_len);
|
||||
|
@ -752,15 +426,13 @@ static void cuda_receive_packet(CUDAState *s,
|
|||
static void cuda_receive_packet_from_host(CUDAState *s,
|
||||
const uint8_t *data, int len)
|
||||
{
|
||||
#ifdef DEBUG_CUDA_PACKET
|
||||
{
|
||||
int i;
|
||||
printf("cuda_receive_packet_from_host:\n");
|
||||
for(i = 0; i < len; i++)
|
||||
printf(" %02x", data[i]);
|
||||
printf("\n");
|
||||
int i;
|
||||
|
||||
trace_cuda_packet_receive(len);
|
||||
for (i = 0; i < len; i++) {
|
||||
trace_cuda_packet_receive_data(i, data[i]);
|
||||
}
|
||||
#endif
|
||||
|
||||
switch(data[0]) {
|
||||
case ADB_PACKET:
|
||||
{
|
||||
|
@ -787,9 +459,30 @@ static void cuda_receive_packet_from_host(CUDAState *s,
|
|||
}
|
||||
}
|
||||
|
||||
static const MemoryRegionOps cuda_ops = {
|
||||
.read = cuda_read,
|
||||
.write = cuda_write,
|
||||
static uint64_t mos6522_cuda_read(void *opaque, hwaddr addr, unsigned size)
|
||||
{
|
||||
CUDAState *s = opaque;
|
||||
MOS6522CUDAState *mcs = s->mos6522_cuda;
|
||||
MOS6522State *ms = MOS6522(mcs);
|
||||
|
||||
addr = (addr >> 9) & 0xf;
|
||||
return mos6522_read(ms, addr, size);
|
||||
}
|
||||
|
||||
static void mos6522_cuda_write(void *opaque, hwaddr addr, uint64_t val,
|
||||
unsigned size)
|
||||
{
|
||||
CUDAState *s = opaque;
|
||||
MOS6522CUDAState *mcs = s->mos6522_cuda;
|
||||
MOS6522State *ms = MOS6522(mcs);
|
||||
|
||||
addr = (addr >> 9) & 0xf;
|
||||
mos6522_write(ms, addr, val, size);
|
||||
}
|
||||
|
||||
static const MemoryRegionOps mos6522_cuda_ops = {
|
||||
.read = mos6522_cuda_read,
|
||||
.write = mos6522_cuda_write,
|
||||
.endianness = DEVICE_BIG_ENDIAN,
|
||||
.valid = {
|
||||
.min_access_size = 1,
|
||||
|
@ -797,44 +490,13 @@ static const MemoryRegionOps cuda_ops = {
|
|||
},
|
||||
};
|
||||
|
||||
static bool cuda_timer_exist(void *opaque, int version_id)
|
||||
{
|
||||
CUDATimer *s = opaque;
|
||||
|
||||
return s->timer != NULL;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_cuda_timer = {
|
||||
.name = "cuda_timer",
|
||||
.version_id = 0,
|
||||
.minimum_version_id = 0,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT16(latch, CUDATimer),
|
||||
VMSTATE_UINT16(counter_value, CUDATimer),
|
||||
VMSTATE_INT64(load_time, CUDATimer),
|
||||
VMSTATE_INT64(next_irq_time, CUDATimer),
|
||||
VMSTATE_TIMER_PTR_TEST(timer, CUDATimer, cuda_timer_exist),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
static const VMStateDescription vmstate_cuda = {
|
||||
.name = "cuda",
|
||||
.version_id = 4,
|
||||
.minimum_version_id = 4,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT8(a, CUDAState),
|
||||
VMSTATE_UINT8(b, CUDAState),
|
||||
VMSTATE_UINT8(last_b, CUDAState),
|
||||
VMSTATE_UINT8(dira, CUDAState),
|
||||
VMSTATE_UINT8(dirb, CUDAState),
|
||||
VMSTATE_UINT8(sr, CUDAState),
|
||||
VMSTATE_UINT8(acr, CUDAState),
|
||||
VMSTATE_UINT8(last_acr, CUDAState),
|
||||
VMSTATE_UINT8(pcr, CUDAState),
|
||||
VMSTATE_UINT8(ifr, CUDAState),
|
||||
VMSTATE_UINT8(ier, CUDAState),
|
||||
VMSTATE_UINT8(anh, CUDAState),
|
||||
VMSTATE_INT32(data_in_size, CUDAState),
|
||||
VMSTATE_INT32(data_in_index, CUDAState),
|
||||
VMSTATE_INT32(data_out_index, CUDAState),
|
||||
|
@ -844,8 +506,6 @@ static const VMStateDescription vmstate_cuda = {
|
|||
VMSTATE_BUFFER(data_in, CUDAState),
|
||||
VMSTATE_BUFFER(data_out, CUDAState),
|
||||
VMSTATE_UINT32(tick_offset, CUDAState),
|
||||
VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
|
||||
vmstate_cuda_timer, CUDATimer),
|
||||
VMSTATE_TIMER_PTR(adb_poll_timer, CUDAState),
|
||||
VMSTATE_TIMER_PTR(sr_delay_timer, CUDAState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
|
@ -856,61 +516,48 @@ static void cuda_reset(DeviceState *dev)
|
|||
{
|
||||
CUDAState *s = CUDA(dev);
|
||||
|
||||
s->b = 0;
|
||||
s->a = 0;
|
||||
s->dirb = 0xff;
|
||||
s->dira = 0;
|
||||
s->sr = 0;
|
||||
s->acr = 0;
|
||||
s->pcr = 0;
|
||||
s->ifr = 0;
|
||||
s->ier = 0;
|
||||
// s->ier = T1_INT | SR_INT;
|
||||
s->anh = 0;
|
||||
s->data_in_size = 0;
|
||||
s->data_in_index = 0;
|
||||
s->data_out_index = 0;
|
||||
s->autopoll = 0;
|
||||
|
||||
s->timers[0].latch = 0xffff;
|
||||
set_counter(s, &s->timers[0], 0xffff);
|
||||
|
||||
s->timers[1].latch = 0xffff;
|
||||
|
||||
s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s);
|
||||
}
|
||||
|
||||
static void cuda_realizefn(DeviceState *dev, Error **errp)
|
||||
static void cuda_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
CUDAState *s = CUDA(dev);
|
||||
SysBusDevice *sbd;
|
||||
MOS6522State *ms;
|
||||
DeviceState *d;
|
||||
struct tm tm;
|
||||
|
||||
s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
|
||||
s->timers[0].frequency = CUDA_TIMER_FREQ;
|
||||
s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer2, s);
|
||||
s->timers[1].frequency = (SCALE_US * 6000) / 4700;
|
||||
d = qdev_create(NULL, TYPE_MOS6522_CUDA);
|
||||
object_property_set_link(OBJECT(d), OBJECT(s), "cuda", errp);
|
||||
qdev_init_nofail(d);
|
||||
s->mos6522_cuda = MOS6522_CUDA(d);
|
||||
|
||||
/* Pass IRQ from 6522 */
|
||||
ms = MOS6522(d);
|
||||
sbd = SYS_BUS_DEVICE(s);
|
||||
sysbus_pass_irq(sbd, SYS_BUS_DEVICE(ms));
|
||||
|
||||
qemu_get_timedate(&tm, 0);
|
||||
s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
|
||||
|
||||
s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s);
|
||||
s->sr_delay_ns = 300 * SCALE_US;
|
||||
|
||||
s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
|
||||
s->autopoll_rate_ms = 20;
|
||||
s->adb_poll_mask = 0xffff;
|
||||
s->autopoll_rate_ms = 20;
|
||||
}
|
||||
|
||||
static void cuda_initfn(Object *obj)
|
||||
static void cuda_init(Object *obj)
|
||||
{
|
||||
SysBusDevice *d = SYS_BUS_DEVICE(obj);
|
||||
CUDAState *s = CUDA(obj);
|
||||
int i;
|
||||
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
|
||||
|
||||
memory_region_init_io(&s->mem, obj, &cuda_ops, s, "cuda", 0x2000);
|
||||
sysbus_init_mmio(d, &s->mem);
|
||||
sysbus_init_irq(d, &s->irq);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
|
||||
s->timers[i].index = i;
|
||||
}
|
||||
memory_region_init_io(&s->mem, obj, &mos6522_cuda_ops, s, "cuda", 0x2000);
|
||||
sysbus_init_mmio(sbd, &s->mem);
|
||||
|
||||
qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
|
||||
DEVICE(obj), "adb.0");
|
||||
|
@ -925,7 +572,7 @@ static void cuda_class_init(ObjectClass *oc, void *data)
|
|||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
|
||||
dc->realize = cuda_realizefn;
|
||||
dc->realize = cuda_realize;
|
||||
dc->reset = cuda_reset;
|
||||
dc->vmsd = &vmstate_cuda;
|
||||
dc->props = cuda_properties;
|
||||
|
@ -936,12 +583,62 @@ static const TypeInfo cuda_type_info = {
|
|||
.name = TYPE_CUDA,
|
||||
.parent = TYPE_SYS_BUS_DEVICE,
|
||||
.instance_size = sizeof(CUDAState),
|
||||
.instance_init = cuda_initfn,
|
||||
.instance_init = cuda_init,
|
||||
.class_init = cuda_class_init,
|
||||
};
|
||||
|
||||
static void mos6522_cuda_portB_write(MOS6522State *s)
|
||||
{
|
||||
MOS6522CUDAState *mcs = container_of(s, MOS6522CUDAState, parent_obj);
|
||||
|
||||
cuda_update(mcs->cuda);
|
||||
}
|
||||
|
||||
static void mos6522_cuda_realize(DeviceState *dev, Error **errp)
|
||||
{
|
||||
MOS6522State *ms = MOS6522(dev);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
|
||||
mdc->parent_realize(dev, errp);
|
||||
|
||||
ms->timers[0].frequency = CUDA_TIMER_FREQ;
|
||||
ms->timers[1].frequency = (SCALE_US * 6000) / 4700;
|
||||
}
|
||||
|
||||
static void mos6522_cuda_init(Object *obj)
|
||||
{
|
||||
MOS6522CUDAState *s = MOS6522_CUDA(obj);
|
||||
|
||||
object_property_add_link(obj, "cuda", TYPE_CUDA,
|
||||
(Object **) &s->cuda,
|
||||
qdev_prop_allow_set_link_before_realize,
|
||||
0, NULL);
|
||||
}
|
||||
|
||||
static void mos6522_cuda_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
|
||||
|
||||
dc->realize = mos6522_cuda_realize;
|
||||
mdc->portB_write = mos6522_cuda_portB_write;
|
||||
mdc->get_timer1_counter_value = cuda_get_counter_value;
|
||||
mdc->get_timer2_counter_value = cuda_get_counter_value;
|
||||
mdc->get_timer1_load_time = cuda_get_load_time;
|
||||
mdc->get_timer2_load_time = cuda_get_load_time;
|
||||
}
|
||||
|
||||
static const TypeInfo mos6522_cuda_type_info = {
|
||||
.name = TYPE_MOS6522_CUDA,
|
||||
.parent = TYPE_MOS6522,
|
||||
.instance_size = sizeof(MOS6522CUDAState),
|
||||
.instance_init = mos6522_cuda_init,
|
||||
.class_init = mos6522_cuda_class_init,
|
||||
};
|
||||
|
||||
static void cuda_register_types(void)
|
||||
{
|
||||
type_register_static(&mos6522_cuda_type_info);
|
||||
type_register_static(&cuda_type_info);
|
||||
}
|
||||
|
||||
|
|
|
@@ -26,6 +26,7 @@
 #include "qapi/error.h"
 #include "hw/hw.h"
 #include "hw/ppc/mac.h"
+#include "hw/misc/macio/cuda.h"
 #include "hw/pci/pci.h"
 #include "hw/ppc/mac_dbdma.h"
 #include "hw/char/escc.h"
|
||||
|
|
|
@@ -0,0 +1,11 @@
+# See docs/devel/tracing.txt for syntax documentation.
+
+# hw/misc/macio/cuda.c
+cuda_delay_set_sr_int(void) ""
+cuda_data_send(uint8_t data) "send: 0x%02x"
+cuda_data_recv(uint8_t data) "recv: 0x%02x"
+cuda_receive_packet_cmd(const char *cmd) "handling command %s"
+cuda_packet_receive(int len) "length %d"
+cuda_packet_receive_data(int i, const uint8_t data) "[%d] 0x%02x"
+cuda_packet_send(int len) "length %d"
+cuda_packet_send_data(int i, const uint8_t data) "[%d] 0x%02x"
76
hw/ppc/mac.h
76
hw/ppc/mac.h
|
@ -30,6 +30,7 @@
|
|||
#include "hw/sysbus.h"
|
||||
#include "hw/ide/internal.h"
|
||||
#include "hw/input/adb.h"
|
||||
#include "hw/misc/mos6522.h"
|
||||
|
||||
/* SMP is not enabled, for now */
|
||||
#define MAX_CPUS 1
|
||||
|
@ -44,81 +45,6 @@
|
|||
|
||||
#define ESCC_CLOCK 3686400
|
||||
|
||||
/* Cuda */
|
||||
#define TYPE_CUDA "cuda"
|
||||
#define CUDA(obj) OBJECT_CHECK(CUDAState, (obj), TYPE_CUDA)
|
||||
|
||||
/**
|
||||
* CUDATimer:
|
||||
* @counter_value: counter value at load time
|
||||
*/
|
||||
typedef struct CUDATimer {
|
||||
int index;
|
||||
uint16_t latch;
|
||||
uint16_t counter_value;
|
||||
int64_t load_time;
|
||||
int64_t next_irq_time;
|
||||
uint64_t frequency;
|
||||
QEMUTimer *timer;
|
||||
} CUDATimer;
|
||||
|
||||
/**
|
||||
* CUDAState:
|
||||
* @b: B-side data
|
||||
* @a: A-side data
|
||||
* @dirb: B-side direction (1=output)
|
||||
* @dira: A-side direction (1=output)
|
||||
* @sr: Shift register
|
||||
* @acr: Auxiliary control register
|
||||
* @pcr: Peripheral control register
|
||||
* @ifr: Interrupt flag register
|
||||
* @ier: Interrupt enable register
|
||||
* @anh: A-side data, no handshake
|
||||
* @last_b: last value of B register
|
||||
* @last_acr: last value of ACR register
|
||||
*/
|
||||
typedef struct CUDAState {
|
||||
/*< private >*/
|
||||
SysBusDevice parent_obj;
|
||||
/*< public >*/
|
||||
|
||||
MemoryRegion mem;
|
||||
/* cuda registers */
|
||||
uint8_t b;
|
||||
uint8_t a;
|
||||
uint8_t dirb;
|
||||
uint8_t dira;
|
||||
uint8_t sr;
|
||||
uint8_t acr;
|
||||
uint8_t pcr;
|
||||
uint8_t ifr;
|
||||
uint8_t ier;
|
||||
uint8_t anh;
|
||||
|
||||
ADBBusState adb_bus;
|
||||
CUDATimer timers[2];
|
||||
|
||||
uint32_t tick_offset;
|
||||
uint64_t tb_frequency;
|
||||
|
||||
uint8_t last_b;
|
||||
uint8_t last_acr;
|
||||
|
||||
/* MacOS 9 is racy and requires a delay upon setting the SR_INT bit */
|
||||
QEMUTimer *sr_delay_timer;
|
||||
|
||||
int data_in_size;
|
||||
int data_in_index;
|
||||
int data_out_index;
|
||||
|
||||
qemu_irq irq;
|
||||
uint16_t adb_poll_mask;
|
||||
uint8_t autopoll_rate_ms;
|
||||
uint8_t autopoll;
|
||||
uint8_t data_in[128];
|
||||
uint8_t data_out[16];
|
||||
QEMUTimer *adb_poll_timer;
|
||||
} CUDAState;
|
||||
|
||||
/* MacIO */
|
||||
#define TYPE_OLDWORLD_MACIO "macio-oldworld"
|
||||
|
|
|
@ -369,8 +369,23 @@ static void ppc_core99_init(MachineState *machine)
|
|||
}
|
||||
|
||||
/* init basic PC hardware */
|
||||
escc_mem = escc_init(0, pic[0x25], pic[0x24],
|
||||
serial_hds[0], serial_hds[1], ESCC_CLOCK, 4);
|
||||
|
||||
dev = qdev_create(NULL, TYPE_ESCC);
|
||||
qdev_prop_set_uint32(dev, "disabled", 0);
|
||||
qdev_prop_set_uint32(dev, "frequency", ESCC_CLOCK);
|
||||
qdev_prop_set_uint32(dev, "it_shift", 4);
|
||||
qdev_prop_set_chr(dev, "chrA", serial_hds[0]);
|
||||
qdev_prop_set_chr(dev, "chrB", serial_hds[1]);
|
||||
qdev_prop_set_uint32(dev, "chnAtype", escc_serial);
|
||||
qdev_prop_set_uint32(dev, "chnBtype", escc_serial);
|
||||
qdev_init_nofail(dev);
|
||||
|
||||
s = SYS_BUS_DEVICE(dev);
|
||||
sysbus_connect_irq(s, 0, pic[0x24]);
|
||||
sysbus_connect_irq(s, 1, pic[0x25]);
|
||||
|
||||
escc_mem = &ESCC(s)->mmio;
|
||||
|
||||
memory_region_init_alias(escc_bar, NULL, "escc-bar",
|
||||
escc_mem, 0, memory_region_size(escc_mem));
|
||||
|
||||
|
|
|
@ -104,6 +104,7 @@ static void ppc_heathrow_init(MachineState *machine)
|
|||
DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
|
||||
void *fw_cfg;
|
||||
uint64_t tbfreq;
|
||||
SysBusDevice *s;
|
||||
|
||||
linux_boot = (kernel_filename != NULL);
|
||||
|
||||
|
@ -264,8 +265,22 @@ static void ppc_heathrow_init(MachineState *machine)
|
|||
get_system_io());
|
||||
pci_vga_init(pci_bus);
|
||||
|
||||
escc_mem = escc_init(0, pic[0x0f], pic[0x10], serial_hds[0],
|
||||
serial_hds[1], ESCC_CLOCK, 4);
|
||||
dev = qdev_create(NULL, TYPE_ESCC);
|
||||
qdev_prop_set_uint32(dev, "disabled", 0);
|
||||
qdev_prop_set_uint32(dev, "frequency", ESCC_CLOCK);
|
||||
qdev_prop_set_uint32(dev, "it_shift", 4);
|
||||
qdev_prop_set_chr(dev, "chrA", serial_hds[0]);
|
||||
qdev_prop_set_chr(dev, "chrB", serial_hds[1]);
|
||||
qdev_prop_set_uint32(dev, "chnBtype", escc_serial);
|
||||
qdev_prop_set_uint32(dev, "chnAtype", escc_serial);
|
||||
qdev_init_nofail(dev);
|
||||
|
||||
s = SYS_BUS_DEVICE(dev);
|
||||
sysbus_connect_irq(s, 0, pic[0x10]);
|
||||
sysbus_connect_irq(s, 1, pic[0x0f]);
|
||||
|
||||
escc_mem = &ESCC(s)->mmio;
|
||||
|
||||
memory_region_init_alias(escc_bar, NULL, "escc-bar",
|
||||
escc_mem, 0, memory_region_size(escc_mem));
|
||||
|
||||
|
|
|
@@ -0,0 +1,26 @@
+/*
+ * QEMU PowerPC 440 shared definitions
+ *
+ * Copyright (c) 2012 François Revol
+ * Copyright (c) 2016-2018 BALATON Zoltan
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef PPC440_H
+#define PPC440_H
+
+#include "hw/ppc/ppc.h"
+
+void ppc4xx_l2sram_init(CPUPPCState *env);
+void ppc4xx_cpr_init(CPUPPCState *env);
+void ppc4xx_sdr_init(CPUPPCState *env);
+void ppc440_sdram_init(CPUPPCState *env, int nbanks,
+                       MemoryRegion *ram_memories,
+                       hwaddr *ram_bases, hwaddr *ram_sizes,
+                       int do_init);
+void ppc4xx_ahb_init(CPUPPCState *env);
+void ppc460ex_pcie_init(CPUPPCState *env);
+
+#endif /* PPC440_H */
|
File diff suppressed because it is too large
|
@ -99,6 +99,21 @@
|
|||
|
||||
#define PHANDLE_XICP 0x00001111
|
||||
|
||||
/* These two functions implement the VCPU id numbering: one to compute them
|
||||
* all and one to identify thread 0 of a VCORE. Any change to the first one
|
||||
* is likely to have an impact on the second one, so let's keep them close.
|
||||
*/
|
||||
static int spapr_vcpu_id(sPAPRMachineState *spapr, int cpu_index)
|
||||
{
|
||||
return
|
||||
(cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
|
||||
}
|
||||
static bool spapr_is_thread0_in_vcore(sPAPRMachineState *spapr,
|
||||
PowerPCCPU *cpu)
|
||||
{
|
||||
return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
|
||||
}
|
||||
|
||||
static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
|
||||
const char *type_ics,
|
||||
int nr_irqs, Error **errp)
|
||||
|
@ -160,9 +175,9 @@ static void pre_2_10_vmstate_unregister_dummy_icp(int i)
|
|||
(void *)(uintptr_t) i);
|
||||
}
|
||||
|
||||
static inline int xics_max_server_number(void)
|
||||
static int xics_max_server_number(sPAPRMachineState *spapr)
{
return DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads);
return DIV_ROUND_UP(max_cpus * spapr->vsmt, smp_threads);
}

static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
@@ -194,7 +209,7 @@ static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
if (smc->pre_2_10_has_unused_icps) {
int i;

for (i = 0; i < xics_max_server_number(); i++) {
for (i = 0; i < xics_max_server_number(spapr); i++) {
/* Dummy entries get deregistered when real ICPState objects
* are registered during CPU core hotplug.
*/
@@ -209,7 +224,7 @@ static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
int i, ret = 0;
uint32_t servers_prop[smt_threads];
uint32_t gservers_prop[smt_threads * 2];
int index = spapr_vcpu_id(cpu);
int index = spapr_get_vcpu_id(cpu);

if (cpu->compat_pvr) {
ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
@@ -238,7 +253,7 @@ static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,

static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
{
int index = spapr_vcpu_id(cpu);
int index = spapr_get_vcpu_id(cpu);
uint32_t associativity[] = {cpu_to_be32(0x5),
cpu_to_be32(0x0),
cpu_to_be32(0x0),
@@ -337,16 +352,15 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
int ret = 0, offset, cpus_offset;
CPUState *cs;
char cpu_model[32];
int smt = kvmppc_smt_threads();
uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
DeviceClass *dc = DEVICE_GET_CLASS(cs);
int index = spapr_vcpu_id(cpu);
int index = spapr_get_vcpu_id(cpu);
int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));

if ((index % smt) != 0) {
if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
continue;
}

@@ -493,7 +507,7 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
int index = spapr_vcpu_id(cpu);
int index = spapr_get_vcpu_id(cpu);
uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
0xffffffff, 0xffffffff};
uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
@@ -614,7 +628,6 @@ static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
CPUState *cs;
int cpus_offset;
char *nodename;
int smt = kvmppc_smt_threads();

cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
_FDT(cpus_offset);
@@ -628,11 +641,11 @@ static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
*/
CPU_FOREACH_REVERSE(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
int index = spapr_vcpu_id(cpu);
int index = spapr_get_vcpu_id(cpu);
DeviceClass *dc = DEVICE_GET_CLASS(cs);
int offset;

if ((index % smt) != 0) {
if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
continue;
}

@@ -1131,7 +1144,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

/* /interrupt controller */
spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
spapr_dt_xics(xics_max_server_number(spapr), fdt, PHANDLE_XICP);

ret = spapr_populate_memory(spapr, fdt);
if (ret < 0) {
@@ -2224,7 +2237,6 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
MachineState *machine = MACHINE(spapr);
MachineClass *mc = MACHINE_GET_CLASS(machine);
const char *type = spapr_get_cpu_core_type(machine->cpu_type);
int smt = kvmppc_smt_threads();
const CPUArchIdList *possible_cpus;
int boot_cores_nr = smp_cpus / smp_threads;
int i;
@@ -2254,7 +2266,7 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)

if (mc->has_hotpluggable_cpus) {
spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
(core_id / smp_threads) * smt);
spapr_vcpu_id(spapr, core_id));
}

if (i < boot_cores_nr) {
@@ -3237,7 +3249,7 @@ static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
DeviceClass *dc = DEVICE_GET_CLASS(cs);
int id = spapr_vcpu_id(cpu);
int id = spapr_get_vcpu_id(cpu);
void *fdt;
int offset, fdt_size;
char *nodename;
@@ -3281,10 +3293,10 @@ static
void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
int index;
sPAPRDRConnector *drc;
CPUCore *cc = CPU_CORE(dev);
int smt = kvmppc_smt_threads();

if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
error_setg(errp, "Unable to find CPU core with core-id: %d",
@@ -3296,7 +3308,8 @@ void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
return;
}

drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
spapr_vcpu_id(spapr, cc->core_id));
g_assert(drc);

spapr_drc_detach(drc);
@@ -3315,7 +3328,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
CPUState *cs = CPU(core->threads[0]);
sPAPRDRConnector *drc;
Error *local_err = NULL;
int smt = kvmppc_smt_threads();
CPUArchId *core_slot;
int index;
bool hotplugged = spapr_drc_hotplugged(dev);
@@ -3326,7 +3338,8 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
cc->core_id);
return;
}
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
spapr_vcpu_id(spapr, cc->core_id));

g_assert(drc || !mc->has_hotpluggable_cpus);

@@ -3795,7 +3808,7 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj,
ics_pic_print_info(spapr->ics, mon);
}

int spapr_vcpu_id(PowerPCCPU *cpu)
int spapr_get_vcpu_id(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);

@@ -3806,6 +3819,24 @@ int spapr_vcpu_id(PowerPCCPU *cpu)
}
}

void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
int vcpu_id;

vcpu_id = spapr_vcpu_id(spapr, cpu_index);

if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
error_append_hint(errp, "Adjust the number of cpus to %d "
"or try to raise the number of threads per core\n",
vcpu_id * smp_threads / spapr->vsmt);
return;
}

cpu->vcpu_id = vcpu_id;
}

PowerPCCPU *spapr_find_cpu(int vcpu_id)
{
CPUState *cs;
@@ -3813,7 +3844,7 @@ PowerPCCPU *spapr_find_cpu(int vcpu_id)
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);

if (spapr_vcpu_id(cpu) == vcpu_id) {
if (spapr_get_vcpu_id(cpu) == vcpu_id) {
return cpu;
}
}

@@ -205,7 +205,9 @@ static void cap_safe_bounds_check_apply(sPAPRMachineState *spapr, uint8_t val,
static void cap_safe_indirect_branch_apply(sPAPRMachineState *spapr,
uint8_t val, Error **errp)
{
if (tcg_enabled() && val) {
if (val == SPAPR_CAP_WORKAROUND) { /* Can only be Broken or Fixed */
error_setg(errp, "Requested safe indirect branch capability level \"workaround\" not valid, try cap-ibs=fixed");
} else if (tcg_enabled() && val) {
/* TODO - for now only allow broken for TCG */
error_setg(errp, "Requested safe indirect branch capability level not supported by tcg, try a different value for cap-ibs");
} else if (kvm_enabled() && (val > kvmppc_get_cap_safe_indirect_branch())) {
@@ -263,7 +265,7 @@ sPAPRCapabilityInfo capability_table[SPAPR_CAP_NUM] = {
},
[SPAPR_CAP_IBS] = {
.name = "ibs",
.description = "Indirect Branch Serialisation" VALUE_DESC_TRISTATE,
.description = "Indirect Branch Serialisation (broken, fixed)",
.index = SPAPR_CAP_IBS,
.get = spapr_cap_get_tristate,
.set = spapr_cap_set_tristate,
@@ -350,34 +352,34 @@ int spapr_caps_post_migration(sPAPRMachineState *spapr)
}

/* Used to generate the migration field and needed function for a spapr cap */
#define SPAPR_CAP_MIG_STATE(cap, ccap)                  \
static bool spapr_cap_##cap##_needed(void *opaque)      \
#define SPAPR_CAP_MIG_STATE(sname, cap)                 \
static bool spapr_cap_##sname##_needed(void *opaque)    \
{                                                       \
    sPAPRMachineState *spapr = opaque;                  \
                                                        \
    return spapr->cmd_line_caps[SPAPR_CAP_##ccap] &&    \
           (spapr->eff.caps[SPAPR_CAP_##ccap] !=        \
            spapr->def.caps[SPAPR_CAP_##ccap]);         \
    return spapr->cmd_line_caps[cap] &&                 \
           (spapr->eff.caps[cap] !=                     \
            spapr->def.caps[cap]);                      \
}                                                       \
                                                        \
const VMStateDescription vmstate_spapr_cap_##cap = {    \
    .name = "spapr/cap/" #cap,                          \
const VMStateDescription vmstate_spapr_cap_##sname = {  \
    .name = "spapr/cap/" #sname,                        \
    .version_id = 1,                                    \
    .minimum_version_id = 1,                            \
    .needed = spapr_cap_##cap##_needed,                 \
    .needed = spapr_cap_##sname##_needed,               \
    .fields = (VMStateField[]) {                        \
        VMSTATE_UINT8(mig.caps[SPAPR_CAP_##ccap],       \
        VMSTATE_UINT8(mig.caps[cap],                    \
                      sPAPRMachineState),               \
        VMSTATE_END_OF_LIST()                           \
    },                                                  \
}

SPAPR_CAP_MIG_STATE(htm, HTM);
SPAPR_CAP_MIG_STATE(vsx, VSX);
SPAPR_CAP_MIG_STATE(dfp, DFP);
SPAPR_CAP_MIG_STATE(cfpc, CFPC);
SPAPR_CAP_MIG_STATE(sbbc, SBBC);
SPAPR_CAP_MIG_STATE(ibs, IBS);
SPAPR_CAP_MIG_STATE(htm, SPAPR_CAP_HTM);
SPAPR_CAP_MIG_STATE(vsx, SPAPR_CAP_VSX);
SPAPR_CAP_MIG_STATE(dfp, SPAPR_CAP_DFP);
SPAPR_CAP_MIG_STATE(cfpc, SPAPR_CAP_CFPC);
SPAPR_CAP_MIG_STATE(sbbc, SPAPR_CAP_SBBC);
SPAPR_CAP_MIG_STATE(ibs, SPAPR_CAP_IBS);

void spapr_caps_reset(sPAPRMachineState *spapr)
{

@@ -172,13 +172,8 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
cs = CPU(obj);
cpu = sc->threads[i] = POWERPC_CPU(obj);
cs->cpu_index = cc->core_id + i;
cpu->vcpu_id = (cc->core_id * spapr->vsmt / smp_threads) + i;
if (kvm_enabled() && !kvm_vcpu_id_is_valid(cpu->vcpu_id)) {
error_setg(&local_err, "Can't create CPU with id %d in KVM",
cpu->vcpu_id);
error_append_hint(&local_err, "Adjust the number of cpus to %d "
"or try to raise the number of threads per core\n",
cpu->vcpu_id * smp_threads / spapr->vsmt);
spapr_set_vcpu_id(cpu, cs->cpu_index, &local_err);
if (local_err) {
goto err;
}

@@ -731,11 +731,21 @@ static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
return H_AUTHORITY;
}

if (!spapr->htab_shift) {
/* Radix guest, no HPT */
return H_NOT_AVAILABLE;
}

trace_spapr_h_resize_hpt_commit(flags, shift);

rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
if (rc != -ENOSYS) {
return resize_hpt_convert_rc(rc);
rc = resize_hpt_convert_rc(rc);
if (rc == H_SUCCESS) {
/* Need to set the new htab_shift in the machine state */
spapr->htab_shift = shift;
}
return rc;
}

if (flags != 0) {

@@ -818,6 +818,8 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
DriveInfo *fd[MAX_FD];
FWCfgState *fw_cfg;
unsigned int num_vsimms;
DeviceState *dev;
SysBusDevice *s;

/* init CPUs */
for(i = 0; i < smp_cpus; i++) {
@@ -925,12 +927,36 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,

slavio_timer_init_all(hwdef->counter_base, slavio_irq[19], slavio_cpu_irq, smp_cpus);

slavio_serial_ms_kbd_init(hwdef->ms_kb_base, slavio_irq[14],
!machine->enable_graphics, ESCC_CLOCK, 1);
/* Slavio TTYA (base+4, Linux ttyS0) is the first QEMU serial device
Slavio TTYB (base+0, Linux ttyS1) is the second QEMU serial device */
escc_init(hwdef->serial_base, slavio_irq[15], slavio_irq[15],
serial_hds[0], serial_hds[1], ESCC_CLOCK, 1);
dev = qdev_create(NULL, TYPE_ESCC);
qdev_prop_set_uint32(dev, "disabled", !machine->enable_graphics);
qdev_prop_set_uint32(dev, "frequency", ESCC_CLOCK);
qdev_prop_set_uint32(dev, "it_shift", 1);
qdev_prop_set_chr(dev, "chrB", NULL);
qdev_prop_set_chr(dev, "chrA", NULL);
qdev_prop_set_uint32(dev, "chnBtype", escc_mouse);
qdev_prop_set_uint32(dev, "chnAtype", escc_kbd);
qdev_init_nofail(dev);
s = SYS_BUS_DEVICE(dev);
sysbus_connect_irq(s, 0, slavio_irq[14]);
sysbus_connect_irq(s, 1, slavio_irq[14]);
sysbus_mmio_map(s, 0, hwdef->ms_kb_base);

dev = qdev_create(NULL, TYPE_ESCC);
qdev_prop_set_uint32(dev, "disabled", 0);
qdev_prop_set_uint32(dev, "frequency", ESCC_CLOCK);
qdev_prop_set_uint32(dev, "it_shift", 1);
qdev_prop_set_chr(dev, "chrB", serial_hds[1]);
qdev_prop_set_chr(dev, "chrA", serial_hds[0]);
qdev_prop_set_uint32(dev, "chnBtype", escc_serial);
qdev_prop_set_uint32(dev, "chnAtype", escc_serial);
qdev_init_nofail(dev);

s = SYS_BUS_DEVICE(dev);
sysbus_connect_irq(s, 0, slavio_irq[15]);
sysbus_connect_irq(s, 1, slavio_irq[15]);
sysbus_mmio_map(s, 0, hwdef->serial_base);

if (hwdef->apc_base) {
apc_init(hwdef->apc_base, qemu_allocate_irq(cpu_halt_signal, NULL, 0));

@@ -1,14 +1,58 @@
#ifndef HW_ESCC_H
#define HW_ESCC_H

#include "chardev/char-fe.h"
#include "chardev/char-serial.h"
#include "ui/input.h"

/* escc.c */
#define TYPE_ESCC "escc"
#define ESCC_SIZE 4
MemoryRegion *escc_init(hwaddr base, qemu_irq irqA, qemu_irq irqB,
Chardev *chrA, Chardev *chrB,
int clock, int it_shift);

void slavio_serial_ms_kbd_init(hwaddr base, qemu_irq irq,
int disabled, int clock, int it_shift);
#define ESCC(obj) OBJECT_CHECK(ESCCState, (obj), TYPE_ESCC)

typedef enum {
escc_chn_a, escc_chn_b,
} ESCCChnID;

typedef enum {
escc_serial, escc_kbd, escc_mouse,
} ESCCChnType;

#define ESCC_SERIO_QUEUE_SIZE 256

typedef struct {
uint8_t data[ESCC_SERIO_QUEUE_SIZE];
int rptr, wptr, count;
} ESCCSERIOQueue;

#define ESCC_SERIAL_REGS 16
typedef struct ESCCChannelState {
qemu_irq irq;
uint32_t rxint, txint, rxint_under_svc, txint_under_svc;
struct ESCCChannelState *otherchn;
uint32_t reg;
uint8_t wregs[ESCC_SERIAL_REGS], rregs[ESCC_SERIAL_REGS];
ESCCSERIOQueue queue;
CharBackend chr;
int e0_mode, led_mode, caps_lock_mode, num_lock_mode;
int disabled;
int clock;
uint32_t vmstate_dummy;
ESCCChnID chn; /* this channel, A (base+4) or B (base+0) */
ESCCChnType type;
uint8_t rx, tx;
QemuInputHandlerState *hs;
} ESCCChannelState;

typedef struct ESCCState {
SysBusDevice parent_obj;

struct ESCCChannelState chn[2];
uint32_t it_shift;
MemoryRegion mmio;
uint32_t disabled;
uint32_t frequency;
} ESCCState;

#endif

@@ -0,0 +1,107 @@
/*
* QEMU PowerMac CUDA device support
*
* Copyright (c) 2004-2007 Fabrice Bellard
* Copyright (c) 2007 Jocelyn Mayer
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

#ifndef CUDA_H
#define CUDA_H

/* CUDA commands (2nd byte) */
#define CUDA_WARM_START 0x0
#define CUDA_AUTOPOLL 0x1
#define CUDA_GET_6805_ADDR 0x2
#define CUDA_GET_TIME 0x3
#define CUDA_GET_PRAM 0x7
#define CUDA_SET_6805_ADDR 0x8
#define CUDA_SET_TIME 0x9
#define CUDA_POWERDOWN 0xa
#define CUDA_POWERUP_TIME 0xb
#define CUDA_SET_PRAM 0xc
#define CUDA_MS_RESET 0xd
#define CUDA_SEND_DFAC 0xe
#define CUDA_BATTERY_SWAP_SENSE 0x10
#define CUDA_RESET_SYSTEM 0x11
#define CUDA_SET_IPL 0x12
#define CUDA_FILE_SERVER_FLAG 0x13
#define CUDA_SET_AUTO_RATE 0x14
#define CUDA_GET_AUTO_RATE 0x16
#define CUDA_SET_DEVICE_LIST 0x19
#define CUDA_GET_DEVICE_LIST 0x1a
#define CUDA_SET_ONE_SECOND_MODE 0x1b
#define CUDA_SET_POWER_MESSAGES 0x21
#define CUDA_GET_SET_IIC 0x22
#define CUDA_WAKEUP 0x23
#define CUDA_TIMER_TICKLE 0x24
#define CUDA_COMBINED_FORMAT_IIC 0x25

/* Cuda */
#define TYPE_CUDA "cuda"
#define CUDA(obj) OBJECT_CHECK(CUDAState, (obj), TYPE_CUDA)

typedef struct MOS6522CUDAState MOS6522CUDAState;

typedef struct CUDAState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion mem;

ADBBusState adb_bus;
MOS6522CUDAState *mos6522_cuda;

uint32_t tick_offset;
uint64_t tb_frequency;

uint8_t last_b;
uint8_t last_acr;

/* MacOS 9 is racy and requires a delay upon setting the SR_INT bit */
uint64_t sr_delay_ns;
QEMUTimer *sr_delay_timer;

int data_in_size;
int data_in_index;
int data_out_index;

qemu_irq irq;
uint16_t adb_poll_mask;
uint8_t autopoll_rate_ms;
uint8_t autopoll;
uint8_t data_in[128];
uint8_t data_out[16];
QEMUTimer *adb_poll_timer;
} CUDAState;

/* MOS6522 CUDA */
typedef struct MOS6522CUDAState {
/*< private >*/
MOS6522State parent_obj;

CUDAState *cuda;
} MOS6522CUDAState;

#define TYPE_MOS6522_CUDA "mos6522-cuda"
#define MOS6522_CUDA(obj) OBJECT_CHECK(MOS6522CUDAState, (obj), \
TYPE_MOS6522_CUDA)

#endif /* CUDA_H */

@@ -65,7 +65,7 @@ void pcie_host_mmcfg_update(PCIExpressHost *e,
* bit 12 - 14: function number
* bit 0 - 11: offset in configuration space of a given device
*/
#define PCIE_MMCFG_SIZE_MAX (1ULL << 28)
#define PCIE_MMCFG_SIZE_MAX (1ULL << 29)
#define PCIE_MMCFG_SIZE_MIN (1ULL << 20)
#define PCIE_MMCFG_BUS_BIT 20
#define PCIE_MMCFG_BUS_MASK 0x1ff

@@ -766,7 +766,8 @@ void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg);

#define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))

int spapr_vcpu_id(PowerPCCPU *cpu);
int spapr_get_vcpu_id(PowerPCCPU *cpu);
void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp);
PowerPCCPU *spapr_find_cpu(int vcpu_id);

int spapr_irq_alloc(sPAPRMachineState *spapr, int irq_hint, bool lsi,

@@ -31,6 +31,7 @@
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"


@@ -187,8 +188,7 @@ void ppc_translate_init(void)

/* internal defines */
struct DisasContext {
struct TranslationBlock *tb;
target_ulong nip;
DisasContextBase base;
uint32_t opcode;
uint32_t exception;
/* Routine used to access memory */
@@ -275,7 +275,7 @@ static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
* the faulting instruction
*/
if (ctx->exception == POWERPC_EXCP_NONE) {
gen_update_nip(ctx, ctx->nip - 4);
gen_update_nip(ctx, ctx->base.pc_next - 4);
}
t0 = tcg_const_i32(excp);
t1 = tcg_const_i32(error);
@@ -293,7 +293,7 @@ static void gen_exception(DisasContext *ctx, uint32_t excp)
* the faulting instruction
*/
if (ctx->exception == POWERPC_EXCP_NONE) {
gen_update_nip(ctx, ctx->nip - 4);
gen_update_nip(ctx, ctx->base.pc_next - 4);
}
t0 = tcg_const_i32(excp);
gen_helper_raise_exception(cpu_env, t0);
@@ -322,7 +322,7 @@ static void gen_debug_exception(DisasContext *ctx)
*/
if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
(ctx->exception != POWERPC_EXCP_SYNC)) {
gen_update_nip(ctx, ctx->nip);
gen_update_nip(ctx, ctx->base.pc_next);
}
t0 = tcg_const_i32(EXCP_DEBUG);
gen_helper_raise_exception(cpu_env, t0);
@@ -349,7 +349,7 @@ static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
/* Stop translation */
static inline void gen_stop_exception(DisasContext *ctx)
{
gen_update_nip(ctx, ctx->nip);
gen_update_nip(ctx, ctx->base.pc_next);
ctx->exception = POWERPC_EXCP_STOP;
}

@@ -978,7 +978,7 @@ static void gen_addpcis(DisasContext *ctx)
{
target_long d = DX(ctx->opcode);

tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->nip + (d << 16));
tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->base.pc_next + (d << 16));
}

static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
@@ -1580,7 +1580,7 @@ static void gen_pause(DisasContext *ctx)
tcg_temp_free_i32(t0);

/* Stop translation, this gives other CPUs a chance to run */
gen_exception_nip(ctx, EXCP_HLT, ctx->nip);
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) */

@@ -2397,7 +2397,7 @@ static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
t2 = tcg_const_i32(ctx->opcode & 0x03FF0000);
gen_update_nip(ctx, ctx->nip - 4);
gen_update_nip(ctx, ctx->base.pc_next - 4);
gen_helper_raise_exception_err(cpu_env, t1, t2);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
@@ -3322,7 +3322,7 @@ static void gen_wait(DisasContext *ctx)
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
tcg_temp_free_i32(t0);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->nip);
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}

#if defined(TARGET_PPC64)
@@ -3407,7 +3407,7 @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
}

#ifndef CONFIG_USER_ONLY
return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
@@ -3422,7 +3422,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
tcg_gen_exit_tb((uintptr_t)ctx->base.tb + n);
} else {
tcg_gen_movi_tl(cpu_nip, dest & ~3);
if (unlikely(ctx->singlestep_enabled)) {
@@ -3458,14 +3458,14 @@ static void gen_b(DisasContext *ctx)
li = LI(ctx->opcode);
li = (li ^ 0x02000000) - 0x02000000;
if (likely(AA(ctx->opcode) == 0)) {
target = ctx->nip + li - 4;
target = ctx->base.pc_next + li - 4;
} else {
target = li;
}
if (LK(ctx->opcode)) {
gen_setlr(ctx, ctx->nip);
gen_setlr(ctx, ctx->base.pc_next);
}
gen_update_cfar(ctx, ctx->nip - 4);
gen_update_cfar(ctx, ctx->base.pc_next - 4);
gen_goto_tb(ctx, 0, target);
}

@@ -3493,7 +3493,7 @@ static void gen_bcond(DisasContext *ctx, int type)
target = NULL;
}
if (LK(ctx->opcode))
gen_setlr(ctx, ctx->nip);
gen_setlr(ctx, ctx->base.pc_next);
l1 = gen_new_label();
if ((bo & 0x4) == 0) {
/* Decrement and test CTR */
@@ -3530,11 +3530,11 @@ static void gen_bcond(DisasContext *ctx, int type)
}
tcg_temp_free_i32(temp);
}
gen_update_cfar(ctx, ctx->nip - 4);
gen_update_cfar(ctx, ctx->base.pc_next - 4);
if (type == BCOND_IM) {
target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
if (likely(AA(ctx->opcode) == 0)) {
gen_goto_tb(ctx, 0, ctx->nip + li - 4);
gen_goto_tb(ctx, 0, ctx->base.pc_next + li - 4);
} else {
gen_goto_tb(ctx, 0, li);
}
@@ -3549,7 +3549,7 @@ static void gen_bcond(DisasContext *ctx, int type)
}
if ((bo & 0x14) != 0x14) {
gen_set_label(l1);
gen_goto_tb(ctx, 1, ctx->nip);
gen_goto_tb(ctx, 1, ctx->base.pc_next);
}
}

@@ -3645,7 +3645,7 @@ static void gen_rfi(DisasContext *ctx)
}
/* Restore CPU state */
CHK_SV;
gen_update_cfar(ctx, ctx->nip - 4);
gen_update_cfar(ctx, ctx->base.pc_next - 4);
gen_helper_rfi(cpu_env);
gen_sync_exception(ctx);
#endif
@@ -3659,7 +3659,7 @@ static void gen_rfid(DisasContext *ctx)
#else
/* Restore CPU state */
CHK_SV;
gen_update_cfar(ctx, ctx->nip - 4);
gen_update_cfar(ctx, ctx->base.pc_next - 4);
gen_helper_rfid(cpu_env);
gen_sync_exception(ctx);
#endif
@@ -3934,10 +3934,11 @@ static inline void gen_op_mfspr(DisasContext *ctx)
*/
if (sprn != SPR_PVR) {
fprintf(stderr, "Trying to read privileged spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
if (qemu_log_separate()) {
qemu_log("Trying to read privileged spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn,
ctx->base.pc_next - 4);
}
}
gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
@@ -3951,10 +3952,10 @@ static inline void gen_op_mfspr(DisasContext *ctx)
}
/* Not defined */
fprintf(stderr, "Trying to read invalid spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
if (qemu_log_separate()) {
qemu_log("Trying to read invalid spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
}

/* The behaviour depends on MSR:PR and SPR# bit 0x10,
@@ -4030,7 +4031,7 @@ static void gen_mtmsrd(DisasContext *ctx)
* if we enter power saving mode, we will exit the loop
* directly from ppc_store_msr
*/
gen_update_nip(ctx, ctx->nip);
gen_update_nip(ctx, ctx->base.pc_next);
gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]);
/* Must stop the translation as machine state (may have) changed */
/* Note that mtmsr is not always defined as context-synchronizing */
@@ -4059,7 +4060,7 @@ static void gen_mtmsr(DisasContext *ctx)
* if we enter power saving mode, we will exit the loop
* directly from ppc_store_msr
*/
gen_update_nip(ctx, ctx->nip);
gen_update_nip(ctx, ctx->base.pc_next);
#if defined(TARGET_PPC64)
tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32);
#else
@@ -4097,10 +4098,10 @@ static void gen_mtspr(DisasContext *ctx)
} else {
/* Privilege exception */
fprintf(stderr, "Trying to write privileged spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
if (qemu_log_separate()) {
qemu_log("Trying to write privileged spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
}
gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
@@ -4115,10 +4116,10 @@ static void gen_mtspr(DisasContext *ctx)
/* Not defined */
if (qemu_log_separate()) {
qemu_log("Trying to write invalid spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);
}
fprintf(stderr, "Trying to write invalid spr %d (0x%03x) at "
TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);


/* The behaviour depends on MSR:PR and SPR# bit 0x10,
@@ -7206,213 +7207,222 @@ void ppc_cpu_dump_statistics(CPUState *cs, FILE*f,
#endif
}

/*****************************************************************************/
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
static int ppc_tr_init_disas_context(DisasContextBase *dcbase,
CPUState *cs, int max_insns)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUPPCState *env = cs->env_ptr;
DisasContext ctx, *ctxp = &ctx;
opc_handler_t **table, *handler;
target_ulong pc_start;
int num_insns;
int max_insns;
int bound;

pc_start = tb->pc;
ctx.nip = pc_start;
ctx.tb = tb;
ctx.exception = POWERPC_EXCP_NONE;
ctx.spr_cb = env->spr_cb;
ctx.pr = msr_pr;
ctx.mem_idx = env->dmmu_idx;
ctx.dr = msr_dr;
ctx->exception = POWERPC_EXCP_NONE;
ctx->spr_cb = env->spr_cb;
ctx->pr = msr_pr;
ctx->mem_idx = env->dmmu_idx;
ctx->dr = msr_dr;
#if !defined(CONFIG_USER_ONLY)
ctx.hv = msr_hv || !env->has_hv_mode;
ctx->hv = msr_hv || !env->has_hv_mode;
#endif
ctx.insns_flags = env->insns_flags;
ctx.insns_flags2 = env->insns_flags2;
ctx.access_type = -1;
ctx.need_access_type = !(env->mmu_model & POWERPC_MMU_64B);
ctx.le_mode = !!(env->hflags & (1 << MSR_LE));
ctx.default_tcg_memop_mask = ctx.le_mode ? MO_LE : MO_BE;
ctx->insns_flags = env->insns_flags;
ctx->insns_flags2 = env->insns_flags2;
ctx->access_type = -1;
ctx->need_access_type = !(env->mmu_model & POWERPC_MMU_64B);
ctx->le_mode = !!(env->hflags & (1 << MSR_LE));
ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
#if defined(TARGET_PPC64)
ctx.sf_mode = msr_is_64bit(env, env->msr);
ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
ctx->sf_mode = msr_is_64bit(env, env->msr);
ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
if (env->mmu_model == POWERPC_MMU_32B ||
env->mmu_model == POWERPC_MMU_601 ||
(env->mmu_model & POWERPC_MMU_64B))
ctx.lazy_tlb_flush = true;
ctx->lazy_tlb_flush = true;

ctx.fpu_enabled = !!msr_fp;
ctx->fpu_enabled = !!msr_fp;
if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
ctx.spe_enabled = !!msr_spe;
ctx->spe_enabled = !!msr_spe;
else
ctx.spe_enabled = false;
ctx->spe_enabled = false;
if ((env->flags & POWERPC_FLAG_VRE) && msr_vr)
ctx.altivec_enabled = !!msr_vr;
ctx->altivec_enabled = !!msr_vr;
else
ctx.altivec_enabled = false;
ctx->altivec_enabled = false;
if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) {
ctx.vsx_enabled = !!msr_vsx;
ctx->vsx_enabled = !!msr_vsx;
} else {
ctx.vsx_enabled = false;
ctx->vsx_enabled = false;
}
#if defined(TARGET_PPC64)
if ((env->flags & POWERPC_FLAG_TM) && msr_tm) {
ctx.tm_enabled = !!msr_tm;
ctx->tm_enabled = !!msr_tm;
} else {
ctx.tm_enabled = false;
ctx->tm_enabled = false;
}
#endif
ctx.gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE);
ctx->gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE);
if ((env->flags & POWERPC_FLAG_SE) && msr_se)
ctx.singlestep_enabled = CPU_SINGLE_STEP;
ctx->singlestep_enabled = CPU_SINGLE_STEP;
else
ctx.singlestep_enabled = 0;
ctx->singlestep_enabled = 0;
if ((env->flags & POWERPC_FLAG_BE) && msr_be)
ctx.singlestep_enabled |= CPU_BRANCH_STEP;
if (unlikely(cs->singlestep_enabled)) {
ctx.singlestep_enabled |= GDBSTUB_SINGLE_STEP;
ctx->singlestep_enabled |= CPU_BRANCH_STEP;
if (unlikely(ctx->base.singlestep_enabled)) {
ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP;
}
#if defined (DO_SINGLE_STEP) && 0
/* Single step trace mode */
msr_se = 1;
#endif
num_insns = 0;
max_insns = tb_cflags(tb) & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;

bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
return MIN(max_insns, bound);
}

static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
tcg_gen_insn_start(dcbase->pc_next);
}

static bool ppc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
const CPUBreakpoint *bp)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);

gen_debug_exception(ctx);
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
ctx->base.pc_next += 4;
return true;
}

static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUPPCState *env = cs->env_ptr;
opc_handler_t **table, *handler;

LOG_DISAS("----------------\n");
LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

if (unlikely(need_byteswap(ctx))) {
ctx->opcode = bswap32(cpu_ldl_code(env, ctx->base.pc_next));
} else {
ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
}
if (max_insns > TCG_MAX_INSNS) {
max_insns = TCG_MAX_INSNS;
}

gen_tb_start(tb);
tcg_clear_temp_count();
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE && !tcg_op_buf_full()) {
tcg_gen_insn_start(ctx.nip);
num_insns++;

if (unlikely(cpu_breakpoint_test(cs, ctx.nip, BP_ANY))) {
gen_debug_exception(ctxp);
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order to for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
ctx.nip += 4;
break;
}

LOG_DISAS("----------------\n");
LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
ctx.nip, ctx.mem_idx, (int)msr_ir);
if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO))
gen_io_start();
if (unlikely(need_byteswap(&ctx))) {
ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
} else {
ctx.opcode = cpu_ldl_code(env, ctx.nip);
}
LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), opc4(ctx.opcode),
ctx.le_mode ? "little" : "big");
ctx.nip += 4;
table = env->opcodes;
handler = table[opc1(ctx.opcode)];
LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
ctx->opcode, opc1(ctx->opcode), opc2(ctx->opcode),
opc3(ctx->opcode), opc4(ctx->opcode),
ctx->le_mode ? "little" : "big");
ctx->base.pc_next += 4;
table = env->opcodes;
handler = table[opc1(ctx->opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc2(ctx->opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc2(ctx.opcode)];
handler = table[opc3(ctx->opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc3(ctx.opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc4(ctx.opcode)];
}
handler = table[opc4(ctx->opcode)];
}
}
/* Is opcode *REALLY* valid ? */
if (unlikely(handler->handler == &gen_invalid)) {
qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
"%02x - %02x - %02x - %02x (%08x) "
TARGET_FMT_lx " %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), opc4(ctx.opcode),
ctx.opcode, ctx.nip - 4, (int)msr_ir);
} else {
uint32_t inval;

if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE) && Rc(ctx.opcode))) {
inval = handler->inval2;
} else {
inval = handler->inval1;
}

if (unlikely((ctx.opcode & inval) != 0)) {
qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
"%02x - %02x - %02x - %02x (%08x) "
TARGET_FMT_lx "\n", ctx.opcode & inval,
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), opc4(ctx.opcode),
ctx.opcode, ctx.nip - 4);
gen_inval_exception(ctxp, POWERPC_EXCP_INVAL_INVAL);
break;
}
}
(*(handler->handler))(&ctx);
#if defined(DO_PPC_STATISTICS)
handler->count++;
#endif
/* Check trace mode exceptions */
if (unlikely(ctx.singlestep_enabled & CPU_SINGLE_STEP &&
(ctx.nip <= 0x100 || ctx.nip > 0xF00) &&
ctx.exception != POWERPC_SYSCALL &&
ctx.exception != POWERPC_EXCP_TRAP &&
ctx.exception != POWERPC_EXCP_BRANCH)) {
gen_exception_nip(ctxp, POWERPC_EXCP_TRACE, ctx.nip);
} else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) ||
(cs->singlestep_enabled) ||
singlestep ||
num_insns >= max_insns)) {
/* if we reach a page boundary or are single stepping, stop
* generation
*/
break;
}
if (tcg_check_temp_count()) {
fprintf(stderr, "Opcode %02x %02x %02x %02x (%08x) leaked "
"temporaries\n", opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), opc4(ctx.opcode), ctx.opcode);
exit(1);
}
}
if (tb_cflags(tb) & CF_LAST_IO)
gen_io_end();
if (ctx.exception == POWERPC_EXCP_NONE) {
gen_goto_tb(&ctx, 0, ctx.nip);
} else if (ctx.exception != POWERPC_EXCP_BRANCH) {
if (unlikely(cs->singlestep_enabled)) {
gen_debug_exception(ctxp);
/* Is opcode *REALLY* valid ? */
if (unlikely(handler->handler == &gen_invalid)) {
qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
"%02x - %02x - %02x - %02x (%08x) "
TARGET_FMT_lx " %d\n",
opc1(ctx->opcode), opc2(ctx->opcode),
opc3(ctx->opcode), opc4(ctx->opcode),
ctx->opcode, ctx->base.pc_next - 4, (int)msr_ir);
} else {
uint32_t inval;

if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
&& Rc(ctx->opcode))) {
inval = handler->inval2;
} else {
inval = handler->inval1;
}

if (unlikely((ctx->opcode & inval) != 0)) {
qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
"%02x - %02x - %02x - %02x (%08x) "
TARGET_FMT_lx "\n", ctx->opcode & inval,
opc1(ctx->opcode), opc2(ctx->opcode),
opc3(ctx->opcode), opc4(ctx->opcode),
ctx->opcode, ctx->base.pc_next - 4);
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
ctx->base.is_jmp = DISAS_NORETURN;
return;
}
}
(*(handler->handler))(ctx);
#if defined(DO_PPC_STATISTICS)
handler->count++;
#endif
/* Check trace mode exceptions */
if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP &&
(ctx->base.pc_next <= 0x100 || ctx->base.pc_next > 0xF00) &&
ctx->exception != POWERPC_SYSCALL &&
ctx->exception != POWERPC_EXCP_TRAP &&
ctx->exception != POWERPC_EXCP_BRANCH)) {
gen_exception_nip(ctx, POWERPC_EXCP_TRACE, ctx->base.pc_next);
}

if (tcg_check_temp_count()) {
qemu_log("Opcode %02x %02x %02x %02x (%08x) leaked "
"temporaries\n", opc1(ctx->opcode), opc2(ctx->opcode),
opc3(ctx->opcode), opc4(ctx->opcode), ctx->opcode);
}

ctx->base.is_jmp = ctx->exception == POWERPC_EXCP_NONE ?
DISAS_NEXT : DISAS_NORETURN;
}

static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);

if (ctx->exception == POWERPC_EXCP_NONE) {
gen_goto_tb(ctx, 0, ctx->base.pc_next);
} else if (ctx->exception != POWERPC_EXCP_BRANCH) {
if (unlikely(ctx->base.singlestep_enabled)) {
gen_debug_exception(ctx);
}
/* Generate the return instruction */
tcg_gen_exit_tb(0);
}
gen_tb_end(tb, num_insns);
}

tb->size = ctx.nip - pc_start;
tb->icount = num_insns;
static void ppc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}

#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(pc_start)) {
qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, ctx.nip - pc_start);
qemu_log("\n");
qemu_log_unlock();
}
#endif
static const TranslatorOps ppc_tr_ops = {
.init_disas_context = ppc_tr_init_disas_context,
.tb_start = ppc_tr_tb_start,
.insn_start = ppc_tr_insn_start,
.breakpoint_check = ppc_tr_breakpoint_check,
.translate_insn = ppc_tr_translate_insn,
.tb_stop = ppc_tr_tb_stop,
.disas_log = ppc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
DisasContext ctx;

translator_loop(&ppc_tr_ops, &ctx.base, cs, tb);
}

void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,

@@ -15,7 +15,7 @@ static void gen_##name(DisasContext *ctx)                     \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    rd = gen_fprp_ptr(rD(ctx->opcode));                               \
    ra = gen_fprp_ptr(rA(ctx->opcode));                               \
    rb = gen_fprp_ptr(rB(ctx->opcode));                               \
@@ -36,7 +36,7 @@ static void gen_##name(DisasContext *ctx)                     \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    ra = gen_fprp_ptr(rA(ctx->opcode));                               \
    rb = gen_fprp_ptr(rB(ctx->opcode));                               \
    gen_helper_##name(cpu_crf[crfD(ctx->opcode)],                     \
@@ -54,7 +54,7 @@ static void gen_##name(DisasContext *ctx)                     \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    uim = tcg_const_i32(UIMM5(ctx->opcode));                          \
    rb = gen_fprp_ptr(rB(ctx->opcode));                               \
    gen_helper_##name(cpu_crf[crfD(ctx->opcode)],                     \
@@ -72,7 +72,7 @@ static void gen_##name(DisasContext *ctx)                     \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    ra = gen_fprp_ptr(rA(ctx->opcode));                               \
    dcm = tcg_const_i32(DCM(ctx->opcode));                            \
    gen_helper_##name(cpu_crf[crfD(ctx->opcode)],                     \
@@ -90,7 +90,7 @@ static void gen_##name(DisasContext *ctx)                     \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    rt = gen_fprp_ptr(rD(ctx->opcode));                               \
    rb = gen_fprp_ptr(rB(ctx->opcode));                               \
    u32_1 = tcg_const_i32(u32f1(ctx->opcode));                        \
@@ -114,7 +114,7 @@ static void gen_##name(DisasContext *ctx)                    \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    rt = gen_fprp_ptr(rD(ctx->opcode));                               \
    ra = gen_fprp_ptr(rA(ctx->opcode));                               \
    rb = gen_fprp_ptr(rB(ctx->opcode));                               \
@@ -137,7 +137,7 @@ static void gen_##name(DisasContext *ctx)                    \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    rt = gen_fprp_ptr(rD(ctx->opcode));                               \
    rb = gen_fprp_ptr(rB(ctx->opcode));                               \
    gen_helper_##name(cpu_env, rt, rb);                               \
@@ -157,7 +157,7 @@ static void gen_##name(DisasContext *ctx)                    \
        gen_exception(ctx, POWERPC_EXCP_FPU);                         \
        return;                                                       \
    }                                                                 \
    gen_update_nip(ctx, ctx->nip - 4);                                \
    gen_update_nip(ctx, ctx->base.pc_next - 4);                       \
    rt = gen_fprp_ptr(rD(ctx->opcode));                               \
    rs = gen_fprp_ptr(fprfld(ctx->opcode));                           \
    i32 = tcg_const_i32(i32fld(ctx->opcode));                         \

@@ -179,11 +179,11 @@ static void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -191,11 +191,11 @@ static void spr_read_decr(DisasContext *ctx, int gprn, int sprn)

static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -206,11 +206,11 @@ static void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
/* Time base */
static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -218,11 +218,11 @@ static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)

static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -243,11 +243,11 @@ static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
#if !defined(CONFIG_USER_ONLY)
static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -255,11 +255,11 @@ static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)

static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -287,11 +287,11 @@ static void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
/* HDECR */
static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}
@@ -299,11 +299,11 @@ static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)

static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) {
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_end();
gen_stop_exception(ctx);
}