Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging

trivial patches for 2024-10-04

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEEZKoqtTHVaQM2a/75gqpKJDselHgFAmcAEU0ACgkQgqpKJDse
# lHgJlhAAmDwxXcHIeNgyOxSmopgKC5VKmux1qvi3PNmM46CGYTDG3s4MIUIRPNhi
# zoLQhSdjcFNQi133WoXAWZInTwYCeEe4JbWev7bTDZxoJvZFss6P/DhmSY7tCnaf
# QU+XeNl86Iy28glZjiL9EFZi7SM9+OWVF5Dqxd2NlCNA6OlnAtHoVp3bHUqkVgr1
# Lhq+0GRsxhU9bg3eO+yGXVquuOtSMa5LjEqP6kUe6ajo1E4/+GqO9hvfaj8K35Da
# B5wa39/MnSN0alnNS8rJUJXxBp2hZt8VamntL86v4kMLQCVGR+KL5FmApZzxzM/r
# fY8Ky4b5w8U0BDXnwCcr3A2bYlurC7FhDgBJw3YCQNwbxQbbG7PfbMATD86nfZPd
# HTjDjn874reGXgdXt15+3q1zm8kDylMZxEJpRdmsB+uYFVDlNCimPcCPe1YSjVcW
# AR5/NubrigpuX8qM5tSiLhjoeAZ0vQjoapGs5zi2dQtg4MltRgi32HPIRq3ooUUg
# T2XBhDUElrwwftGQuDN6Vt5Z0EQPP6HDoFLz0VhzWvlsR5DOLjxK4oLsDmQoV34n
# 9I4wSmFzwX0Vy0QJIjL6LFec/Ky8uO7QAX5PCLcsEnZy+/q2GsSdwXCPT+SPJ1AL
# bEfqLw7U6CSv/eiGOpScCnCLENrw3GXrN31SqtOtgxDPj2lVEsU=
# =HYZX
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 04 Oct 2024 17:01:17 BST
# gpg:                using RSA key 64AA2AB531D56903366BFEF982AA4A243B1E9478
# gpg: Good signature from "Michael Tokarev <mjt@debian.org>" [full]
# gpg:                 aka "Michael Tokarev <mjt@corpit.ru>" [full]
# gpg:                 aka "Michael Tokarev <mjt@tls.msk.ru>" [full]
# Primary key fingerprint: 9D8B E14E 3F2A 9DD7 9199  28F1 61AD 3D98 ECDF 2C8E
#      Subkey fingerprint: 64AA 2AB5 31D5 6903 366B  FEF9 82AA 4A24 3B1E 9478

* tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu: (23 commits)
  MAINTAINERS: Add myself as maintainer of e500 machines
  docs/devel: Mention post_load hook restrictions where we document the hook
  tests/functional: Fix hash validation
  hw/mips: Build fw_cfg.c once
  tests/tcg/plugins: Remove remainder of the cris target
  block-backend: Remove deadcode
  hw/net/rocker: Remove unused rocker_fp_ports
  hw/pci: Remove unused pcie_chassis_find_slot
  replay: Remove unused replay_disable_events
  remote: Remove unused remote_iohub_finalize
  vhost: Remove unused vhost_dev_{load|save}_inflight
  ui/cursor: remove cursor_get_mono_image
  hw: Remove unused fw_cfg_init_io
  linux-user: Remove unused handle_vm86_fault
  hw/char: Remove unused serial_set_frequency
  hw/net/net_rx_pkt: Remove deadcode
  net: Remove deadcode
  q35: Remove unused mch_mcfg_base
  hw/xen: Remove deadcode
  MAINTAINERS: remove gensyscalls.sh from the linux-user section
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2024-10-04 17:08:01 +01:00
commit a3fb4e93a3
38 changed files with 22 additions and 465 deletions

View File

@@ -1378,8 +1378,9 @@ F: hw/pci-host/ppc4xx_pci.c
F: tests/functional/test_ppc_bamboo.py
e500
M: Bernhard Beschow <shentey@gmail.com>
L: qemu-ppc@nongnu.org
S: Orphan
S: Odd Fixes
F: hw/ppc/e500*
F: hw/ppc/ppce500_spin.c
F: hw/gpio/mpc8xxx.c
@@ -1395,8 +1396,9 @@ F: docs/system/ppc/ppce500.rst
F: tests/functional/test_ppc64_e500.py
mpc8544ds
M: Bernhard Beschow <shentey@gmail.com>
L: qemu-ppc@nongnu.org
S: Orphan
S: Odd Fixes
F: hw/ppc/mpc8544ds.c
F: hw/ppc/mpc8544_guts.c
F: tests/functional/test_ppc_mpc8544ds.py
@@ -3685,7 +3687,6 @@ F: configs/targets/*linux-user.mak
F: scripts/qemu-binfmt-conf.sh
F: scripts/update-syscalltbl.sh
F: scripts/update-mips-syscall-args.sh
F: scripts/gensyscalls.sh
Tiny Code Generator (TCG)
-------------------------

View File

@@ -853,15 +853,6 @@ BlockBackendPublic *blk_get_public(BlockBackend *blk)
return &blk->public;
}
/*
* Returns a BlockBackend given the associated @public fields.
*/
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
GLOBAL_STATE_CODE();
return container_of(public, BlockBackend, public);
}
/*
* Disassociates the currently associated BlockDriverState from @blk.
*/
@@ -1214,12 +1205,6 @@ BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
return blk->iostatus;
}
void blk_iostatus_disable(BlockBackend *blk)
{
GLOBAL_STATE_CODE();
blk->iostatus_enabled = false;
}
void blk_iostatus_reset(BlockBackend *blk)
{
GLOBAL_STATE_CODE();
@@ -2228,28 +2213,6 @@ void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
blk->enable_write_cache = wce;
}
void blk_activate(BlockBackend *blk, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
GLOBAL_STATE_CODE();
if (!bs) {
error_setg(errp, "Device '%s' has no medium", blk->name);
return;
}
/*
* Migration code can call this function in coroutine context, so leave
* coroutine context if necessary.
*/
if (qemu_in_coroutine()) {
bdrv_co_activate(bs, errp);
} else {
GRAPH_RDLOCK_GUARD_MAINLOOP();
bdrv_activate(bs, errp);
}
}
bool coroutine_fn blk_co_is_inserted(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
@@ -2380,36 +2343,6 @@ bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
return bdrv_op_is_blocked(bs, op, errp);
}
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
GLOBAL_STATE_CODE();
if (bs) {
bdrv_op_unblock(bs, op, reason);
}
}
void blk_op_block_all(BlockBackend *blk, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
GLOBAL_STATE_CODE();
if (bs) {
bdrv_op_block_all(bs, reason);
}
}
void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
GLOBAL_STATE_CODE();
if (bs) {
bdrv_op_unblock_all(bs, reason);
}
}
/**
* Return BB's current AioContext. Note that this context may change
* concurrently at any time, with one exception: If the BB has a root node
@@ -2564,12 +2497,6 @@ void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
notifier_list_add(&blk->remove_bs_notifiers, notify);
}
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
GLOBAL_STATE_CODE();
notifier_list_add(&blk->insert_bs_notifiers, notify);
}
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
IO_CODE();

View File

@@ -465,6 +465,12 @@ Examples of such API functions are:
- portio_list_set_address()
- portio_list_set_enabled()
Since the order of device save/restore is not defined, you must
avoid accessing or changing any other device's state in one of these
callbacks. (For instance, don't do anything that calls ``update_irq()``
in a ``post_load`` hook.) Otherwise, restore will not be deterministic,
and this will break execution record/replay.
Iterative device migration
--------------------------
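
The post_load restriction documented in the hunk above is easiest to see in code. The following sketch is purely illustrative and not part of this series: the device and every identifier in it are invented. The hook recomputes derived state from the device's own migrated fields only, and deliberately does not touch any other device (no update_irq()-style calls), which is what keeps restore deterministic for record/replay.

/* Hypothetical device state; assumes the usual QEMU headers. */
typedef struct DemoDeviceState {
    uint32_t ctrl;          /* migrated guest-visible register */
    uint32_t derived_mask;  /* recomputed on load, never migrated */
} DemoDeviceState;

static int demo_device_post_load(void *opaque, int version_id)
{
    DemoDeviceState *s = opaque;

    /* Fine: rebuild internal-only state from our own migrated fields. */
    s->derived_mask = s->ctrl & 0xff;

    /*
     * Not fine: something like demo_device_update_irq(s) would reach into
     * another device (the interrupt controller) while the save/restore
     * order is undefined, making restore non-deterministic.
     */
    return 0;
}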

View File

@@ -202,6 +202,9 @@ into the log.
Saving/restoring the VM state
-----------------------------
Record/replay relies on VM state save and restore being complete and
deterministic.
All fields in the device state structure (including virtual timers)
should be restored by loadvm to the same values they had before savevm.
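
As an illustrative reading of the requirement above (names invented, extending the hypothetical DemoDeviceState sketched earlier with a QEMUTimer *refresh_timer field), a device's VMStateDescription should list every field that affects behaviour, virtual timers included, so that loadvm restores exactly what savevm captured. The macros used are the standard QEMU vmstate helpers.

static const VMStateDescription vmstate_demo_device = {
    .name = "demo-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(ctrl, DemoDeviceState),
        /* Virtual timers are device state too: migrate them like any field. */
        VMSTATE_TIMER_PTR(refresh_timer, DemoDeviceState),
        VMSTATE_END_OF_LIST()
    },
};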

View File

@@ -20,7 +20,6 @@
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "exec/tswap.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "qapi/error.h"

View File

@@ -951,13 +951,6 @@ static void serial_unrealize(DeviceState *dev)
qemu_unregister_reset(serial_reset, s);
}
/* Change the main reference oscillator frequency. */
void serial_set_frequency(SerialState *s, uint32_t frequency)
{
s->baudbase = frequency;
serial_update_parameters(s);
}
const MemoryRegionOps serial_io_ops = {
.read = serial_ioport_read,
.write = serial_ioport_write,

View File

@@ -1,6 +1,6 @@
mips_ss = ss.source_set()
mips_ss.add(files('bootloader.c', 'mips_int.c'))
mips_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c'))
common_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c'))
mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c', 'loongson3_virt.c'))
mips_ss.add(when: 'CONFIG_MALTA', if_true: files('malta.c'))
mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c'))

View File

@@ -209,12 +209,6 @@ void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
*l4hdr_proto = pkt->l4hdr_info.proto;
}
size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt)
{
assert(pkt);
return pkt->l3hdr_off;
}
size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt)
{
assert(pkt);
@@ -426,13 +420,6 @@ struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt)
return pkt->vec;
}
uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt)
{
assert(pkt);
return pkt->vec_len;
}
void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt,
struct virtio_net_hdr *vhdr)
{

View File

@@ -77,14 +77,6 @@ void net_rx_pkt_get_protocols(struct NetRxPkt *pkt,
bool *hasip4, bool *hasip6,
EthL4HdrProto *l4hdr_proto);
/**
* fetches L3 header offset
*
* @pkt: packet
*
*/
size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt);
/**
* fetches L4 header offset
*
@@ -267,15 +259,6 @@ net_rx_pkt_attach_data(struct NetRxPkt *pkt, const void *data,
*/
struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt);
/**
* returns io vector length that holds the attached data
*
* @pkt: packet
* @ret: IOVec length
*
*/
uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt);
/**
* prints rx packet data if debug is enabled
*

View File

@@ -134,11 +134,6 @@ RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
return list;
}
uint32_t rocker_fp_ports(Rocker *r)
{
return r->fp_ports;
}
static uint32_t rocker_get_pport_by_tx_ring(Rocker *r,
DescRing *ring)
{

View File

@@ -72,7 +72,6 @@ DECLARE_INSTANCE_CHECKER(Rocker, ROCKER,
TYPE_ROCKER)
Rocker *rocker_find(const char *name);
uint32_t rocker_fp_ports(Rocker *r);
int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up);
int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr,
uint16_t vlan_id);

View File

@@ -1171,11 +1171,6 @@ FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
return s;
}
FWCfgState *fw_cfg_init_io(uint32_t iobase)
{
return fw_cfg_init_io_dma(iobase, 0, NULL);
}
FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
hwaddr data_addr, uint32_t data_width,
hwaddr dma_addr, AddressSpace *dma_as)

View File

@@ -662,16 +662,6 @@ static void mch_realize(PCIDevice *d, Error **errp)
OBJECT(&mch->smram));
}
uint64_t mch_mcfg_base(void)
{
bool ambiguous;
Object *o = object_resolve_path_type("", TYPE_MCH_PCI_DEVICE, &ambiguous);
if (!o) {
return 0;
}
return MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT;
}
static Property mch_props[] = {
DEFINE_PROP_UINT16("extended-tseg-mbytes", MCHPCIState, ext_tseg_mbytes,
16),

View File

@@ -92,16 +92,6 @@ static PCIESlot *pcie_chassis_find_slot_with_chassis(struct PCIEChassis *c,
return s;
}
PCIESlot *pcie_chassis_find_slot(uint8_t chassis_number, uint16_t slot)
{
struct PCIEChassis *c;
c = pcie_chassis_find(chassis_number);
if (!c) {
return NULL;
}
return pcie_chassis_find_slot_with_chassis(c, slot);
}
int pcie_chassis_add_slot(struct PCIESlot *slot)
{
struct PCIEChassis *c;

View File

@@ -33,19 +33,6 @@ void remote_iohub_init(RemoteIOHubState *iohub)
}
}
void remote_iohub_finalize(RemoteIOHubState *iohub)
{
int pirq;
for (pirq = 0; pirq < REMOTE_IOHUB_NB_PIRQS; pirq++) {
qemu_set_fd_handler(event_notifier_get_fd(&iohub->resamplefds[pirq]),
NULL, NULL, NULL);
event_notifier_cleanup(&iohub->irqfds[pirq]);
event_notifier_cleanup(&iohub->resamplefds[pirq]);
qemu_mutex_destroy(&iohub->irq_level_lock[pirq]);
}
}
int remote_iohub_map_irq(PCIDevice *pci_dev, int intx)
{
return pci_dev->devfn;

View File

@@ -1930,62 +1930,6 @@ void vhost_dev_free_inflight(struct vhost_inflight *inflight)
}
}
static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
uint64_t new_size)
{
Error *err = NULL;
int fd = -1;
void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
&fd, &err);
if (err) {
error_report_err(err);
return -ENOMEM;
}
vhost_dev_free_inflight(inflight);
inflight->offset = 0;
inflight->addr = addr;
inflight->fd = fd;
inflight->size = new_size;
return 0;
}
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
{
if (inflight->addr) {
qemu_put_be64(f, inflight->size);
qemu_put_be16(f, inflight->queue_size);
qemu_put_buffer(f, inflight->addr, inflight->size);
} else {
qemu_put_be64(f, 0);
}
}
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
{
uint64_t size;
size = qemu_get_be64(f);
if (!size) {
return 0;
}
if (inflight->size != size) {
int ret = vhost_dev_resize_inflight(inflight, size);
if (ret < 0) {
return ret;
}
}
inflight->queue_size = qemu_get_be16(f);
qemu_get_buffer(f, inflight->addr, size);
return 0;
}
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
{
int r;

View File

@@ -93,8 +93,6 @@ struct SerialMM {
extern const VMStateDescription vmstate_serial;
extern const MemoryRegionOps serial_io_ops;
void serial_set_frequency(SerialState *s, uint32_t frequency);
#define TYPE_SERIAL "serial"
OBJECT_DECLARE_SIMPLE_TYPE(SerialState, SERIAL)

View File

@@ -321,7 +321,6 @@ void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s);
FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
AddressSpace *dma_as);
FWCfgState *fw_cfg_init_io(uint32_t iobase);
FWCfgState *fw_cfg_init_mem(hwaddr ctl_addr, hwaddr data_addr);
FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
hwaddr data_addr, uint32_t data_width,

View File

@@ -181,8 +181,6 @@ struct Q35PCIHost {
#define MCH_PCIE_DEV 1
#define MCH_PCIE_FUNC 0
uint64_t mch_mcfg_base(void);
/*
* Arbitrary but unique BNF number for IOAPIC device.
*

View File

@@ -72,7 +72,6 @@ struct PCIESlot {
};
void pcie_chassis_create(uint8_t chassis_number);
PCIESlot *pcie_chassis_find_slot(uint8_t chassis, uint16_t slot);
int pcie_chassis_add_slot(struct PCIESlot *slot);
void pcie_chassis_del_slot(PCIESlot *s);

View File

@@ -37,6 +37,5 @@ void remote_iohub_set_irq(void *opaque, int pirq, int level);
void process_set_irqfd_msg(PCIDevice *pci_dev, MPQemuMsg *msg);
void remote_iohub_init(RemoteIOHubState *iohub);
void remote_iohub_finalize(RemoteIOHubState *iohub);
#endif

View File

@@ -338,8 +338,6 @@ void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
struct vhost_inflight *inflight);

View File

@@ -172,9 +172,6 @@ ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov,
int iovcnt, NetPacketSent *sent_cb);
ssize_t qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size);
ssize_t qemu_receive_packet_iov(NetClientState *nc,
const struct iovec *iov,
int iovcnt);
ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size);
ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
int size, NetPacketSent *sent_cb);
@@ -307,7 +304,6 @@ void hmp_host_net_remove(Monitor *mon, const QDict *qdict);
void netdev_add(QemuOpts *opts, Error **errp);
int net_hub_id_for_client(NetClientState *nc, int *id);
NetClientState *net_hub_port_find(int hub_id);
#define DEFAULT_NETWORK_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifup"
#define DEFAULT_NETWORK_DOWN_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifdown"

View File

@@ -59,10 +59,6 @@ ssize_t qemu_net_queue_receive(NetQueue *queue,
const uint8_t *data,
size_t size);
ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
const struct iovec *iov,
int iovcnt);
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,

View File

@@ -54,7 +54,6 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
void monitor_remove_blk(BlockBackend *blk);
BlockBackendPublic *blk_get_public(BlockBackend *blk);
BlockBackend *blk_by_public(BlockBackendPublic *public);
void blk_remove_bs(BlockBackend *blk);
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
@@ -67,7 +66,6 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
void blk_iostatus_enable(BlockBackend *blk);
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
void blk_iostatus_disable(BlockBackend *blk);
void blk_iostatus_reset(BlockBackend *blk);
int blk_attach_dev(BlockBackend *blk, DeviceState *dev);
void blk_detach_dev(BlockBackend *blk, DeviceState *dev);
@@ -76,8 +74,6 @@ BlockBackend *blk_by_dev(void *dev);
BlockBackend *blk_by_qdev_id(const char *id, Error **errp);
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
void blk_activate(BlockBackend *blk, Error **errp);
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
void blk_aio_cancel(BlockAIOCB *acb);
int blk_commit_all(void);
@@ -91,9 +87,6 @@ bool blk_is_sg(BlockBackend *blk);
void blk_set_enable_write_cache(BlockBackend *blk, bool wce);
int blk_get_flags(BlockBackend *blk);
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp);
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason);
void blk_op_block_all(BlockBackend *blk, Error *reason);
void blk_op_unblock_all(BlockBackend *blk, Error *reason);
int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
Error **errp);
void blk_add_aio_context_notifier(BlockBackend *blk,
@@ -105,7 +98,6 @@ void blk_remove_aio_context_notifier(BlockBackend *blk,
void (*detach_aio_context)(void *),
void *opaque);
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify);
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify);
BlockBackendRootState *blk_get_root_state(BlockBackend *blk);
void blk_update_root_state(BlockBackend *blk);
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk);

View File

@@ -117,8 +117,6 @@ void replay_async_events(void);
/* Asynchronous events queue */
/*! Disables storing events in the queue */
void replay_disable_events(void);
/*! Enables storing events in the queue */
void replay_enable_events(void);
/*! Returns true when saving events is enabled */

View File

@@ -175,7 +175,6 @@ int cursor_get_mono_bpl(QEMUCursor *c);
void cursor_set_mono(QEMUCursor *c,
uint32_t foreground, uint32_t background, uint8_t *image,
int transparent, uint8_t *mask);
void cursor_get_mono_image(QEMUCursor *c, int foreground, uint8_t *mask);
void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask);
typedef void *QEMUGLContext;

View File

@@ -102,7 +102,6 @@ int host_to_target_waitstatus(int status);
/* vm86.c */
void save_v86_state(CPUX86State *env);
void handle_vm86_trap(CPUX86State *env, int trapno);
void handle_vm86_fault(CPUX86State *env);
int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr);
#elif defined(TARGET_SPARC64)
void sparc64_set_context(CPUSPARCState *env);

View File

@@ -255,142 +255,6 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
}
}
#define CHECK_IF_IN_TRAP() \
if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
(ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
newflags |= TF_MASK
#define VM86_FAULT_RETURN \
if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
(ts->v86flags & (IF_MASK | VIF_MASK))) \
return_to_32bit(env, TARGET_VM86_PICRETURN); \
return
void handle_vm86_fault(CPUX86State *env)
{
CPUState *cs = env_cpu(env);
TaskState *ts = get_task_state(cs);
uint32_t csp, ssp;
unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
int data32, pref_done;
csp = env->segs[R_CS].selector << 4;
ip = env->eip & 0xffff;
ssp = env->segs[R_SS].selector << 4;
sp = env->regs[R_ESP] & 0xffff;
LOG_VM86("VM86 exception %04x:%08x\n",
env->segs[R_CS].selector, env->eip);
data32 = 0;
pref_done = 0;
do {
opcode = vm_getb(env, csp, ip);
ADD16(ip, 1);
switch (opcode) {
case 0x66: /* 32-bit data */ data32=1; break;
case 0x67: /* 32-bit address */ break;
case 0x2e: /* CS */ break;
case 0x3e: /* DS */ break;
case 0x26: /* ES */ break;
case 0x36: /* SS */ break;
case 0x65: /* GS */ break;
case 0x64: /* FS */ break;
case 0xf2: /* repnz */ break;
case 0xf3: /* rep */ break;
default: pref_done = 1;
}
} while (!pref_done);
/* VM86 mode */
switch(opcode) {
case 0x9c: /* pushf */
if (data32) {
vm_putl(env, ssp, sp - 4, get_vflags(env));
ADD16(env->regs[R_ESP], -4);
} else {
vm_putw(env, ssp, sp - 2, get_vflags(env));
ADD16(env->regs[R_ESP], -2);
}
env->eip = ip;
VM86_FAULT_RETURN;
case 0x9d: /* popf */
if (data32) {
newflags = vm_getl(env, ssp, sp);
ADD16(env->regs[R_ESP], 4);
} else {
newflags = vm_getw(env, ssp, sp);
ADD16(env->regs[R_ESP], 2);
}
env->eip = ip;
CHECK_IF_IN_TRAP();
if (data32) {
if (set_vflags_long(newflags, env))
return;
} else {
if (set_vflags_short(newflags, env))
return;
}
VM86_FAULT_RETURN;
case 0xcd: /* int */
intno = vm_getb(env, csp, ip);
ADD16(ip, 1);
env->eip = ip;
if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
if ( (ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
(intno &7)) & 1) {
return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
return;
}
}
do_int(env, intno);
break;
case 0xcf: /* iret */
if (data32) {
newip = vm_getl(env, ssp, sp) & 0xffff;
newcs = vm_getl(env, ssp, sp + 4) & 0xffff;
newflags = vm_getl(env, ssp, sp + 8);
ADD16(env->regs[R_ESP], 12);
} else {
newip = vm_getw(env, ssp, sp);
newcs = vm_getw(env, ssp, sp + 2);
newflags = vm_getw(env, ssp, sp + 4);
ADD16(env->regs[R_ESP], 6);
}
env->eip = newip;
cpu_x86_load_seg(env, R_CS, newcs);
CHECK_IF_IN_TRAP();
if (data32) {
if (set_vflags_long(newflags, env))
return;
} else {
if (set_vflags_short(newflags, env))
return;
}
VM86_FAULT_RETURN;
case 0xfa: /* cli */
env->eip = ip;
clear_IF(env);
VM86_FAULT_RETURN;
case 0xfb: /* sti */
env->eip = ip;
if (set_IF(env))
return;
VM86_FAULT_RETURN;
default:
/* real VM86 GPF exception */
return_to_32bit(env, TARGET_VM86_UNKNOWN);
break;
}
}
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
CPUState *cs = env_cpu(env);

View File

@@ -193,31 +193,6 @@ NetClientState *net_hub_add_port(int hub_id, const char *name,
return &port->nc;
}
/**
* Find a available port on a hub; otherwise create one new port
*/
NetClientState *net_hub_port_find(int hub_id)
{
NetHub *hub;
NetHubPort *port;
NetClientState *nc;
QLIST_FOREACH(hub, &hubs, next) {
if (hub->id == hub_id) {
QLIST_FOREACH(port, &hub->ports, next) {
nc = port->nc.peer;
if (!nc) {
return &(port->nc);
}
}
break;
}
}
nc = net_hub_add_port(hub_id, NULL, NULL);
return nc;
}
/**
* Print hub configuration
*/

View File

@@ -750,16 +750,6 @@ ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size)
return qemu_net_queue_receive(nc->incoming_queue, buf, size);
}
ssize_t qemu_receive_packet_iov(NetClientState *nc, const struct iovec *iov,
int iovcnt)
{
if (!qemu_can_receive_packet(nc)) {
return 0;
}
return qemu_net_queue_receive_iov(nc->incoming_queue, iov, iovcnt);
}
ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size)
{
return qemu_send_packet_async_with_flags(nc, QEMU_NET_PACKET_FLAG_RAW,

View File

@@ -193,17 +193,6 @@ ssize_t qemu_net_queue_receive(NetQueue *queue,
return qemu_net_queue_deliver(queue, NULL, 0, data, size);
}
ssize_t qemu_net_queue_receive_iov(NetQueue *queue,
const struct iovec *iov,
int iovcnt)
{
if (queue->delivering) {
return 0;
}
return qemu_net_queue_deliver_iov(queue, NULL, 0, iov, iovcnt);
}
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,

View File

@@ -154,9 +154,9 @@ static xkb_mod_mask_t get_mod(struct xkb_keymap *map, const char *name)
int main(int argc, char *argv[])
{
static struct xkb_context *ctx;
static struct xkb_keymap *map;
static struct xkb_state *state;
struct xkb_context *ctx;
struct xkb_keymap *map;
struct xkb_state *state;
xkb_mod_index_t mod, mods;
int rc;
@@ -213,6 +213,7 @@ int main(int argc, char *argv[])
ctx = xkb_context_new(XKB_CONTEXT_NO_FLAGS);
map = xkb_keymap_new_from_names(ctx, &names, XKB_KEYMAP_COMPILE_NO_FLAGS);
xkb_context_unref(ctx);
if (!map) {
/* libxkbcommon prints error */
exit(1);
@@ -234,6 +235,8 @@ int main(int argc, char *argv[])
state = xkb_state_new(map);
xkb_keymap_key_for_each(map, walk_map, state);
xkb_state_unref(state);
xkb_keymap_unref(map);
/* add quirks */
fprintf(outfile,

View File

@@ -92,15 +92,6 @@ void replay_flush_events(void)
}
}
void replay_disable_events(void)
{
if (replay_mode != REPLAY_MODE_NONE) {
events_enabled = false;
/* Flush events queue before waiting of completion */
replay_flush_events();
}
}
/*! Adds specified async event to the queue */
void replay_add_event(ReplayAsyncEventKind event_kind,
void *opaque,

View File

@@ -57,7 +57,7 @@ class Asset:
break
hl.update(chunk)
return hl.hexdigest()
return self.hash == hl.hexdigest()
def valid(self):
return self.cache_file.exists() and self._check(self.cache_file)

View File

@@ -34,7 +34,6 @@ static const struct SyscallInfo arch_syscall_info[] = {
{ "arm", 4 },
{ "armeb", 4 },
{ "avr", -1 },
{ "cris", -1 },
{ "hexagon", 64 },
{ "hppa", -1 },
{ "i386", 4 },

View File

@@ -197,30 +197,6 @@ void cursor_set_mono(QEMUCursor *c,
}
}
void cursor_get_mono_image(QEMUCursor *c, int foreground, uint8_t *image)
{
uint32_t *data = c->data;
uint8_t bit;
int x,y,bpl;
bpl = cursor_get_mono_bpl(c);
memset(image, 0, bpl * c->height);
for (y = 0; y < c->height; y++) {
bit = 0x80;
for (x = 0; x < c->width; x++, data++) {
if (((*data & 0xff000000) == 0xff000000) &&
((*data & 0x00ffffff) == foreground)) {
image[x/8] |= bit;
}
bit >>= 1;
if (bit == 0) {
bit = 0x80;
}
}
image += bpl;
}
}
void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask)
{
uint32_t *data = c->data;

View File

@@ -1935,7 +1935,7 @@ static void do_key_event(VncState *vs, int down, int keycode, int sym)
}
qkbd_state_key_event(vs->vd->kbd, qcode, down);
if (!qemu_console_is_graphic(vs->vd->dcl.con)) {
if (QEMU_IS_TEXT_CONSOLE(vs->vd->dcl.con)) {
QemuTextConsole *con = QEMU_TEXT_CONSOLE(vs->vd->dcl.con);
bool numlock = qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_NUMLOCK);
bool control = qkbd_state_modifier_get(vs->vd->kbd, QKBD_MOD_CTRL);