monitor: Peel off @mon_global wrapper

Wrapping global variables in a struct without a use for the wrapper struct buys us nothing but longer lines. Unwrap them.

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20180703085358.13941-21-armbru@redhat.com>
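
For readers skimming the change, here is a minimal standalone C sketch of the pattern being removed. It is illustrative only, not QEMU code; the names state, request_count and response_count are invented for the example. A wrapper struct around file-scope globals adds a prefix to every access without providing any encapsulation, so peeling it off shortens the code without changing behavior:

    #include <stdio.h>

    /* Before: globals bundled in an anonymous wrapper struct for no benefit. */
    static struct {
        int request_count;
        int response_count;
    } state;

    /* After: plain file-scope globals; every user drops the "state." prefix. */
    static int request_count;
    static int response_count;

    int main(void)
    {
        state.request_count++;   /* old spelling: longer, no extra safety */
        request_count++;         /* new spelling after unwrapping */
        printf("%d %d\n", state.request_count, request_count);
        return 0;
    }

In monitor.c the same mechanical substitution replaces mon_global.mon_iothread, mon_global.qmp_dispatcher_bh and mon_global.qmp_respond_bh with the bare identifiers, as the diff below shows.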

parent f91dc2a0d0
commit cab5ad86b4

 monitor.c | 52

@@ -211,7 +211,7 @@ struct Monitor {
     /*
      * State used only in the thread "owning" the monitor.
-     * If @use_io_thread, this is mon_global.mon_iothread.
+     * If @use_io_thread, this is @mon_iothread.
      * Else, it's the main thread.
      * These members can be safely accessed without locks.
      */
@@ -240,14 +240,13 @@ struct Monitor {
     int mux_out;
 };
 
-/* Let's add monitor global variables to this struct. */
-static struct {
-    IOThread *mon_iothread;
-    /* Bottom half to dispatch the requests received from I/O thread */
-    QEMUBH *qmp_dispatcher_bh;
-    /* Bottom half to deliver the responses back to clients */
-    QEMUBH *qmp_respond_bh;
-} mon_global;
+IOThread *mon_iothread;
+
+/* Bottom half to dispatch the requests received from I/O thread */
+QEMUBH *qmp_dispatcher_bh;
+
+/* Bottom half to deliver the responses back to clients */
+QEMUBH *qmp_respond_bh;
 
 struct QMPRequest {
     /* Owner of the request */
@@ -531,7 +530,7 @@ static void monitor_json_emitter(Monitor *mon, QObject *data)
         qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
         g_queue_push_tail(mon->qmp.qmp_responses, qobject_ref(data));
         qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
-        qemu_bh_schedule(mon_global.qmp_respond_bh);
+        qemu_bh_schedule(qmp_respond_bh);
     } else {
         /*
          * If not using monitor I/O thread, then we are in main thread.
@@ -4219,7 +4218,7 @@ static void monitor_qmp_bh_dispatcher(void *data)
     qmp_request_free(req_obj);
 
     /* Reschedule instead of looping so the main loop stays responsive */
-    qemu_bh_schedule(mon_global.qmp_dispatcher_bh);
+    qemu_bh_schedule(qmp_dispatcher_bh);
 }
 
 #define QMP_REQ_QUEUE_LEN_MAX (8)
@@ -4305,7 +4304,7 @@ static void handle_qmp_command(JSONMessageParser *parser, GQueue *tokens)
     qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
 
     /* Kick the dispatcher routine */
-    qemu_bh_schedule(mon_global.qmp_dispatcher_bh);
+    qemu_bh_schedule(qmp_dispatcher_bh);
 }
 
 static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
@@ -4358,7 +4357,7 @@ int monitor_suspend(Monitor *mon)
          * Kick I/O thread to make sure this takes effect. It'll be
          * evaluated again in prepare() of the watch object.
          */
-        aio_notify(iothread_get_aio_context(mon_global.mon_iothread));
+        aio_notify(iothread_get_aio_context(mon_iothread));
     }
 
     trace_monitor_suspend(mon, 1);
@@ -4378,7 +4377,7 @@ void monitor_resume(Monitor *mon)
          * kick the thread in case it's sleeping.
          */
         if (mon->use_io_thread) {
-            aio_notify(iothread_get_aio_context(mon_global.mon_iothread));
+            aio_notify(iothread_get_aio_context(mon_iothread));
         }
     } else {
         assert(mon->rs);
@@ -4516,25 +4515,24 @@ static void sortcmdlist(void)
 
 static GMainContext *monitor_get_io_context(void)
 {
-    return iothread_get_g_main_context(mon_global.mon_iothread);
+    return iothread_get_g_main_context(mon_iothread);
 }
 
 static AioContext *monitor_get_aio_context(void)
 {
-    return iothread_get_aio_context(mon_global.mon_iothread);
+    return iothread_get_aio_context(mon_iothread);
 }
 
 static void monitor_iothread_init(void)
 {
-    mon_global.mon_iothread = iothread_create("mon_iothread",
-                                              &error_abort);
+    mon_iothread = iothread_create("mon_iothread", &error_abort);
 
     /*
      * This MUST be on main loop thread since we have commands that
      * have assumption to be run on main loop thread. It would be
      * nice that one day we can remove this assumption in the future.
      */
-    mon_global.qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
-                                              monitor_qmp_bh_dispatcher,
-                                              NULL);
+    qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
+                                   monitor_qmp_bh_dispatcher,
+                                   NULL);
 
@@ -4543,7 +4541,7 @@ static void monitor_iothread_init(void)
      * thread, so that monitors that are using I/O thread will make
      * sure read/write operations are all done on the I/O thread.
      */
-    mon_global.qmp_respond_bh = aio_bh_new(monitor_get_aio_context(),
-                                           monitor_qmp_bh_responder,
-                                           NULL);
+    qmp_respond_bh = aio_bh_new(monitor_get_aio_context(),
+                                monitor_qmp_bh_responder,
+                                NULL);
 }
@@ -4702,7 +4700,7 @@ void monitor_cleanup(void)
      * we need to unregister from chardev below in
      * monitor_data_destroy(), and chardev is not thread-safe yet
      */
-    iothread_stop(mon_global.mon_iothread);
+    iothread_stop(mon_iothread);
 
     /*
      * After we have I/O thread to send responses, it's possible that
@@ -4723,13 +4721,13 @@ void monitor_cleanup(void)
     qemu_mutex_unlock(&monitor_lock);
 
     /* QEMUBHs needs to be deleted before destroying the I/O thread */
-    qemu_bh_delete(mon_global.qmp_dispatcher_bh);
-    mon_global.qmp_dispatcher_bh = NULL;
-    qemu_bh_delete(mon_global.qmp_respond_bh);
-    mon_global.qmp_respond_bh = NULL;
+    qemu_bh_delete(qmp_dispatcher_bh);
+    qmp_dispatcher_bh = NULL;
+    qemu_bh_delete(qmp_respond_bh);
+    qmp_respond_bh = NULL;
 
-    iothread_destroy(mon_global.mon_iothread);
-    mon_global.mon_iothread = NULL;
+    iothread_destroy(mon_iothread);
+    mon_iothread = NULL;
 }
 
 QemuOptsList qemu_mon_opts = {