monitor: Improve some comments
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20180703085358.13941-32-armbru@redhat.com>
parent 279f9e0840
commit 774a6b67a4

diff --git a/monitor.c b/monitor.c
--- a/monitor.c
+++ b/monitor.c
@@ -169,15 +169,16 @@ typedef struct {
     JSONMessageParser parser;
     /*
      * When a client connects, we're in capabilities negotiation mode.
-     * When command qmp_capabilities succeeds, we go into command
-     * mode.
+     * @commands is &qmp_cap_negotiation_commands then.  When command
+     * qmp_capabilities succeeds, we go into command mode, and
+     * @command becomes &qmp_commands.
      */
     QmpCommandList *commands;
     bool capab_offered[QMP_CAPABILITY__MAX]; /* capabilities offered */
     bool capab[QMP_CAPABILITY__MAX];         /* offered and accepted */
     /*
-     * Protects qmp request/response queue.  Please take monitor_lock
-     * first when used together.
+     * Protects qmp request/response queue.
+     * Take monitor_lock first when you need both.
      */
     QemuMutex qmp_queue_lock;
     /* Input queue that holds all the parsed QMP requests */
@@ -232,7 +233,7 @@ struct Monitor {
     QemuMutex mon_lock;
 
     /*
-     * Fields that are protected by the per-monitor lock.
+     * Members that are protected by the per-monitor lock
      */
     QLIST_HEAD(, mon_fd_t) fds;
     QString *outbuf;
@@ -241,6 +242,7 @@ struct Monitor {
     int mux_out;
 };
 
+/* Shared monitor I/O thread */
 IOThread *mon_iothread;
 
 /* Bottom half to dispatch the requests received from I/O thread */
@@ -302,9 +304,9 @@ static inline bool monitor_is_qmp(const Monitor *mon)
 }
 
 /**
- * Whether @mon is using readline?  Note: not all HMP monitors use
- * readline, e.g., gdbserver has a non-interactive HMP monitor, so
- * readline is not used there.
+ * Is @mon using readline?
+ * Note: not all HMP monitors use readline, e.g., gdbserver has a
+ * non-interactive HMP monitor, so readline is not used there.
  */
 static inline bool monitor_uses_readline(const Monitor *mon)
 {
@@ -318,14 +320,12 @@ static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
 
 /*
  * Return the clock to use for recording an event's time.
+ * It's QEMU_CLOCK_REALTIME, except for qtests it's
+ * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
  * Beware: result is invalid before configure_accelerator().
  */
 static inline QEMUClockType monitor_get_event_clock(void)
 {
-    /*
-     * This allows us to perform tests on the monitor queues to verify
-     * that the rate limits are enforced.
-     */
     return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
 }
 
@@ -368,7 +368,7 @@ static void qmp_request_free(QMPRequest *req)
     g_free(req);
 }
 
-/* Must with the mon->qmp.qmp_queue_lock held */
+/* Caller must hold mon->qmp.qmp_queue_lock */
 static void monitor_qmp_cleanup_req_queue_locked(Monitor *mon)
 {
     while (!g_queue_is_empty(mon->qmp.qmp_requests)) {
@@ -376,7 +376,7 @@ static void monitor_qmp_cleanup_req_queue_locked(Monitor *mon)
     }
 }
 
-/* Must with the mon->qmp.qmp_queue_lock held */
+/* Caller must hold the mon->qmp.qmp_queue_lock */
 static void monitor_qmp_cleanup_resp_queue_locked(Monitor *mon)
 {
     while (!g_queue_is_empty(mon->qmp.qmp_responses)) {
@@ -407,7 +407,7 @@ static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
     return FALSE;
 }
 
-/* Called with mon->mon_lock held. */
+/* Caller must hold mon->mon_lock */
 static void monitor_flush_locked(Monitor *mon)
 {
     int rc;
@@ -523,10 +523,8 @@ static void qmp_queue_response(Monitor *mon, QDict *rsp)
 {
     if (mon->use_io_thread) {
         /*
-         * If using I/O thread, we need to queue the item so that I/O
-         * thread will do the rest for us.  Take refcount so that
-         * caller won't free the data (which will be finally freed in
-         * responder thread).
+         * Push a reference to the response queue.  The I/O thread
+         * drains that queue and emits.
          */
         qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
         g_queue_push_tail(mon->qmp.qmp_responses, qobject_ref(rsp));
@@ -534,8 +532,8 @@ static void qmp_queue_response(Monitor *mon, QDict *rsp)
         qemu_bh_schedule(qmp_respond_bh);
     } else {
         /*
-         * If not using monitor I/O thread, then we are in main thread.
-         * Do the emission right away.
+         * Not using monitor I/O thread, i.e. we are in the main thread.
+         * Emit right away.
          */
         qmp_send_response(mon, rsp);
     }
@@ -611,8 +609,9 @@ static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
 };
 
 /*
- * Emits the event to every monitor instance, @event is only used for trace
- * Called with monitor_lock held.
+ * Broadcast an event to all monitors.
+ * @qdict is the event object.  Its member "event" must match @event.
+ * Caller must hold monitor_lock.
  */
 static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
 {
@@ -981,8 +980,7 @@ static int parse_cmdline(const char *cmdline,
 }
 
 /*
- * Returns true if the command can be executed in preconfig mode
- * i.e. it has the 'p' flag.
+ * Can command @cmd be executed in preconfig state?
  */
 static bool cmd_can_preconfig(const mon_cmd_t *cmd)
 {
@@ -2221,7 +2219,7 @@ void qmp_getfd(const char *fdname, Error **errp)
         tmp_fd = monfd->fd;
         monfd->fd = fd;
         qemu_mutex_unlock(&cur_mon->mon_lock);
-        /* Make sure close() is out of critical section */
+        /* Make sure close() is outside critical section */
         close(tmp_fd);
         return;
     }
@@ -2250,7 +2248,7 @@ void qmp_closefd(const char *fdname, Error **errp)
         g_free(monfd->name);
         g_free(monfd);
         qemu_mutex_unlock(&cur_mon->mon_lock);
-        /* Make sure close() is out of critical section */
+        /* Make sure close() is outside critical section */
         close(tmp_fd);
         return;
     }
@@ -4139,7 +4137,8 @@ static void monitor_qmp_dispatch(Monitor *mon, QObject *req, QObject *id)
 }
 
 /*
- * Pop one QMP request from monitor queues, return NULL if not found.
+ * Pop a QMP request from a monitor request queue.
+ * Return the request, or NULL if all request queues are empty.
  * We are using round-robin fashion to pop the request, to avoid
  * processing commands only on a very busy monitor.  To achieve that,
  * when we process one request on a specific monitor, we put that
@@ -4234,7 +4233,7 @@ static void handle_qmp_command(JSONMessageParser *parser, GQueue *tokens)
     }
 
     if (qdict && qmp_is_oob(qdict)) {
-        /* Out-of-band (OOB) requests are executed directly in parser. */
+        /* OOB commands are executed immediately */
         trace_monitor_qmp_cmd_out_of_band(qobject_get_try_str(id)
                                           ?: "");
         monitor_qmp_dispatch(mon, req, id);
@@ -4356,8 +4355,8 @@ void monitor_resume(Monitor *mon)
     if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
         if (monitor_is_qmp(mon)) {
             /*
-             * For QMP monitors that are running in I/O thread, let's
-             * kick the thread in case it's sleeping.
+             * For QMP monitors that are running in the I/O thread,
+             * let's kick the thread in case it's sleeping.
              */
             if (mon->use_io_thread) {
                 aio_notify(iothread_get_aio_context(mon_iothread));
@@ -4505,18 +4504,18 @@ static void monitor_iothread_init(void)
     mon_iothread = iothread_create("mon_iothread", &error_abort);
 
     /*
-     * This MUST be on main loop thread since we have commands that
-     * have assumption to be run on main loop thread.  It would be
-     * nice that one day we can remove this assumption in the future.
+     * The dispatcher BH must run in the main loop thread, since we
+     * have commands assuming that context.  It would be nice to get
+     * rid of those assumptions.
      */
     qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                    monitor_qmp_bh_dispatcher,
                                    NULL);
 
     /*
-     * Unlike the dispatcher BH, this must be run on the monitor I/O
-     * thread, so that monitors that are using I/O thread will make
-     * sure read/write operations are all done on the I/O thread.
+     * The responder BH must be run in the monitor I/O thread, so that
+     * monitors that are using the I/O thread have their output
+     * written by the I/O thread.
      */
     qmp_respond_bh = aio_bh_new(monitor_get_aio_context(),
                                 monitor_qmp_bh_responder,
@@ -4586,15 +4585,11 @@ static void monitor_qmp_setup_handlers_bh(void *opaque)
     GMainContext *context;
 
     if (mon->use_io_thread) {
-        /*
-         * When @use_io_thread is set, we use the global shared dedicated
-         * I/O thread for this monitor to handle input/output.
-         */
+        /* Use @mon_iothread context */
         context = monitor_get_io_context();
-        /* We should have inited globals before reaching here. */
         assert(context);
     } else {
-        /* The default main loop, which is the main thread */
+        /* Use default main loop context */
         context = NULL;
     }
 
@@ -4644,15 +4639,12 @@ void monitor_init(Chardev *chr, int flags)
             remove_fd_in_watch(chr);
             /*
              * We can't call qemu_chr_fe_set_handlers() directly here
-             * since during the procedure the chardev will be active
-             * and running in monitor I/O thread, while we'll still do
-             * something before returning from it, which is a possible
-             * race too.  To avoid that, we just create a BH to setup
-             * the handlers.
+             * since chardev might be running in the monitor I/O
+             * thread.  Schedule a bottom half.
              */
             aio_bh_schedule_oneshot(monitor_get_aio_context(),
                                     monitor_qmp_setup_handlers_bh, mon);
-            /* We'll add this to mon_list in the BH when setup done */
+            /* The bottom half will add @mon to @mon_list */
             return;
         } else {
             qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read,
@@ -4673,21 +4665,19 @@ void monitor_cleanup(void)
 
     /*
      * We need to explicitly stop the I/O thread (but not destroy it),
-     * cleanup the monitor resources, then destroy the I/O thread since
+     * clean up the monitor resources, then destroy the I/O thread since
      * we need to unregister from chardev below in
      * monitor_data_destroy(), and chardev is not thread-safe yet
      */
     iothread_stop(mon_iothread);
 
     /*
-     * After we have I/O thread to send responses, it's possible that
-     * when we stop the I/O thread there are still replies queued in the
-     * responder queue.  Flush all of them.  Note that even after this
-     * flush it's still possible that out buffer is not flushed.
-     * It'll be done in below monitor_flush() as the last resort.
+     * Flush all response queues.  Note that even after this flush,
+     * data may remain in output buffers.
      */
     monitor_qmp_bh_responder(NULL);
 
+    /* Flush output buffers and destroy monitors */
     qemu_mutex_lock(&monitor_lock);
     QTAILQ_FOREACH_SAFE(mon, &mon_list, entry, next) {
         QTAILQ_REMOVE(&mon_list, mon, entry);
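
The rewritten lock comment in the first hunk states a lock-ordering rule: when both monitor_lock and qmp_queue_lock are needed, monitor_lock is taken first. A minimal pthreads sketch of why a single global order matters (illustrative names, not QEMU code):

#include <pthread.h>

static pthread_mutex_t monitor_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t qmp_queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every thread that needs both locks takes them in the same order:
 * monitor_lock first, then qmp_queue_lock.  If another thread took
 * them in the opposite order, each thread could end up holding one
 * lock while waiting forever for the other (ABBA deadlock). */
static void update_monitor_and_queues(void)
{
    pthread_mutex_lock(&monitor_lock);
    pthread_mutex_lock(&qmp_queue_lock);
    /* ... touch monitor state and the request/response queues ... */
    pthread_mutex_unlock(&qmp_queue_lock);
    pthread_mutex_unlock(&monitor_lock);
}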
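
Several hunks settle on one comment formula, "Caller must hold <lock>", for the *_locked helpers. The convention is that a function with the _locked suffix never takes the lock itself; its unsuffixed wrapper does. A generic sketch of that pattern (hypothetical names, not QEMU code):

#include <pthread.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;

/* Caller must hold queue_lock */
static void queue_clear_locked(void)
{
    queue_len = 0;              /* safe: our caller holds the lock */
}

static void queue_clear(void)
{
    pthread_mutex_lock(&queue_lock);
    queue_clear_locked();       /* the _locked helper never locks */
    pthread_mutex_unlock(&queue_lock);
}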
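
The comment rewritten at -4139,7 describes the round-robin policy for popping requests: serve the first monitor whose queue is non-empty, then rotate that monitor to the tail of the list so a busy monitor cannot starve the others. A self-contained toy model of that policy (illustrative only; the real code walks mon_list and uses GQueue):

#include <stdio.h>

enum { NMON = 3, QCAP = 8 };

typedef struct Mon {
    int reqs[QCAP];                 /* per-monitor FIFO of request ids */
    int head, len;
    struct Mon *next;
} Mon;

static Mon *mon_list;               /* head is the next monitor to serve */

/* Pop one request round-robin: take from the first monitor whose queue
 * is non-empty, then move that monitor to the tail so the others are
 * visited before it gets served again.  Return -1 if every queue is
 * empty (the NULL case in the comment above). */
static int pop_any(void)
{
    Mon **pp, *m;

    for (pp = &mon_list; (m = *pp) != NULL; pp = &m->next) {
        if (m->len == 0) {
            continue;
        }
        int req = m->reqs[m->head];
        m->head = (m->head + 1) % QCAP;
        m->len--;
        *pp = m->next;              /* unlink @m ... */
        Mon **tail = &mon_list;
        while (*tail) {
            tail = &(*tail)->next;
        }
        m->next = NULL;
        *tail = m;                  /* ... and append it at the tail */
        return req;
    }
    return -1;
}

int main(void)
{
    static Mon mons[NMON];
    for (int i = 0; i < NMON; i++) {
        mons[i].next = (i + 1 < NMON) ? &mons[i + 1] : NULL;
    }
    mon_list = mons;
    mons[0].reqs[0] = 100; mons[0].reqs[1] = 101; mons[0].reqs[2] = 102;
    mons[0].len = 3;                /* monitor 0 is busy */
    mons[2].reqs[0] = 300; mons[2].len = 1;

    int r;
    while ((r = pop_any()) != -1) {
        printf("served request %d\n", r);   /* prints 100, 300, 101, 102 */
    }
    return 0;
}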