hw/xen: Implement EVTCHNOP_send
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
commit cf7679abdd
parent f5417856d2
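For context, EVTCHNOP_send is the sub-operation of the event_channel_op hypercall that a guest uses to signal a local event channel port. A minimal guest-side sketch, for illustration only (HYPERVISOR_event_channel_op is the conventional guest wrapper and is not part of this patch):

    /* Guest side: raise an event on local event channel 'port'.
     * struct evtchn_send carries only the 32-bit port number, which is
     * why the handler added below asserts sizeof(send) == 4.
     */
    struct evtchn_send send = {
        .port = port,
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);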
@@ -490,6 +490,133 @@ static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
    }
}

static int do_set_port_lm(XenEvtchnState *s, evtchn_port_t port,
                          struct shared_info *shinfo,
                          struct vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;

    mask = 1UL << offset;

    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    /* Update the pending bit itself. If it was already set, we're done. */
    if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
        return 0;
    }

    /* Check if it's masked. */
    if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}

static int do_set_port_compat(XenEvtchnState *s, evtchn_port_t port,
                              struct compat_shared_info *shinfo,
                              struct compat_vcpu_info *vcpu_info)
{
    const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
    typeof(shinfo->evtchn_pending[0]) mask;
    int idx = port / bits_per_word;
    int offset = port % bits_per_word;

    mask = 1UL << offset;

    if (idx >= bits_per_word) {
        return -EINVAL;
    }

    /* Update the pending bit itself. If it was already set, we're done. */
    if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
        return 0;
    }

    /* Check if it's masked. */
    if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
        return 0;
    }

    /* Now on to the vcpu_info evtchn_pending_sel index... */
    mask = 1UL << idx;

    /* If a port in this word was already pending for this vCPU, all done. */
    if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
        return 0;
    }

    /* Set evtchn_upcall_pending for this vCPU */
    if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
        return 0;
    }

    inject_callback(s, s->port_table[port].vcpu);

    return 0;
}

static int set_port_pending(XenEvtchnState *s, evtchn_port_t port)
{
    void *vcpu_info, *shinfo;

    if (s->port_table[port].type == EVTCHNSTAT_closed) {
        return -EINVAL;
    }

    if (s->evtchn_in_kernel) {
        XenEvtchnPort *p = &s->port_table[port];
        CPUState *cpu = qemu_get_cpu(p->vcpu);
        struct kvm_irq_routing_xen_evtchn evt;

        if (!cpu) {
            return 0;
        }

        evt.port = port;
        evt.vcpu = kvm_arch_vcpu_id(cpu);
        evt.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

        return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_EVTCHN_SEND, &evt);
    }

    shinfo = xen_overlay_get_shinfo_ptr();
    if (!shinfo) {
        return -ENOTSUP;
    }

    vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
    if (!vcpu_info) {
        return -EINVAL;
    }

    if (xen_is_long_mode()) {
        return do_set_port_lm(s, port, shinfo, vcpu_info);
    } else {
        return do_set_port_compat(s, port, shinfo, vcpu_info);
    }
}

static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port)
{
    void *p = xen_overlay_get_shinfo_ptr();
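As a quick worked example of the two-level index arithmetic in do_set_port_lm() above, assuming 64-bit long mode where each word of evtchn_pending covers 64 ports (illustrative values, not part of the patch):

    /* Illustration: long mode, so bits_per_word == 64. */
    evtchn_port_t port = 130;   /* hypothetical example port              */
    int idx = port / 64;        /* == 2: word index into evtchn_pending[] */
    int offset = port % 64;     /* == 2: bit within that word             */
    /* do_set_port_lm() sets bit 'offset' in evtchn_pending[idx], then
     * bit 'idx' in vcpu_info->evtchn_pending_sel, then
     * evtchn_upcall_pending, and finally calls inject_callback(). */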
@@ -709,3 +836,56 @@ int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi)

    return ret;
}

int xen_evtchn_send_op(struct evtchn_send *send)
{
    XenEvtchnState *s = xen_evtchn_singleton;
    XenEvtchnPort *p;
    int ret = 0;

    if (!s) {
        return -ENOTSUP;
    }

    if (!valid_port(send->port)) {
        return -EINVAL;
    }

    qemu_mutex_lock(&s->port_lock);

    p = &s->port_table[send->port];

    switch (p->type) {
    case EVTCHNSTAT_interdomain:
        if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU) {
            /*
             * This is an event from the guest to qemu itself, which is
             * serving as the driver domain. Not yet implemented; it will
             * be hooked up to the qemu implementation of xenstore,
             * console, PV net/block drivers etc.
             */
            ret = -ENOSYS;
        } else {
            /* Loopback interdomain ports; just a complex IPI */
            set_port_pending(s, p->type_val);
        }
        break;

    case EVTCHNSTAT_ipi:
        set_port_pending(s, send->port);
        break;

    case EVTCHNSTAT_unbound:
        /* Xen will silently drop these */
        break;

    default:
        ret = -EINVAL;
        break;
    }

    qemu_mutex_unlock(&s->port_lock);

    return ret;
}
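A hedged usage sketch of the new xen_evtchn_send_op() entry point added above; the port number is hypothetical and the return values follow the code in this hunk:

    struct evtchn_send send = { .port = 11 };   /* hypothetical IPI port */
    int err = xen_evtchn_send_op(&send);
    /* err == -ENOTSUP if the evtchn backend isn't instantiated,
     * -EINVAL for an out-of-range port or an unsupported port type,
     * -ENOSYS for interdomain ports targeting qemu itself (for now),
     * and 0 on success (EVTCHNSTAT_unbound ports are silently dropped,
     * as Xen does). */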
@@ -20,10 +20,12 @@ struct evtchn_close;
struct evtchn_unmask;
struct evtchn_bind_virq;
struct evtchn_bind_ipi;
struct evtchn_send;
int xen_evtchn_status_op(struct evtchn_status *status);
int xen_evtchn_close_op(struct evtchn_close *close);
int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq);
int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi);
int xen_evtchn_send_op(struct evtchn_send *send);

#endif /* QEMU_XEN_EVTCHN_H */
@@ -909,6 +909,18 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
        }
        break;
    }
    case EVTCHNOP_send: {
        struct evtchn_send send;

        qemu_build_assert(sizeof(send) == 4);
        if (kvm_copy_from_gva(cs, arg, &send, sizeof(send))) {
            err = -EFAULT;
            break;
        }

        err = xen_evtchn_send_op(&send);
        break;
    }
    default:
        return false;
    }