vhost_vdpa: implement IRQ offloading in vhost_vdpa
authorZhu Lingshan <lingshan.zhu@intel.com>
Fri, 31 Jul 2020 06:55:31 +0000 (14:55 +0800)
committerMichael S. Tsirkin <mst@redhat.com>
Wed, 5 Aug 2020 15:08:42 +0000 (11:08 -0400)
This patch introduces a set of functions to set up, tear down,
and update IRQ offloading, respectively by registering, unregistering,
and re-registering the irq_bypass_producer.

With these functions, this commit can set up/tear down
IRQ offloading when DRIVER_OK is set/cleared, and
update IRQ offloading on SET_VRING_CALL.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
Suggested-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20200731065533.4144-5-lingshan.zhu@intel.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
drivers/vhost/Kconfig
drivers/vhost/vdpa.c

index d3688c6..587fbae 100644 (file)
@@ -65,6 +65,7 @@ config VHOST_VDPA
        tristate "Vhost driver for vDPA-based backend"
        depends on EVENTFD
        select VHOST
+       select IRQ_BYPASS_MANAGER
        depends on VDPA
        help
          This kernel module can be loaded in host kernel to accelerate
index 1789e5f..7441b98 100644 (file)
@@ -82,6 +82,39 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
        return IRQ_HANDLED;
 }
 
+static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+       struct vhost_virtqueue *vq = &v->vqs[qid];
+       const struct vdpa_config_ops *ops = v->vdpa->config;
+       struct vdpa_device *vdpa = v->vdpa;
+       int ret, irq;
+
+       if (!ops->get_vq_irq)
+               return;
+
+       irq = ops->get_vq_irq(vdpa, qid);
+       spin_lock(&vq->call_ctx.ctx_lock);
+       irq_bypass_unregister_producer(&vq->call_ctx.producer);
+       if (!vq->call_ctx.ctx || irq < 0) {
+               spin_unlock(&vq->call_ctx.ctx_lock);
+               return;
+       }
+
+       vq->call_ctx.producer.token = vq->call_ctx.ctx;
+       vq->call_ctx.producer.irq = irq;
+       ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+       spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
+static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+       struct vhost_virtqueue *vq = &v->vqs[qid];
+
+       spin_lock(&vq->call_ctx.ctx_lock);
+       irq_bypass_unregister_producer(&vq->call_ctx.producer);
+       spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
 {
        struct vdpa_device *vdpa = v->vdpa;
@@ -121,11 +154,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 {
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
-       u8 status;
+       u8 status, status_old;
+       int nvqs = v->nvqs;
+       u16 i;
 
        if (copy_from_user(&status, statusp, sizeof(status)))
                return -EFAULT;
 
+       status_old = ops->get_status(vdpa);
+
        /*
         * Userspace shouldn't remove status bits unless reset the
         * status to 0.
@@ -135,6 +172,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 
        ops->set_status(vdpa, status);
 
+       /* vq irq is not expected to be changed once DRIVER_OK is set */
+       if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
+               for (i = 0; i < nvqs; i++)
+                       vhost_vdpa_setup_vq_irq(v, i);
+
+       if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+               for (i = 0; i < nvqs; i++)
+                       vhost_vdpa_unsetup_vq_irq(v, i);
+
        return 0;
 }
 
@@ -293,6 +339,7 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
 
        return 0;
 }
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
 {
@@ -351,6 +398,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                        cb.private = NULL;
                }
                ops->set_vq_cb(vdpa, idx, &cb);
+               vhost_vdpa_setup_vq_irq(v, idx);
                break;
 
        case VHOST_SET_VRING_NUM:
@@ -726,6 +774,18 @@ err:
        return r;
 }
 
+static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+{
+       struct vhost_virtqueue *vq;
+       int i;
+
+       for (i = 0; i < v->nvqs; i++) {
+               vq = &v->vqs[i];
+               if (vq->call_ctx.producer.irq)
+                       irq_bypass_unregister_producer(&vq->call_ctx.producer);
+       }
+}
+
 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 {
        struct vhost_vdpa *v = filep->private_data;
@@ -738,6 +798,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
        vhost_vdpa_iotlb_free(v);
        vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
+       vhost_vdpa_clean_irq(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
        mutex_unlock(&d->mutex);