RDMA/hns: Use flush framework for the case in aeq
Author:     Yixian Liu <liuyixian@huawei.com>
AuthorDate: Sat, 22 Feb 2020 10:25:57 +0000 (18:25 +0800)
Commit:     Jason Gunthorpe <jgg@mellanox.com>
CommitDate: Fri, 28 Feb 2020 15:28:31 +0000 (11:28 -0400)
Now that the flush framework is in place, use it instead of the existing
flush process for QP errors reported through the asynchronous event
queue (AEQ).

Link: https://lore.kernel.org/r/1582367158-27030-2-git-send-email-liuyixian@huawei.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_qp.c
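
For context, a minimal sketch of the pattern the flush framework follows
(init_flush_work(), flush_flag and HNS_ROCE_FLUSH_FLAG are taken from the
diff below; the handler body is an assumption reconstructed from the old
inline flush logic in hns_roce_set_qps_to_err(), not necessarily the
driver's exact code):

	/* Deferred flush: the AEQ handler only sets HNS_ROCE_FLUSH_FLAG and
	 * queues this work, so the modify-QP runs in process context and a
	 * burst of error events on one QP schedules at most one flush.
	 */
	static void flush_work_handle(struct work_struct *work)
	{
		struct hns_roce_work *fwork =
			container_of(work, struct hns_roce_work, work);
		struct hns_roce_qp *hr_qp =
			container_of(fwork, struct hns_roce_qp, flush_work);
		struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
		int ret;

		if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
			ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr,
						 IB_QP_STATE, NULL);
			if (ret)
				dev_err(fwork->hr_dev->dev,
					"failed to modify QP to ERR for flush, ret = %d.\n",
					ret);
		}

		/* Drop the reference taken when the work was queued. */
		if (atomic_dec_and_test(&hr_qp->refcount))
			complete(&hr_qp->free);
	}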

diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 593bf8d..2b372ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5184,39 +5184,6 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
        return ret;
 }
 
-static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
-{
-       struct hns_roce_qp *hr_qp;
-       struct ib_qp_attr attr;
-       int attr_mask;
-       int ret;
-
-       hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
-       if (!hr_qp) {
-               dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
-               return;
-       }
-
-       if (hr_qp->ibqp.uobject) {
-               if (hr_qp->sdb_en == 1) {
-                       hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
-                       if (hr_qp->rdb_en == 1)
-                               hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
-               } else {
-                       dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
-                       return;
-               }
-       }
-
-       attr_mask = IB_QP_STATE;
-       attr.qp_state = IB_QPS_ERR;
-       ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
-                                   hr_qp->state, IB_QPS_ERR);
-       if (ret)
-               dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
-                       qpn);
-}
-
 static void hns_roce_irq_work_handle(struct work_struct *work)
 {
        struct hns_roce_work *irq_work =
@@ -5240,17 +5207,14 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
        case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
                        qpn, irq_work->sub_type);
-               hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
                break;
        case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                dev_err(dev, "Invalid request local work queue 0x%x error.\n",
                        qpn);
-               hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
                break;
        case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
                        qpn, irq_work->sub_type);
-               hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
                break;
        case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
                dev_warn(dev, "SRQ limit reach.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index c52e1b0..6c3f0f7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -98,6 +98,15 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
                return;
        }
 
+       if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
+           (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+            event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+            event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
+               qp->state = IB_QPS_ERR;
+               if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+                       init_flush_work(hr_dev, qp);
+       }
+
        qp->event(qp, (enum hns_roce_event)event_type);
 
        if (atomic_dec_and_test(&qp->refcount))
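
Design note: after this change the AEQ path performs no QP state
transition itself. It marks qp->state = IB_QPS_ERR and relies on
test_and_set_bit() to make the work queuing idempotent, so repeated
error events on the same QP schedule at most one flush. The hw_rev
check skips HIP06 (HW v1), which does not use the flush framework.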