RDMA/efa: Group keep alive received counter with other SW stats
author Gal Pressman <galpress@amazon.com>
Tue, 15 Sep 2020 14:14:48 +0000 (17:14 +0300)
committer Jason Gunthorpe <jgg@nvidia.com>
Tue, 22 Sep 2020 23:21:11 +0000 (20:21 -0300)
The keep alive received counter is a software stat; keep it grouped with
all other software stats.  Since all stored stats are software stats,
remove the efa_sw_stats struct and use efa_stats instead.

Link: https://lore.kernel.org/r/20200915141449.8428-2-galpress@amazon.com
Reviewed-by: Daniel Kranzdorf <dkkranzd@amazon.com>
Reviewed-by: Yossi Leybovich <sleybo@amazon.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/efa/efa.h
drivers/infiniband/hw/efa/efa_verbs.c
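
As a quick illustration of the resulting layout (hypothetical, simplified
names; the real definitions are in the diff below), every counter becomes a
plain atomic64_t member of a single flat struct, and error paths bump it
directly, just like the keep-alive counter:

	#include <linux/atomic.h>

	/* Sketch only: all software counters sit in one flat struct. */
	struct example_stats {
		atomic64_t alloc_pd_err;	/* error-path counter */
		atomic64_t keep_alive_rcvd;	/* event counter, same storage type */
	};

	static void example_alloc_pd_error(struct example_stats *stats)
	{
		/* previously: atomic64_inc(&stats->sw_stats.alloc_pd_err); */
		atomic64_inc(&stats->alloc_pd_err);
	}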

diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 64ae8ba..e5d9712 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -33,7 +33,8 @@ struct efa_irq {
        char name[EFA_IRQNAME_SIZE];
 };
 
-struct efa_sw_stats {
+/* Don't use anything other than atomic64 */
+struct efa_stats {
        atomic64_t alloc_pd_err;
        atomic64_t create_qp_err;
        atomic64_t create_cq_err;
@@ -41,11 +42,6 @@ struct efa_sw_stats {
        atomic64_t alloc_ucontext_err;
        atomic64_t create_ah_err;
        atomic64_t mmap_err;
-};
-
-/* Don't use anything other than atomic64 */
-struct efa_stats {
-       struct efa_sw_stats sw_stats;
        atomic64_t keep_alive_rcvd;
 };
 
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 52b7ea9..c0c4eee 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -380,7 +380,7 @@ int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 err_dealloc_pd:
        efa_pd_dealloc(dev, result.pdn);
 err_out:
-       atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
+       atomic64_inc(&dev->stats.alloc_pd_err);
        return err;
 }
 
@@ -742,7 +742,7 @@ err_free_mapped:
 err_free_qp:
        kfree(qp);
 err_out:
-       atomic64_inc(&dev->stats.sw_stats.create_qp_err);
+       atomic64_inc(&dev->stats.create_qp_err);
        return ERR_PTR(err);
 }
 
@@ -1128,7 +1128,7 @@ err_free_mapped:
                        DMA_FROM_DEVICE);
 
 err_out:
-       atomic64_inc(&dev->stats.sw_stats.create_cq_err);
+       atomic64_inc(&dev->stats.create_cq_err);
        return err;
 }
 
@@ -1581,7 +1581,7 @@ err_unmap:
 err_free:
        kfree(mr);
 err_out:
-       atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
+       atomic64_inc(&dev->stats.reg_mr_err);
        return ERR_PTR(err);
 }
 
@@ -1709,7 +1709,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
 err_dealloc_uar:
        efa_dealloc_uar(dev, result.uarn);
 err_out:
-       atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
+       atomic64_inc(&dev->stats.alloc_ucontext_err);
        return err;
 }
 
@@ -1742,7 +1742,7 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
                ibdev_dbg(&dev->ibdev,
                          "pgoff[%#lx] does not have valid entry\n",
                          vma->vm_pgoff);
-               atomic64_inc(&dev->stats.sw_stats.mmap_err);
+               atomic64_inc(&dev->stats.mmap_err);
                return -EINVAL;
        }
        entry = to_emmap(rdma_entry);
@@ -1784,7 +1784,7 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
                        "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
                        entry->address, rdma_entry->npages * PAGE_SIZE,
                        entry->mmap_flag, err);
-               atomic64_inc(&dev->stats.sw_stats.mmap_err);
+               atomic64_inc(&dev->stats.mmap_err);
        }
 
        rdma_user_mmap_entry_put(rdma_entry);
@@ -1869,7 +1869,7 @@ int efa_create_ah(struct ib_ah *ibah,
 err_destroy_ah:
        efa_ah_destroy(dev, ah);
 err_out:
-       atomic64_inc(&dev->stats.sw_stats.create_ah_err);
+       atomic64_inc(&dev->stats.create_ah_err);
        return err;
 }
 
@@ -1930,13 +1930,14 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
 
        s = &dev->stats;
        stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
-       stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
-       stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
-       stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->sw_stats.create_cq_err);
-       stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
-       stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
-       stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
-       stats->value[EFA_MMAP_ERR] = atomic64_read(&s->sw_stats.mmap_err);
+       stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
+       stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
+       stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
+       stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
+       stats->value[EFA_ALLOC_UCONTEXT_ERR] =
+               atomic64_read(&s->alloc_ucontext_err);
+       stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
+       stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
 
        return ARRAY_SIZE(efa_stats_names);
 }