if (req->mr)
ib_dereg_mr(req->mr);
kfree(req->sge);
- rtrs_iu_free(req->iu, DMA_TO_DEVICE,
- sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
}
kfree(sess->reqs);
sess->reqs = NULL;
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
- rtrs_iu_free(con->rsp_ius, DMA_FROM_DEVICE,
- sess->s.dev->ib_dev, con->queue_size);
+ rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
con->rsp_ius = NULL;
con->queue_size = 0;
}
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(sess->clt, "Sess info request send failed: %s\n",
out:
rtrs_clt_update_wc_stats(con);
- rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
rtrs_clt_change_state(sess, state);
}
out:
if (tx_iu)
- rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
if (rx_iu)
- rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
if (unlikely(err))
/* If we've never taken async path because of malloc problems */
rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
struct ib_device *dev, enum dma_data_direction,
void (*done)(struct ib_cq *cq, struct ib_wc *wc));
-void rtrs_iu_free(struct rtrs_iu *iu, enum dma_data_direction dir,
- struct ib_device *dev, u32 queue_size);
+void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_size);
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head);
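These prototypes are the shared header's (rtrs-pri.h) view of the change: the
direction is still passed to rtrs_iu_alloc(), where it is recorded in each IU,
and dropped from rtrs_iu_free(), which reads it back. A minimal sketch of the
resulting call pattern (my_done() and the surrounding error handling are
illustrative, not part of this patch):

	/* map queue_size send buffers; each IU records DMA_TO_DEVICE */
	ius = rtrs_iu_alloc(queue_size, size, GFP_KERNEL,
			    sess->s.dev->ib_dev, DMA_TO_DEVICE, my_done);
	if (!ius)
		return -ENOMEM;

	/* ... post sends, wait for completions ... */

	/* no direction argument: rtrs_iu_free() uses iu->direction */
	rtrs_iu_free(ius, sess->s.dev->ib_dev, queue_size);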
struct rtrs_srv_mr *srv_mr;
srv_mr = &sess->mrs[i];
- rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
- sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
ib_dereg_mr(srv_mr->mr);
ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
sgt = &srv_mr->sgt;
mr = srv_mr->mr;
free_iu:
- rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
- sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
dereg_mr:
ib_dereg_mr(mr);
unmap_sg:
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(s, "Sess info response send failed: %s\n",
if (unlikely(err)) {
rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
iu_free:
- rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
}
rwr_free:
kfree(rwr);
goto close;
out:
- rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
return;
close:
close_sess(sess);
err = rtrs_iu_post_recv(&con->c, rx_iu);
if (unlikely(err)) {
rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
- rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
return err;
}
return NULL;
for (i = 0; i < queue_size; i++) {
iu = &ius[i];
+ iu->direction = dir;
iu->buf = kzalloc(size, gfp_mask);
if (!iu->buf)
goto err;
iu->cqe.done = done;
iu->size = size;
- iu->direction = dir;
}
return ius;
err:
- rtrs_iu_free(ius, dir, dma_dev, i);
+ rtrs_iu_free(ius, dma_dev, i);
return NULL;
}
EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
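In rtrs_iu_alloc() (rtrs.c) the assignment moves ahead of the kzalloc()
failure exit, apparently so that any IU that can ever be handed to
rtrs_iu_free() already carries a valid direction. For reference, the fields
these helpers touch (struct rtrs_iu abbreviated to the members visible in
this patch):

	struct rtrs_iu {
		struct ib_cqe		cqe;		/* done callback, see the container_of() uses above */
		void			*buf;		/* kzalloc'ed payload */
		size_t			size;
		dma_addr_t		dma_addr;	/* DMA handle set up at alloc time */
		enum dma_data_direction	direction;	/* written by rtrs_iu_alloc(), read by rtrs_iu_free() */
	};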
-void rtrs_iu_free(struct rtrs_iu *ius, enum dma_data_direction dir,
- struct ib_device *ibdev, u32 queue_size)
+void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_size)
{
struct rtrs_iu *iu;
int i;
for (i = 0; i < queue_size; i++) {
iu = &ius[i];
- ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, dir);
+ ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
kfree(iu->buf);
}
kfree(ius);
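Net effect at every call site, as a before/after sketch (single-IU free, as
in the client and server hunks above):

	/* before: the caller had to repeat how the IU was mapped */
	rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);

	/* after: the IU remembers its own mapping direction */
	rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);

A stale or copy-pasted direction at a free site can no longer disagree with
the one used at allocation, which is the point of the cleanup.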