blk-mq: add a new blk_mq_complete_request_remote API
author	Christoph Hellwig <hch@lst.de>
Thu, 11 Jun 2020 06:44:50 +0000 (08:44 +0200)
committer	Jens Axboe <axboe@kernel.dk>
Wed, 24 Jun 2020 15:15:57 +0000 (09:15 -0600)
This is a variant of blk_mq_complete_request that only completes the
request if it needs to be bounced to another CPU or a softirq.  If the
request can be completed locally the function returns false and lets
the driver complete it without requiring an indirect function call.
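A minimal sketch of the intended driver-side pattern (the foo_*
handler and helpers below are hypothetical; only
blk_mq_complete_request_remote comes from this patch):

	/* Hypothetical driver interrupt handler. */
	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		struct request *rq = foo_fetch_completed_request(data);

		/*
		 * Returns true if the completion was bounced to
		 * another CPU or a softirq; otherwise finish the
		 * request right here, skipping the indirect
		 * ->complete call.
		 */
		if (!blk_mq_complete_request_remote(rq))
			foo_complete_rq(rq);	/* the driver's ->complete handler */

		return IRQ_HANDLED;
	}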

Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
include/linux/blk-mq.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 961635b..b8738b3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -632,8 +632,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
        return 0;
 }
 
-static void __blk_mq_complete_request(struct request *rq)
+
+static void __blk_mq_complete_request_remote(void *data)
 {
+       struct request *rq = data;
+
        /*
         * For most of single queue controllers, there is only one irq vector
         * for handling I/O completion, and the only irq's affinity is set
@@ -649,11 +652,6 @@ static void __blk_mq_complete_request(struct request *rq)
                rq->q->mq_ops->complete(rq);
 }
 
-static void __blk_mq_complete_request_remote(void *data)
-{
-       __blk_mq_complete_request(data);
-}
-
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
 {
        int cpu = raw_smp_processor_id();
@@ -672,14 +670,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
        return cpu_online(rq->mq_ctx->cpu);
 }
 
-/**
- * blk_mq_complete_request - end I/O on a request
- * @rq:                the request being processed
- *
- * Description:
- *     Complete a request by scheduling the ->complete_rq operation.
- **/
-void blk_mq_complete_request(struct request *rq)
+bool blk_mq_complete_request_remote(struct request *rq)
 {
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 
@@ -687,10 +678,8 @@ void blk_mq_complete_request(struct request *rq)
        * For a polled request, always complete locally, it's pointless
         * to redirect the completion.
         */
-       if (rq->cmd_flags & REQ_HIPRI) {
-               rq->q->mq_ops->complete(rq);
-               return;
-       }
+       if (rq->cmd_flags & REQ_HIPRI)
+               return false;
 
        if (blk_mq_complete_need_ipi(rq)) {
                rq->csd.func = __blk_mq_complete_request_remote;
@@ -698,8 +687,26 @@ void blk_mq_complete_request(struct request *rq)
                rq->csd.flags = 0;
                smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
        } else {
-               __blk_mq_complete_request(rq);
+               if (rq->q->nr_hw_queues > 1)
+                       return false;
+               blk_mq_trigger_softirq(rq);
        }
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
+
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq:                the request being processed
+ *
+ * Description:
+ *     Complete a request by scheduling the ->complete_rq operation.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+       if (!blk_mq_complete_request_remote(rq))
+               rq->q->mq_ops->complete(rq);
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8e6ab76..1641ec6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -504,6 +504,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request_remote(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
                           struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);